KafkaConfiguration.java 5.1 KB

package com.winhc.bigdata.task.configuration;

import com.google.common.collect.Maps;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

import java.util.HashMap;
import java.util.Map;

/**
 * @author XuJiakai
 * 2021/6/18 16:20
 */
@Configuration
@EnableKafka
public class KafkaConfiguration {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;

    @Value("${spring.kafka.producer.retries}")
    private Integer retries;

    @Value("${spring.kafka.producer.batch-size}")
    private Integer batchSize;

    @Value("${spring.kafka.producer.buffer-memory}")
    private Integer bufferMemory;

    /**
     * Producer configuration.
     */
    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = Maps.newHashMap();
        // acks=-1 (all): the broker acknowledges only after all in-sync replicas have the record
        props.put(ProducerConfig.ACKS_CONFIG, "-1");
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "3000");
        return props;
    }

    /**
     * Producer factory.
     */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /**
     * Producer template.
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    @Bean("containerFactory")
    public ConcurrentKafkaListenerContainerFactory<String, String> containerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> container = new ConcurrentKafkaListenerContainerFactory<>();
        container.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerProps()));
        // Concurrency: should be less than or equal to the number of partitions of the topic
        container.setConcurrency(1);
        // Poll timeout
        //container.getContainerProperties().setPollTimeout(1500);
        // Enable batch listening
        container.setBatchListener(true);
        // Offset commit (ack) mode
        //container.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return container;
    }

    @Bean("smallContainerFactory")
    public ConcurrentKafkaListenerContainerFactory<String, String> smallContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> container = new ConcurrentKafkaListenerContainerFactory<>();
        Map<String, Object> props = consumerProps();
        // Smaller batches: at most 16 records per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 16);
        container.setConsumerFactory(new DefaultKafkaConsumerFactory<>(props));
        // Concurrency: should be less than or equal to the number of partitions of the topic
        container.setConcurrency(1);
        // Poll timeout
        //container.getContainerProperties().setPollTimeout(1500);
        // Enable batch listening
        container.setBatchListener(true);
        // Offset commit (ack) mode
        //container.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return container;
    }

    private Map<String, Object> consumerProps() {
        Map<String, Object> props = new HashMap<>(8);
        // Kafka bootstrap servers
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Whether offsets are committed automatically
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        // Maximum number of records returned per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 5 * 60 * 1000);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 5 * 60 * 1000);
        // Maximum time allowed between two polls before the consumer is considered failed
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 5 * 60 * 1000);
        // Key/value deserializers
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }
}
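
A minimal usage sketch for the producer side: the kafkaTemplate bean above can be injected anywhere in the application. This class, its package, and the topic name "example-topic" are illustrative assumptions, not part of the original project.

package com.winhc.bigdata.task.service;

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

@Service
public class ExampleProducerService {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public ExampleProducerService(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void send(String key, String value) {
        // Asynchronous send; with acks=-1 the broker acknowledges only after all
        // in-sync replicas have persisted the record ("example-topic" is a placeholder)
        kafkaTemplate.send("example-topic", key, value);
    }
}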
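
And a sketch of the consumer side: a batch listener wired to the "containerFactory" bean. Since consumerProps() does not set a group id, the listener must supply one. The topic, group id, and class name below are assumptions for illustration only.

package com.winhc.bigdata.task.listener;

import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class ExampleBatchListener {

    // Because the factory calls setBatchListener(true), each invocation receives
    // a whole poll as a List (up to MAX_POLL_RECORDS, i.e. 1000 records here)
    @KafkaListener(topics = "example-topic", groupId = "example-group", containerFactory = "containerFactory")
    public void onMessages(List<ConsumerRecord<String, String>> records) {
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("partition=%d offset=%d value=%s%n",
                    record.partition(), record.offset(), record.value());
        }
    }
}

Pointing containerFactory at "smallContainerFactory" instead would cap each delivered batch at 16 records, which suits handlers that do heavier per-record work.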