Browse Source

多余配置删除

dev_kxc
zhuyulei 2 years ago
parent
commit
c42616815a
  1. 4
      jwtech-admin/pom.xml
  2. 126
      jwtech-admin/src/main/resources/application-test.yml
  3. 4
      pom.xml

4
jwtech-admin/pom.xml

@@ -28,11 +28,11 @@
<version>2.2.6</version>
</dependency>
<dependency>
<!--<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
-->
<!-- SpringBoot 测试 -->
<dependency>

126
jwtech-admin/src/main/resources/application-test.yml

@@ -45,63 +45,63 @@ logging:
level:
com.kms: debug
org.springframework: warn
thrift:
host: 192.168.0.61
port: 9710
timeOut: 60000
maxActive: 20000
max-total: 50
max-idle: 10
min-idle: 5
max-wait-millis: 200
indexName: tianhui_cg
indexType: doc
esEntity: com.kms.system.es.EsEntity
#thrift:
# host: 192.168.0.61
# port: 9710
# timeOut: 60000
# maxActive: 20000
# max-total: 50
# max-idle: 10
# min-idle: 5
# max-wait-millis: 200
# indexName: tianhui_cg
# indexType: doc
# esEntity: com.kms.system.es.EsEntity
# Spring配置
spring:
kafka:
autoStartup: false
topics: tianhui
# kafka信息 http://182.92.86.163:9092/
bootstrap-servers: 106.2.224.58:8021
#bootstrap-servers: 182.92.86.163:9092
producer: # 生产者配置
retries: 3 # 设置大于0的值,则客户端会将发送失败的记录重新发送
batch-size: 33554432 #32M
buffer-memory: 33554432 #32M
acks: 1
# 指定消息key和消息体的编解码方式
key-serializer: org.apache.kafka.common.serialization.StringSerializer
value-serializer: org.apache.kafka.common.serialization.StringSerializer
consumer:
session:
timeout: 200000000 # 连接超时时间
group-id: jianweikeji # 消费者组
enable-auto-commit: false # 关闭自动提交
auto-offset-reset: earliest # 当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,从头开始消费
key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
listener:
# 当每一条记录被消费者监听器(ListenerConsumer)处理之后提交
# RECORD
# 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后提交
# BATCH
# 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后,距离上次提交时间大于TIME时提交
# TIME
# 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后,被处理record数量大于等于COUNT时提交
# COUNT
# TIME | COUNT 有一个条件满足时提交
# COUNT_TIME
# 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后, 手动调用Acknowledgment.acknowledge()后提交
# MANUAL
# 手动调用Acknowledgment.acknowledge()后立即提交,一般使用这种
# MANUAL_IMMEDIATE
ack-mode: manual_immediate
data:
neo4j:
uri: bolt://192.168.0.61:7687
username: neo4j
password: password
# kafka:
# autoStartup: false
# topics: tianhui
# # kafka信息 http://182.92.86.163:9092/
# bootstrap-servers: 106.2.224.58:8021
# #bootstrap-servers: 182.92.86.163:9092
# producer: # 生产者配置
# retries: 3 # 设置大于0的值,则客户端会将发送失败的记录重新发送
# batch-size: 33554432 #32M
# buffer-memory: 33554432 #32M
# acks: 1
# # 指定消息key和消息体的编解码方式
# key-serializer: org.apache.kafka.common.serialization.StringSerializer
# value-serializer: org.apache.kafka.common.serialization.StringSerializer
# consumer:
# session:
# timeout: 200000000 # 连接超时时间
# group-id: jianweikeji # 消费者组
# enable-auto-commit: false # 关闭自动提交
# auto-offset-reset: earliest # 当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,从头开始消费
# key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
# value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
# listener:
# # 当每一条记录被消费者监听器(ListenerConsumer)处理之后提交
# # RECORD
# # 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后提交
# # BATCH
# # 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后,距离上次提交时间大于TIME时提交
# # TIME
# # 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后,被处理record数量大于等于COUNT时提交
# # COUNT
# # TIME | COUNT 有一个条件满足时提交
# # COUNT_TIME
# # 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后, 手动调用Acknowledgment.acknowledge()后提交
# # MANUAL
# # 手动调用Acknowledgment.acknowledge()后立即提交,一般使用这种
# # MANUAL_IMMEDIATE
# ack-mode: manual_immediate
# data:
# neo4j:
# uri: bolt://192.168.0.61:7687
# username: neo4j
# password: password
# mongodb:
# uri: mongodb://192.168.0.61:27017/tianhui_cg
# field-naming-strategy: org.springframework.data.mapping.model.SnakeCaseFieldNamingStrategy
@@ -239,11 +239,11 @@ http:
socketTimeout: 5000000
validateAfterInactivity: 300000
#算法配置
algorithm:
#图片ocr服务
entity: http://192.168.0.14:17856/analyze_text
# 自动抽取摘要
summary: http://192.168.0.210:12343
ocr: http://192.168.0.62:12340/ExtractServer?WSDL
#内容是文件类型识别
fielTypeAndContent: http://192.168.0.42:8089/detect
#algorithm:
# #图片ocr服务
# entity: http://192.168.0.14:17856/analyze_text
# # 自动抽取摘要
# summary: http://192.168.0.210:12343
# ocr: http://192.168.0.62:12340/ExtractServer?WSDL
# #内容是文件类型识别
# fielTypeAndContent: http://192.168.0.42:8089/detect

4
pom.xml

@@ -91,11 +91,11 @@
<version>1.0-SNAPSHOT</version>
</dependency>
<!-- 图数据库访问实现 -->
<dependency>
<!--<dependency>
<groupId>com.jianwei</groupId>
<artifactId>neo4j-imp</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
</dependency>-->
<dependency>
<groupId>org.jsoup</groupId>

Loading…
Cancel
Save