mirror of https://gitee.com/orangeform/orange-admin.git
synced 2026-01-17 18:46:36 +08:00
commit: Microservice project directory structure
@@ -0,0 +1,152 @@
server:
  tomcat:
    uri-encoding: UTF-8
    threads:
      max: 100
      min-spare: 10
  servlet:
    encoding:
      force: true
      charset: UTF-8
      enabled: true

logging:
  config: classpath:log4j2.xml

spring:
  servlet:
    multipart:
      max-file-size: 50MB
      max-request-size: 50MB
  mvc:
    converters:
      preferred-json-mapper: fastjson
  freemarker:
    template-loader-path: classpath:/template/
    cache: false
    charset: UTF-8
    check-template-location: true
    content-type: text/html
    expose-request-attributes: false
    expose-session-attributes: false
    request-context-attribute: request
    suffix: .ftl
  cloud:
    sentinel:
      transport:
        # Address of the Sentinel dashboard.
        # Since port 8080 often conflicts with other services, the port can be changed
        # when starting the Sentinel dashboard, e.g.:
        # java -Dserver.port=8858 -jar sentinel-dashboard-$VERSION.jar
        # After changing it to 8858, the setting below must also become localhost:8858.
        dashboard: localhost:8858

# feign configuration
feign:
  hystrix:
    enabled: true
  httpclient:
    enabled: true
    max-connections: 200
    max-connections-per-route: 50
  client:
    config:
      default:
        # Log output of the feign client. The default is none; valid values are none/basic/headers/full.
        # Note: this only takes effect when the log level of the package containing the
        # feign client classes is set to debug, e.g.:
        # logging:
        #   level:
        #     com.demo.multi: info
        #     com.demo.multi.coursepaperinterface.client: debug
        loggerLevel: full
        connectTimeout: 5000
        readTimeout: 10000
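        # For illustration only: settings placed under a named client override the
        # "default" section above; "course-class" below is a hypothetical client name.
        # course-class:
        #   readTimeout: 20000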
  compression:
    request:
      enabled: true
    response:
      enabled: true

hystrix:
  command:
    default:
      execution:
        isolation:
          strategy: SEMAPHORE
          thread:
            timeoutInMilliseconds: 30000
  shareSecurityContext: true

# Timeouts for request processing.
ribbon:
  ReadTimeout: 7000
  ConnectTimeout: 3000
  MaxAutoRetries: 1
  MaxAutoRetriesNextServer: 1
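# For reference (a rough rule of thumb, not part of the original config): with retries,
# the worst-case ribbon time per call is about
# (ReadTimeout + ConnectTimeout) * (MaxAutoRetries + 1) * (MaxAutoRetriesNextServer + 1)
# = (7000 + 3000) * 2 * 2 = 40000 ms, which exceeds the 30000 ms hystrix timeout above.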

swagger:
  # Setting enabled to false disables swagger.
  enabled: true
  # Base package name of the project.
  basePackage: com.orange.demo
  title: Orange monolithic service project
  description: Orange monolithic service project details
  version: 1.0

# Expose monitoring endpoints.
management:
  metrics:
    tags:
      application: ${spring.application.name}
  endpoints:
    web:
      exposure:
        include: '*'
    jmx:
      exposure:
        include: '*'
  endpoint:
    # Health details of the related middleware are shown as well.
    health:
      show-details: always
    configprops:
      # In /actuator/configprops, every setting whose key contains "password" is masked with *.
      # To avoid masking any value, use the commented-out empty value below instead.
      # keys-to-sanitize:
      keys-to-sanitize: password
  server:
    servlet:
      context-path: "/"

# Redis that stores session data. Every service needs it, so it lives in the common configuration.
# Depending on your situation, this Redis can also store other data.
redis:
  # Redisson configuration. Each service may override it in its own configuration file.
  redisson:
    # If this value is false, the RedissonClient bean will not be created.
    enabled: true
    # Valid values for mode: single/cluster/sentinel/master-slave
    mode: single
    # single: standalone mode
    #   address: redis://localhost:6379
    # cluster: cluster mode
    #   Nodes are comma-separated, and every node must be prefixed with redis://.
    #   address: redis://localhost:6379,redis://localhost:6378,...
    # sentinel:
    #   Nodes are comma-separated, and every node must be prefixed with redis://.
    #   address: redis://localhost:6379,redis://localhost:6378,...
    # master-slave:
    #   Nodes are comma-separated; the first is the master, the rest are slaves.
    #   Every node must be prefixed with redis://.
    #   address: redis://localhost:6379,redis://localhost:6378,...
    address: redis://localhost:6379
    # Connection timeout, in milliseconds.
    timeout: 6000
    # Watchdog timeout for distributed locks, in milliseconds.
    # If an operation inside a lock runs longer than this, or the process exits abnormally
    # before releasing the lock, Redis deletes the lock's key after this period.
    lockWatchdogTimeout: 60000
    # Redis password; may be left empty.
    password:
    pool:
      # Connection pool size.
      poolSize: 20
      # Minimum number of idle connections in the pool.
      minIdle: 5
@@ -0,0 +1,76 @@
logging:
  level:
    # Log levels set here take precedence over those in the Loggers section of the log4j2.xml file.
    com.orange.demo: info

server:
  port: 9001

application:
  # Default root directory for uploaded files.
  uploadFileBaseDir: ./zz-resource/upload-files/course-class
  # URL context path of each microservice, e.g. /admin/upms; usually the same as the gateway route path.
  serviceContextPath: /admin/CourseClass
  # Whether to ignore any error in remote calls, including logic and system exceptions.
  # Usually set to false while debugging and testing, so that problems are found early.
  ignoreRpcError: false

sequence:
  # Whether to use the distributed Id generator based on Meituan Leaf.
  advanceIdGenerator: true
  # Multiple zk servers are comma-separated.
  zkAddress: localhost:2181
  # Combined with the local ip, this forms the key that identifies each service instance in zk.
  idPort: 19001
  # Path under which WorkNodes are created in zk. Different businesses should use
  # different paths to avoid conflicts.
  zkPath: com/orange/demo
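  # For illustration only (assuming Leaf's snowflake mode): each instance registers
  # ip:idPort under zkPath in zk and is assigned a workerId that is embedded in the
  # generated Ids, so idPort must differ between instances on the same host, e.g.:
  # idPort: 19001   # instance 1
  # idPort: 19101   # instance 2 (hypothetical value)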

# Basic mybatis configuration.
mybatis:
  mapperLocations: classpath:com/orange/demo/*/dao/mapper/*Mapper.xml
  typeAliasesPackage: com.orange.demo.*.model

mapper:
  mappers: tk.mybatis.mapper.common.Mapper,tk.mybatis.mapper.additional.insert.InsertListMapper
  not-empty: false
  identity: MYSQL

pagehelper:
  helperDialect: mysql
  reasonable: true
  supportMethodsArguments: false
  params: count=countSql

spring:
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      url: jdbc:mysql://localhost:3306/zzdemo-multi?characterEncoding=utf8&useSSL=true&serverTimezone=Asia/Shanghai
      username: root
      password: 123456
      driver-class-name: com.mysql.cj.jdbc.Driver
      name: course-class
      initialSize: 10
      minIdle: 10
      maxActive: 50
      maxWait: 60000
      timeBetweenEvictionRunsMillis: 60000
      minEvictableIdleTimeMillis: 300000
      poolPreparedStatements: true
      maxPoolPreparedStatementPerConnectionSize: 20
      maxOpenPreparedStatements: 20
      validationQuery: SELECT 'x'
      testWhileIdle: true
      testOnBorrow: false
      testOnReturn: false
      connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
      filters: stat,wall
      useGlobalDataSourceStat: true
      web-stat-filter:
        enabled: true
        url-pattern: "/*"
        exclusions: "*.js,*.gif,*.jpg,*.bmp,*.png,*.css,*.ico,/druid/*,/actuator/*"
      stat-view-servlet:
        enabled: true
        urlPattern: /druid/*
        resetEnable: true
@@ -0,0 +1,55 @@
server:
  port: 8082

application:
  # Signing key used to encrypt Jwt tokens. It must be longer than 10 characters
  # (a shorter key raises an error).
  tokenSigningKey: OrangeMultiDemo-signing-key
  # Name of the Http Header key that carries the Jwt token.
  tokenHeaderKey: Authorization
  # Name of the Http Header key that carries the refreshed Jwt token.
  refreshedTokenHeaderKey: RefreshedToken
  # Jwt token expiration time in milliseconds (72000000 ms = 20 hours).
  expiration: 72000000
  # Whitelist of cross-origin IPs (http://192.168.10.10:8086), comma-separated
  # (* trusts all of them; blank disables cross-origin trust).
  credentialIpList: "*"
  # Expiration time, in seconds, of sessions and user permissions stored in Redis.
  sessionExpiredSeconds: 86400

spring:
  cloud:
    gateway:
      discovery:
        locator:
          enabled: false
          lower-case-service-id: true
      routes:
        - id: upms
          uri: lb://upms
          predicates:
            - Path=/admin/upms/**
          filters:
            - StripPrefix=2
            - name: Hystrix
              args:
                name: default
                fallbackUri: forward:/fallback
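        # For illustration only: StripPrefix=2 removes the first two path segments before
        # forwarding, so a request to /admin/upms/sysUser/list reaches the upms service
        # as /sysUser/list (the /sysUser/list path is a hypothetical example).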
        - id: course-class
          uri: lb://course-class
          predicates:
            - Path=/admin/CourseClass/**
          filters:
            - StripPrefix=2
            - name: Hystrix
              args:
                name: default
                fallbackUri: forward:/fallback
        - id: stats
          uri: lb://stats
          predicates:
            - Path=/admin/stats/**
          filters:
            - StripPrefix=2
            - name: Hystrix
              args:
                name: default
                fallbackUri: forward:/fallback
@@ -0,0 +1,76 @@
logging:
  level:
    # Log levels set here take precedence over those in the Loggers section of the log4j2.xml file.
    com.orange.demo: info

server:
  port: 9002

application:
  # Default root directory for uploaded files.
  uploadFileBaseDir: ./zz-resource/upload-files/stats
  # URL context path of each microservice, e.g. /admin/upms; usually the same as the gateway route path.
  serviceContextPath: /admin/stats
  # Whether to ignore any error in remote calls, including logic and system exceptions.
  # Usually set to false while debugging and testing, so that problems are found early.
  ignoreRpcError: false

sequence:
  # Whether to use the distributed Id generator based on Meituan Leaf.
  advanceIdGenerator: true
  # Multiple zk servers are comma-separated.
  zkAddress: localhost:2181
  # Combined with the local ip, this forms the key that identifies each service instance in zk.
  idPort: 19002
  # Path under which WorkNodes are created in zk. Different businesses should use
  # different paths to avoid conflicts.
  zkPath: com/orange/demo

# Basic mybatis configuration.
mybatis:
  mapperLocations: classpath:com/orange/demo/*/dao/mapper/*Mapper.xml
  typeAliasesPackage: com.orange.demo.*.model

mapper:
  mappers: tk.mybatis.mapper.common.Mapper,tk.mybatis.mapper.additional.insert.InsertListMapper
  not-empty: false
  identity: MYSQL

pagehelper:
  helperDialect: mysql
  reasonable: true
  supportMethodsArguments: false
  params: count=countSql

spring:
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      url: jdbc:mysql://localhost:3306/zzdemo-multi?characterEncoding=utf8&useSSL=true&serverTimezone=Asia/Shanghai
      username: root
      password: 123456
      driver-class-name: com.mysql.cj.jdbc.Driver
      name: stats
      initialSize: 10
      minIdle: 10
      maxActive: 50
      maxWait: 60000
      timeBetweenEvictionRunsMillis: 60000
      minEvictableIdleTimeMillis: 300000
      poolPreparedStatements: true
      maxPoolPreparedStatementPerConnectionSize: 20
      maxOpenPreparedStatements: 20
      validationQuery: SELECT 'x'
      testWhileIdle: true
      testOnBorrow: false
      testOnReturn: false
      connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
      filters: stat,wall
      useGlobalDataSourceStat: true
      web-stat-filter:
        enabled: true
        url-pattern: "/*"
        exclusions: "*.js,*.gif,*.jpg,*.bmp,*.png,*.css,*.ico,/druid/*,/actuator/*"
      stat-view-servlet:
        enabled: true
        urlPattern: /druid/*
        resetEnable: true
@@ -0,0 +1,77 @@
logging:
  level:
    # Log levels set here take precedence over those in the Loggers section of the log4j2.xml file.
    com.orange.demo: info

server:
  port: 9000

application:
  # Default root directory for uploaded files.
  uploadFileBaseDir: ./zz-resource/upload-files/upms
  # Initial password.
  defaultUserPassword: 123456
  # URL context path of each microservice, e.g. /admin/upms; usually the same as the gateway route path.
  serviceContextPath: /admin/upms
  # Whether to ignore any error in remote calls, including logic and system exceptions.
  # Usually set to false while debugging and testing, so that problems are found early.
  ignoreRpcError: false

sequence:
  # Whether to use the distributed Id generator based on Meituan Leaf.
  advanceIdGenerator: true
  # Multiple zk servers are comma-separated.
  zkAddress: localhost:2181
  # Combined with the local ip, this forms the key that identifies each service instance in zk.
  idPort: 19000
  # Path under which WorkNodes are created in zk. Different businesses should use
  # different paths to avoid conflicts.
  zkPath: com/orange/demo

mybatis:
  mapperLocations: classpath:com/orange/demo/*/dao/mapper/*Mapper.xml,com/orange/demo/common/datasync/dao/mapper/*Mapper.xml,com/orange/demo/common/log/dao/mapper/*Mapper.xml
  typeAliasesPackage: com.orange.demo.*.model,com.orange.demo.common.datasync.model,com.orange.demo.common.log.model

mapper:
  mappers: tk.mybatis.mapper.common.Mapper,tk.mybatis.mapper.additional.insert.InsertListMapper
  not-empty: false
  identity: MYSQL

pagehelper:
  helperDialect: mysql
  reasonable: true
  supportMethodsArguments: false
  params: count=countSql

spring:
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      url: jdbc:mysql://localhost:3306/zzdemo-multi?characterEncoding=utf8&useSSL=true&serverTimezone=Asia/Shanghai
      username: root
      password: 123456
      driver-class-name: com.mysql.cj.jdbc.Driver
      name: upms
      initialSize: 10
      minIdle: 10
      maxActive: 50
      maxWait: 60000
      timeBetweenEvictionRunsMillis: 60000
      minEvictableIdleTimeMillis: 300000
      poolPreparedStatements: true
      maxPoolPreparedStatementPerConnectionSize: 20
      maxOpenPreparedStatements: 20
      validationQuery: SELECT 'x'
      testWhileIdle: true
      testOnBorrow: false
      testOnReturn: false
      connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
      filters: stat,wall
      useGlobalDataSourceStat: true
      web-stat-filter:
        enabled: true
        url-pattern: "/*"
        exclusions: "*.js,*.gif,*.jpg,*.bmp,*.png,*.css,*.ico,/druid/*,/actuator/*"
      stat-view-servlet:
        enabled: true
        urlPattern: /druid/*
        resetEnable: true
@@ -0,0 +1,21 @@
#! /bin/bash

# If you ran docker-compose down, kafka may hit a position error caused by the forced
# shutdown the next time the middleware is started with docker-compose up. In that case,
# run this shell script to delete their data.
# We recommend stopping the services with docker-compose stop and starting them again with docker-compose start.
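
# Typical recovery sequence, for illustration only (the script file name is assumed):
#   docker-compose down
#   bash ./clean-middleware-data.sh
#   docker-compose up -d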

rm -rf ./data/redis/*
rm -rf ./data/elasticsearch/*
rm -rf ./data/kafka/*
rm -rf ./data/rocketmq/*
rm -rf ./data/logstash/*
rm -rf ./data/logstash/.lock
rm -rf ./data/zookeeper/*

rm -rf ./logs/redis/*
rm -rf ./logs/elasticsearch/*
rm -rf ./logs/kafka/*
rm -rf ./logs/rocketmq/*
rm -rf ./logs/logstash/*
rm -rf ./logs/zookeeper/*
@@ -0,0 +1,73 @@
version: '3.5'

services:

  nacos:
    image: nacos/nacos-server:1.4.1
    container_name: nacos-standalone
    environment:
      - PREFER_HOST_MODE=hostname
      - MODE=standalone
    privileged: true
    volumes:
      - ./logs/nacos/:/home/nacos/logs
      - ./data/nacos/:/home/nacos/data
    ports:
      - "8848:8848"
    networks:
      - zz-test

  redis:
    container_name: redis
    build:
      context: services/redis/
      args:
        - REDIS_VER=4
    ports:
      - "6379:6379"
    volumes:
      - ./services/redis/redis.conf:/usr/local/etc/redis/redis.conf:rw
      - ./data/redis:/data:rw
      - ./logs/redis:/var/log/:rw
    networks:
      - zz-test

  zookeeper:
    image: zookeeper:3.5.5
    ports:
      - "2181:2181"
    volumes:
      - ./logs/zookeeper/:/data
      - ./data/zookeeper/:/datalog
    networks:
      - zz-test

  kafka:
    image: wurstmeister/kafka:2.12-2.4.0
    ports:
      - "9092:9092"
    expose:
      - "9093"
    environment:
      KAFKA_ADVERTISED_LISTENERS: INSIDE://kafka:9093,OUTSIDE://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_LISTENERS: INSIDE://0.0.0.0:9093,OUTSIDE://0.0.0.0:9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
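      # For illustration only: containers on the zz-test network reach the broker via
      # the INSIDE listener (kafka:9093), while clients on the host use the OUTSIDE
      # listener, e.g.:
      #   kafka-console-producer.sh --broker-list localhost:9092 --topic test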
    volumes:
      - ./data/kafka/:/kafka
    networks:
      - zz-test
    depends_on:
      - zookeeper

  sentinel:
    image: bladex/sentinel-dashboard
    ports:
      - "8858:8858"
    networks:
      - zz-test

networks:
  zz-test:
    driver: bridge
@@ -0,0 +1,3 @@
ARG KAFKA_VER

FROM wurstmeister/kafka:${KAFKA_VER}
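
# Example build command, for illustration only (the image tag is hypothetical):
#   docker build --build-arg KAFKA_VER=2.12-2.4.0 -t zz-test/kafka .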
@@ -0,0 +1,19 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=local-console-sink
connector.class=org.apache.kafka.connect.file.FileStreamSinkConnector
tasks.max=1
topics=connect-test
@@ -0,0 +1,19 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=local-console-source
connector.class=org.apache.kafka.connect.file.FileStreamSourceConnector
tasks.max=1
topic=connect-test
@@ -0,0 +1,86 @@
##
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

# This file contains some of the configurations for the Kafka Connect distributed worker. This file is intended
# to be used with the examples, and some settings may differ from those used in a production system, especially
# the `bootstrap.servers` and those specifying replication factors.

# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
bootstrap.servers=localhost:9092

# unique name for the cluster, used in forming the Connect cluster group. Note that this must not conflict with consumer group IDs
group.id=connect-cluster

# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
# need to configure these based on the format they want their data in when loaded from or stored into Kafka
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
# it to
key.converter.schemas.enable=true
value.converter.schemas.enable=true

# Topic to use for storing offsets. This topic should have many partitions and be replicated and compacted.
# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
# the topic before starting Kafka Connect if a specific topic configuration is needed.
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
offset.storage.topic=connect-offsets
offset.storage.replication.factor=1
#offset.storage.partitions=25

# Topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated,
# and compacted topic. Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
# the topic before starting Kafka Connect if a specific topic configuration is needed.
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
config.storage.topic=connect-configs
config.storage.replication.factor=1

# Topic to use for storing statuses. This topic can have multiple partitions and should be replicated and compacted.
# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
# the topic before starting Kafka Connect if a specific topic configuration is needed.
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
status.storage.topic=connect-status
status.storage.replication.factor=1
#status.storage.partitions=5

# Flush much faster than normal, which is useful for testing/debugging
offset.flush.interval.ms=10000

# These are provided to inform the user about the presence of the REST host and port configs
# Hostname & Port for the REST API to listen on. If this is set, it will bind to the interface used to listen to requests.
#rest.host.name=
#rest.port=8083

# The Hostname & Port that will be given out to other workers to connect to i.e. URLs that are routable from other servers.
#rest.advertised.host.name=
#rest.advertised.port=

# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
# (connectors, converters, transformations). The list should consist of top level directories that include
# any combination of:
# a) directories immediately containing jars with plugins and their dependencies
# b) uber-jars with plugins and their dependencies
# c) directories immediately containing the package directory structure of classes of plugins and their dependencies
# Examples:
# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
#plugin.path=
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=local-file-sink
connector.class=FileStreamSink
tasks.max=1
file=test.sink.txt
topics=connect-test
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=local-file-source
connector.class=FileStreamSource
tasks.max=1
file=test.txt
topic=connect-test
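
# For illustration only: this pairs with the file-sink connector in the standard Kafka
# quickstart, e.g. (paths assumed relative to the Kafka installation directory):
#   bin/connect-standalone.sh config/connect-standalone.properties \
#     config/connect-file-source.properties config/connect-file-sink.properties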
@@ -0,0 +1,32 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

log4j.rootLogger=INFO, stdout


log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

#
# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information
# in the log message, where appropriate. This makes it easier to identify those log messages that apply to a
# specific connector. Simply add this parameter to the log layout configuration below to include the contextual information.
#
#log4j.appender.stdout.layout.ConversionPattern=[%d] %p %X{connector.context}%m (%c:%L)%n
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n

log4j.logger.org.apache.zookeeper=ERROR
log4j.logger.org.I0Itec.zkclient=ERROR
log4j.logger.org.reflections=ERROR
@@ -0,0 +1,41 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# These are defaults. This file just demonstrates how to override some settings.
bootstrap.servers=localhost:9092

# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
# need to configure these based on the format they want their data in when loaded from or stored into Kafka
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
# it to
key.converter.schemas.enable=true
value.converter.schemas.enable=true

offset.storage.file.filename=/tmp/connect.offsets
# Flush much faster than normal, which is useful for testing/debugging
offset.flush.interval.ms=10000

# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
# (connectors, converters, transformations). The list should consist of top level directories that include
# any combination of:
# a) directories immediately containing jars with plugins and their dependencies
# b) uber-jars with plugins and their dependencies
# c) directories immediately containing the package directory structure of classes of plugins and their dependencies
# Note: symlinks will be followed to discover dependencies or plugins.
# Examples:
# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
#plugin.path=
@@ -0,0 +1,26 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see org.apache.kafka.clients.consumer.ConsumerConfig for more details

# list of brokers used for bootstrapping knowledge about the rest of the cluster
# format: host1:port1,host2:port2 ...
bootstrap.servers=localhost:9092

# consumer group id
group.id=test-consumer-group

# What to do when there is no initial offset in Kafka or if the current
# offset does not exist any more on the server: latest, earliest, none
#auto.offset.reset=
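
# For illustration only: the console consumer can load this file, e.g.
#   bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test \
#     --consumer.config config/consumer.properties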
@@ -0,0 +1,92 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unspecified loggers and loggers with additivity=true output to server.log and stdout
# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
log4j.rootLogger=INFO, stdout, kafkaAppender

log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

# Change the two lines below to adjust ZK client logging
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
log4j.logger.org.apache.zookeeper=INFO

# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
log4j.logger.kafka=INFO
log4j.logger.org.apache.kafka=INFO

# Change to DEBUG or TRACE to enable request logging
log4j.logger.kafka.request.logger=WARN, requestAppender
log4j.additivity.kafka.request.logger=false

# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
# related to the handling of requests
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false

log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false

log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false

log4j.logger.state.change.logger=TRACE, stateChangeAppender
log4j.additivity.state.change.logger=false

# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false

@@ -0,0 +1,45 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see org.apache.kafka.clients.producer.ProducerConfig for more details

############################# Producer Basics #############################

# list of brokers used for bootstrapping knowledge about the rest of the cluster
# format: host1:port1,host2:port2 ...
bootstrap.servers=localhost:9092

# specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd
compression.type=none

# name of the partitioner class for partitioning events; default partition spreads data randomly
#partitioner.class=

# the maximum amount of time the client will wait for the response of a request
#request.timeout.ms=

# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for
#max.block.ms=

# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together
#linger.ms=

# the maximum size of a request in bytes
#max.request.size=

# the default batch size in bytes when batching multiple records sent to a partition
#batch.size=

# the total bytes of memory the producer can use to buffer records waiting to be sent to the server
#buffer.memory=
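
# For illustration only: the console producer can load this file, e.g.
#   bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test \
#     --producer.config config/producer.properties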
@@ -0,0 +1,137 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0

############################# Socket Server Settings #############################

# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
#   FORMAT:
#     listeners = listener_name://host_name:port
#   EXAMPLE:
#     listeners = PLAINTEXT://your.host.name:9092
listeners=PLAINTEXT://0.0.0.0:9092

# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
advertised.listeners=PLAINTEXT://192.168.0.1:9092

# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL

# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3

# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=/kafka/kafka-logs-7d53de35ca1e

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1

############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=zookeeper:2181

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000


############################# Group Coordinator Settings #############################

# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
port=9092
@@ -0,0 +1,21 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

log4j.rootLogger=WARN, stderr

log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
@@ -0,0 +1,25 @@
{
  "_comment": [
    "Licensed to the Apache Software Foundation (ASF) under one or more",
    "contributor license agreements. See the NOTICE file distributed with",
    "this work for additional information regarding copyright ownership.",
    "The ASF licenses this file to You under the Apache License, Version 2.0",
    "(the \"License\"); you may not use this file except in compliance with",
    "the License. You may obtain a copy of the License at",
    "",
    "http://www.apache.org/licenses/LICENSE-2.0",
    "",
    "Unless required by applicable law or agreed to in writing, software",
    "distributed under the License is distributed on an \"AS IS\" BASIS,",
    "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
    "See the License for the specific language governing permissions and",
    "limitations under the License."
  ],
  "platform": "org.apache.kafka.trogdor.basic.BasicPlatform", "nodes": {
    "node0": {
      "hostname": "localhost",
      "trogdor.agent.port": 8888,
      "trogdor.coordinator.port": 8889
    }
  }
}
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
@@ -0,0 +1,13 @@
ARG REDIS_VER

FROM redis:${REDIS_VER}

COPY redis.conf /usr/local/etc/redis/redis.conf
CMD ["redis-server", "/usr/local/etc/redis/redis.conf"]

# Set the timezone to Shanghai.
ENV TZ=Asia/Shanghai
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

# Use a mirror server in China for the Ubuntu package sources.
RUN sed -i 's/archive.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
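
# Example build command matching the docker-compose args above, for illustration only
# (the image tag is hypothetical):
#   docker build --build-arg REDIS_VER=4 -t zz-test/redis services/redis/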
@@ -0,0 +1,110 @@
PINPOINT_VERSION=2.0.1
SPRING_PROFILES=release

### Pinpoint-Hbase

PINPOINT_HBASE_NAME=pinpoint-hbase
#config for hbase in external docker
EXTERNAL_HBASE_PORT=2180

### Pinpoint-mysql
MYSQL_ROOT_PASSWORD=root123
MYSQL_USER=admin
MYSQL_PASSWORD=admin
MYSQL_DATABASE=pinpoint

### Pinpoint-Web

PINPOINT_WEB_NAME=pinpoint-web

WEB_PAGE_PORT=8079

CLUSTER_ENABLE=true
#CLUSTER_WEB_TCP_PORT=9997
CLUSTER_ZOOKEEPER_ADDRESS=zoo1

ADMIN_PASSWORD=admin

ANALYTICS=true

HBASE_HOST=pinpoint-hbase
HBASE_PORT=2181

WEB_DEBUG_LEVEL=INFO

BATCH_ENABLE=false
BATCH_SERVER_IP=127.0.0.127
BATCH_FLINK_SERVER=pinpoint-flink-jobmanager

CONFIG_SHOW_APPLICATIONSTAT=true

JDBC_DRIVER=com.mysql.jdbc.Driver
JDBC_URL=jdbc:mysql://pinpoint-mysql:3306/pinpoint?characterEncoding=UTF-8
JDBC_USERNAME=admin
JDBC_PASSWORD=admin
MAIL_HOST=stmp.test.com
MAIL_PORT=123
MAIL_USERNAME=user
MAIL_PASSWORD=pass
MAIL_PROPERTIES_MAIL_TRANSPORT_PROTOCOL=smtp
MAIL_PROPERTIES_MAIL_SMTP_AUTH=true
MAIL_PROPERTIES_MAIL_SMTP_PORT=587
MAIL_PROPERTIES_MAIL_SMTP_FROM=test@example.com
MAIL_PROPERTIES_MAIL_STARTTLS_ENABLE=true
MAIL_PROPERTIES_MAIL_STARTTLS_REQUIRED=true
MAIL_PROPERTIES_MAIL_DEBUG=false


### Pinpoint-Collector

PINPOINT_COLLECTOR_NAME=pinpoint-collector

COLLECTOR_RECEIVER_AGENT_PORT=9991
COLLECTOR_RECEIVER_METADATA_PORT=9991
COLLECTOR_RECEIVER_STAT_PORT=9992
COLLECTOR_RECEIVER_SPAN_PORT=9993
COLLECTOR_RECEIVER_BASE_PORT=9994
COLLECTOR_RECEIVER_STAT_UDP_PORT=9995
COLLECTOR_RECEIVER_SPAN_UDP_PORT=9996

FLINK_CLUSTER_ENABLE=true
FLINK_CLUSTER_ZOOKEEPER_ADDRESS=zoo1

COLLECTOR_DEBUG_LEVEL=INFO


### Pinpoint-Agent

PINPOINT_AGENT_NAME=pinpoint-agent

#network module(GRPC,THRIFT)
PROFILER_TRANSPORT_MODULE=GRPC

#collector information required
COLLECTOR_IP=pinpoint-collector
PROFILER_TRANSPORT_AGENT_COLLECTOR_PORT=9991
PROFILER_TRANSPORT_METADATA_COLLECTOR_PORT=9991
PROFILER_TRANSPORT_STAT_COLLECTOR_PORT=9992
PROFILER_TRANSPORT_SPAN_COLLECTOR_PORT=9993
COLLECTOR_TCP_PORT=9994
COLLECTOR_STAT_PORT=9995
COLLECTOR_SPAN_PORT=9996

# Set sampling rate. If you set it to N, 1 out of N transactions will be sampled.
PROFILER_SAMPLING_RATE=1
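# For illustration only: PROFILER_SAMPLING_RATE=1 samples every transaction, while e.g.
# PROFILER_SAMPLING_RATE=20 would sample 1 in 20 (5%) of transactions.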

AGENT_ID=app-in-docker
APP_NAME=quickapp

AGENT_DEBUG_LEVEL=INFO


### Pinpoint-flink

PINPOINT_FLINK_NAME=pinpoint-flink
FLINK_WEB_PORT=8099


### Pinpoint-quickstart

APP_PORT=8000
orange-demo-multi/orange-demo-multi-service/zz-resource/pinpoint-docker-master/.gitignore
@@ -0,0 +1 @@
.idea/
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -0,0 +1,175 @@



# Pinpoint-Docker for Pinpoint

Official git repository of Dockerized components of [Pinpoint Application Monitoring](http://naver.github.io/pinpoint/).
Installing Pinpoint with these Docker files takes approximately 10 minutes, after which you can check out the features of Pinpoint.

## What is Pinpoint

[Pinpoint](https://github.com/naver/pinpoint) is the world's leading open-source application monitoring solution - trusted by millions of users around the globe.
It helps you understand your application at a glance and allows you to build world-class, high-quality software.

## Supported Tags

- 2.0.1 (ongoing)
- 1.8.5
- 1.8.4
- 1.8.3
- 1.8.2
- 1.8.1
- 1.8.0
- 1.7.3
- 1.7.2

## Requirements

- [docker 18.02.0+](https://docs.docker.com/compose/compose-file/)

## How to install Pinpoint?

You can easily bring up an entire Dockerized Pinpoint (latest release) environment by using [Docker Compose](https://docs.docker.com/compose/) with any of the provided `docker-compose.yml` files as below.
The `docker-compose.yml` under the *Pinpoint-Docker* folder brings up the whole environment together with Pinpoint-QuickStart (a sample app).
To monitor your own agent, see the [configuration part](#configurations) for further details.

```
git clone https://github.com/naver/pinpoint-docker.git
cd pinpoint-docker
docker-compose pull && docker-compose up -d
```

If you'd like to bring up a previous release, try the docker-compose file from the corresponding tag:

```
git clone https://github.com/naver/pinpoint-docker.git
git checkout {tag}
cd pinpoint-docker
docker-compose pull && docker-compose up -d
```

You can also build the images with the `docker-compose up -d` command alone, without pulling them first, but pulling the prebuilt images cuts the setup time to roughly a third.

This will install and run all services required for all features of Pinpoint, in Docker containers joined to the same network:
- Pinpoint-Web Server
- Pinpoint-Collector
- Pinpoint-Agent
- Pinpoint-Flink (to support certain features)
- Pinpoint-Zookeeper
- Pinpoint-Hbase
- Pinpoint-QuickStart (a sample application, 1.8.1+)
- Pinpoint-Mysql (to support certain features)
It may take several minutes to download all necessary images.

You can replace the `QuickStart` application part with your application to start monitoring.
- check [`Testing QuickStart application`](#testing-quickstart-application) for a quick demo of Pinpoint
- check the [`Monitoring YOUR Application`](#monitoring-your-application) part for further details

### Mysql (optional, 1.8.1+)

Pinpoint-Mysql is necessary for the 'Alarm' feature. Its server runs on port 13306 and contains the data structure used to register users, groups, and alerts to be sent.

To send email alerts, you must set BATCH_ENABLE=true and fill in the other mail-related environment variables (MAIL_HOST, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD, MAIL_PROPERTIES_MAIL_SMTP_FROM, ...) for the Pinpoint-Web server in the *.env* file, as in the sketch below.

For more information check out [Setting Alarm](http://naver.github.io/pinpoint/alarm.html) in the Pinpoint documentation.

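A minimal sketch of the relevant *.env* entries, assuming a hypothetical SMTP server and placeholder credentials:

```
BATCH_ENABLE=true
# hypothetical mail server values - replace with your own
MAIL_HOST=smtp.example.com
MAIL_PORT=587
MAIL_USERNAME=pinpoint
MAIL_PASSWORD=secret
MAIL_PROPERTIES_MAIL_SMTP_FROM=pinpoint@example.com
```
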
### Flink configuration (optional)

Pinpoint-Flink is necessary for the ['Application Inspector'](http://naver.github.io/pinpoint/applicationinspector.html) feature.

After all containers are started and ready to go, there is one more step before you can use this feature in Pinpoint.
The feature is not mandatory, but since enabling it is a simple task, let's take care of it.

Register a `job` on the pinpoint-flink server.
You can build the `job` from the [open source of Pinpoint](https://github.com/naver/pinpoint); an additional guide is [here](https://github.com/naver/pinpoint/blob/master/doc/application-inspector.md#application-inspector).

If anyone has a solution for putting the job file into the Flink image without doing it manually, please let us know.

The Pinpoint-Flink server runs on [port 8081](http://localhost:8081/#/submit). From the `Submit new Job` menu,
submit the jar file with *com.navercorp.pinpoint.flink.StatStreamingVer2Job* as the entry class, as in the image below.


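If you prefer the command line over the web UI, the job can likely also be submitted with the standard Flink CLI from inside the jobmanager container. A sketch, assuming the built job jar is named `pinpoint-flink-job-2.0.1.jar` (the `pinpoint-flink-job-{version}.jar` naming comes from the flink build notes further below):

```
# copy the built job jar into the jobmanager container
docker cp pinpoint-flink-job-2.0.1.jar pinpoint-flink-jobmanager:/job.jar
# submit it with the entry class named above
docker exec pinpoint-flink-jobmanager flink run -c com.navercorp.pinpoint.flink.StatStreamingVer2Job /job.jar
```
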
### Testing QuickStart application

Now you are ready to monitor the sample application (Pinpoint-QuickStart, [port 8000](http://localhost:8000)) provided.
If you can't find any connected application on Pinpoint-Web's first page ([port 8079](http://localhost:8079) by default), don't panic and wait for a while.
It takes some time for Pinpoint to retrieve the application's information when running for the first time.

## Monitoring YOUR Application

The Pinpoint-Agent image only prepares the libraries required to run the agent.
Running and configuring the agent is a manual action done by the user, but don't worry, it's very simple.

If you are not familiar with Pinpoint concepts, please read: [Overview](http://naver.github.io/pinpoint/overview.html#architecture),
[Agents Installation](http://naver.github.io/pinpoint/installation.html#5-pinpoint-agent)

**You will need to attach *Pinpoint-Agent* to your application.**

Examples of running the Pinpoint-Agent docker-compose separately are [here](https://github.com/naver/pinpoint-docker/tree/master/pinpoint-agent-attach-example).
Otherwise, you can check how [Pinpoint-Quickstart](https://github.com/naver/pinpoint-docker/blob/master/docker-compose.yml) is attached to Pinpoint-Agent with docker-compose; a sketch of the same idea follows below.

We'll try to create more examples along the way.
If anyone can share their Dockerfile, it's always welcome.

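For reference, a minimal sketch of the attachment with plain `docker run`; the application image name (`your-app`) is a placeholder, and the `JAVA_OPTS` values mirror the QuickStart compose file:

```
# share the agent files from the pinpoint-agent container and join its network
# (the actual Compose network name may differ - check `docker network ls`)
docker run -d --name my-app \
  --volumes-from pinpoint-agent \
  --network pinpoint-docker_pinpoint \
  -e JAVA_OPTS="-javaagent:/pinpoint-agent/pinpoint-bootstrap-2.0.1.jar -Dpinpoint.agentId=app-in-docker -Dpinpoint.applicationName=quickapp" \
  your-app
# note: JAVA_OPTS only takes effect if the image's start command honors it
# (e.g. Tomcat's catalina.sh does)
```
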
## Distributed System

Until now, every component ran on one Docker host - a single-node approach, which is excellent for test and development.
It provides an easy way to prototype new ideas and use cases, as well as to try out new functionality and the latest Pinpoint releases.
It is neither intended nor supported for production use.

You can use the `docker-compose` and `.env` files under each folder to install the modules separately onto several servers.
If the containers are separated, the IP configurations in each `.env` file must be changed accordingly.

For example, say you want your application running in one Docker host and the rest of Pinpoint in another.
You can remove *pinpoint-agent* and *pinpoint-quickstart* from docker-compose.yml and run it to establish all the other components of Pinpoint.
Then create another docker-compose.yml, just like the one under the pinpoint-quickstart folder, to run your application.
Finally, since the agent needs to know the collector's address, the collector IP needs to be changed in `.env`, as below.

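For instance, the agent-side *.env* on the application host would point at the collector host (the address below is a placeholder):

```
# placeholder address - the host running pinpoint-collector
COLLECTOR_IP=192.168.0.10
```
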
## Configurations

Configuration relies on supplying `docker-compose` with environment variables defined in the `.env` file, so it's recommended to change variables only in the `.env` file.
With the `docker-compose` files in this repository you can create the stand-alone containers that are needed to run most of the features in Pinpoint.

**Ports** can also be configured in the .env file.
(Default ports are Pinpoint-Web: 8079, Quickstart: 8000 and Flink: 8081, as configured in the .env file.)

Pinpoint-Zookeeper is just an example of using the zookeeper image. You can modify the docker-compose files to suit your needs.

For more specific details on what the values in the *.env* file represent, please check the [Pinpoint GitHub repository](https://github.com/naver/pinpoint) or
[Pinpoint Web properties](https://github.com/naver/pinpoint/blob/master/web/src/main/resources/pinpoint-web.properties), [Pinpoint Collector properties](https://github.com/naver/pinpoint/blob/master/collector/src/main/resources/pinpoint-collector.properties), [Pinpoint Agent configuration](https://github.com/naver/pinpoint/blob/master/agent/src/main/resources-release/pinpoint.config).
Please note that only essential configuration options are exposed through pinpoint-docker (docker-compose); see the example below.

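For example, to move the web UI to another port (8085 here is just an arbitrary free port), edit `.env` and recreate the containers:

```
sed -i 's/^WEB_PAGE_PORT=.*/WEB_PAGE_PORT=8085/' .env
docker-compose up -d
```
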
## Logs

You can check the logs produced by these services:
```
docker logs <containerId>
```

You can also easily change the log level in the *.env* file.

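Since the compose file pins container names, the name works as well, for example:

```
docker logs -f pinpoint-web
```
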
## Any Issues or Suggestions?

Feel free to share any problems and suggestions via the [Pinpoint GitHub Issue page](https://github.com/naver/pinpoint/issues).
Contributions to the pinpoint-docker image are also always welcome.

## License

Pinpoint is licensed under the Apache License, Version 2.0.
See [LICENSE](https://github.com/naver/pinpoint/blob/master/LICENSE) for the full license text.

```
Copyright 2018 NAVER Corp.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```

@@ -0,0 +1,261 @@
version: "3.6"

services:
  pinpoint-hbase:
    build:
      context: ./pinpoint-hbase/
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: "${PINPOINT_HBASE_NAME}"
    image: "pinpointdocker/pinpoint-hbase:${PINPOINT_VERSION}"
    networks:
      - pinpoint

    volumes:
      - /home/pinpoint/hbase
      - /home/pinpoint/zookeeper
    expose:
      # zookeeper
      - "2181"
      # HBase Master API port
      - "60000"
      # HBase Master Web UI
      - "16010"
      # Regionserver API port
      - "60020"
      # HBase Regionserver web UI
      - "16030"
    ports:
      - "${EXTERNAL_HBASE_PORT:-2181}:2181"
      - "60000:60000"
      - "16010:16010"
      - "60020:60020"
      - "16030:16030"
    restart: always

  pinpoint-mysql:
    build:
      context: ./pinpoint-mysql/
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: pinpoint-mysql
    restart: always
    image: "pinpointdocker/pinpoint-mysql:${PINPOINT_VERSION}"
    hostname: pinpoint-mysql
    ports:
      - "13306:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
      - MYSQL_USER=${MYSQL_USER}
      - MYSQL_PASSWORD=${MYSQL_PASSWORD}
      - MYSQL_DATABASE=${MYSQL_DATABASE}

    volumes:
      - mysql_data:/var/lib/mysql
    networks:
      - pinpoint

  pinpoint-web:
    build:
      context: ./pinpoint-web/
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: "${PINPOINT_WEB_NAME}"
    image: "pinpointdocker/pinpoint-web:${PINPOINT_VERSION}"

    depends_on:
      - pinpoint-hbase
      - pinpoint-mysql
    restart: always
    expose:
      - "8080"
      - "9997"
    ports:
      - "9997:9997"
      - "${WEB_PAGE_PORT:-8080}:8080"
    environment:
      - JAVA_OPTS="-Dspring.profiles.active=${SPRING_PROFILES}"
      - SPRING_PROFILES=${SPRING_PROFILES}
      - CLUSTER_ENABLE=${CLUSTER_ENABLE}
      - CLUSTER_ZOOKEEPER_ADDRESS=${CLUSTER_ZOOKEEPER_ADDRESS}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD}
      - ANALYTICS=${ANALYTICS}
      - HBASE_HOST=${HBASE_HOST}
      - HBASE_PORT=${HBASE_PORT}
      - DEBUG_LEVEL=${WEB_DEBUG_LEVEL}
      - CONFIG_SHOW_APPLICATIONSTAT=${CONFIG_SHOW_APPLICATIONSTAT}
      - BATCH_ENABLE=${BATCH_ENABLE}
      - BATCH_SERVER_IP=${BATCH_SERVER_IP}
      - BATCH_FLINK_SERVER=${BATCH_FLINK_SERVER}
      - JDBC_DRIVER=${JDBC_DRIVER}
      - JDBC_URL=${JDBC_URL}
      - JDBC_USERNAME=${JDBC_USERNAME}
      - JDBC_PASSWORD=${JDBC_PASSWORD}
      - MAIL_HOST=${MAIL_HOST}
      - MAIL_PORT=${MAIL_PORT}
      - MAIL_USERNAME=${MAIL_USERNAME}
      - MAIL_PASSWORD=${MAIL_PASSWORD}
      - MAIL_PROPERTIES_MAIL_TRANSPORT_PROTOCOL=${MAIL_PROPERTIES_MAIL_TRANSPORT_PROTOCOL}
      - MAIL_PROPERTIES_MAIL_SMTP_AUTH=${MAIL_PROPERTIES_MAIL_SMTP_AUTH}
      - MAIL_PROPERTIES_MAIL_SMTP_PORT=${MAIL_PROPERTIES_MAIL_SMTP_PORT}
      - MAIL_PROPERTIES_MAIL_SMTP_FROM=${MAIL_PROPERTIES_MAIL_SMTP_FROM}
      - MAIL_PROPERTIES_MAIL_STARTTLS_ENABLE=${MAIL_PROPERTIES_MAIL_STARTTLS_ENABLE}
      - MAIL_PROPERTIES_MAIL_STARTTLS_REQUIRED=${MAIL_PROPERTIES_MAIL_STARTTLS_REQUIRED}
      - MAIL_PROPERTIES_MAIL_DEBUG=${MAIL_PROPERTIES_MAIL_DEBUG}
    links:
      - "pinpoint-mysql:pinpoint-mysql"
    networks:
      - pinpoint

  pinpoint-collector:
    build:
      context: ./pinpoint-collector/
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: "${PINPOINT_COLLECTOR_NAME}"
    image: "pinpointdocker/pinpoint-collector:${PINPOINT_VERSION}"

    depends_on:
      - pinpoint-hbase
    restart: always
    expose:
      - "9991"
      - "9992"
      - "9993"
      - "9994"
      - "9995"
      - "9996"
    ports:
      - "${COLLECTOR_RECEIVER_AGENT_PORT:-9991}:9991/udp"
      - "${COLLECTOR_RECEIVER_METADATA_PORT:-9991}:9991/tcp"
      - "${COLLECTOR_RECEIVER_STAT_PORT:-9992}:9992/tcp"
      - "${COLLECTOR_RECEIVER_SPAN_PORT:-9993}:9993/tcp"
      - "${COLLECTOR_RECEIVER_BASE_PORT:-9994}:9994"
      - "${COLLECTOR_RECEIVER_STAT_UDP_PORT:-9995}:9995/tcp"
      - "${COLLECTOR_RECEIVER_SPAN_UDP_PORT:-9996}:9996/tcp"
      - "${COLLECTOR_RECEIVER_STAT_UDP_PORT:-9995}:9995/udp"
      - "${COLLECTOR_RECEIVER_SPAN_UDP_PORT:-9996}:9996/udp"

    networks:
      - pinpoint
    environment:
      - JAVA_OPTS="-Dspring.profiles.active=${SPRING_PROFILES}"
      - SPRING_PROFILES=${SPRING_PROFILES}
      - CLUSTER_ENABLE=${CLUSTER_ENABLE}
      - CLUSTER_ZOOKEEPER_ADDRESS=${CLUSTER_ZOOKEEPER_ADDRESS}
      - HBASE_HOST=${HBASE_HOST}
      - HBASE_PORT=${HBASE_PORT}
      - FLINK_CLUSTER_ENABLE=${FLINK_CLUSTER_ENABLE}
      - FLINK_CLUSTER_ZOOKEEPER_ADDRESS=${FLINK_CLUSTER_ZOOKEEPER_ADDRESS}
      - DEBUG_LEVEL=${COLLECTOR_DEBUG_LEVEL}

  pinpoint-agent:
    build:
      context: ./pinpoint-agent/
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: "${PINPOINT_AGENT_NAME}"
    image: "pinpointdocker/pinpoint-agent:${PINPOINT_VERSION}"

    restart: unless-stopped

    networks:
      - pinpoint
    volumes:
      - data-volume:/pinpoint-agent
    environment:
      - SPRING_PROFILES=${SPRING_PROFILES}
      - COLLECTOR_IP=${COLLECTOR_IP}
      - PROFILER_TRANSPORT_AGENT_COLLECTOR_PORT=${PROFILER_TRANSPORT_AGENT_COLLECTOR_PORT}
      - PROFILER_TRANSPORT_METADATA_COLLECTOR_PORT=${PROFILER_TRANSPORT_METADATA_COLLECTOR_PORT}
      - PROFILER_TRANSPORT_STAT_COLLECTOR_PORT=${PROFILER_TRANSPORT_STAT_COLLECTOR_PORT}
      - PROFILER_TRANSPORT_SPAN_COLLECTOR_PORT=${PROFILER_TRANSPORT_SPAN_COLLECTOR_PORT}
      - PROFILER_SAMPLING_RATE=${PROFILER_SAMPLING_RATE}
      - DEBUG_LEVEL=${AGENT_DEBUG_LEVEL}
      - PROFILER_TRANSPORT_MODULE=${PROFILER_TRANSPORT_MODULE}
    depends_on:
      - pinpoint-collector

  #zookeepers
  zoo1:
    image: zookeeper:3.4
    restart: always
    hostname: zoo1
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      - pinpoint

  zoo2:
    image: zookeeper:3.4
    restart: always
    hostname: zoo2
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zoo3:2888:3888
    networks:
      - pinpoint

  zoo3:
    image: zookeeper:3.4
    restart: always
    hostname: zoo3
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=0.0.0.0:2888:3888
    networks:
      - pinpoint

  ##flink
  jobmanager:
    container_name: "${PINPOINT_FLINK_NAME}-jobmanager"
    image: flink:1.3.1
    expose:
      - "6123"
    ports:
      - "${FLINK_WEB_PORT:-8099}:8099"
    command: jobmanager
    environment:
      - JOB_MANAGER_RPC_ADDRESS=jobmanager
    networks:
      - pinpoint

  taskmanager:
    container_name: "${PINPOINT_FLINK_NAME}-taskmanager"
    image: flink:1.3.1
    expose:
      - "6121"
      - "6122"
      - "19994"
    ports:
      - "6121:6121"
      - "6122:6122"
      - "19994:19994"
    depends_on:
      - jobmanager
    command: taskmanager
    links:
      - "jobmanager:jobmanager"
    environment:
      - JOB_MANAGER_RPC_ADDRESS=jobmanager
    networks:
      - pinpoint

volumes:
  data-volume:
  mysql_data:

networks:
  pinpoint:
    driver: bridge
(Binary image file added: 346 KiB)
(Binary image file added: 3.7 KiB)
@@ -0,0 +1,5 @@
### Pinpoint with Java
PINPOINT_VERSION=2.0.1
SPRING_PROFILES=release
AGENT_ID=app-in-docker
APP_NAME=quickapp
@@ -0,0 +1,15 @@
FROM java:8

LABEL maintainer="Roy Kim <roy.kim@navercorp.com>"

ARG PINPOINT_VERSION
ARG AGENT_ID
ARG APP_NAME
# SPRING_PROFILES is passed as a build arg from docker-compose, so it must be declared here too
ARG SPRING_PROFILES
ENV JAVA_OPTS="-javaagent:/pinpoint-agent/pinpoint-bootstrap-${PINPOINT_VERSION}.jar -Dpinpoint.agentId=${AGENT_ID} -Dpinpoint.applicationName=${APP_NAME} -Dspring.profiles.active=${SPRING_PROFILES}"

RUN mkdir -p javasample
COPY build/Sample.java javasample
WORKDIR javasample

RUN javac Sample.java
CMD java ${JAVA_OPTS} Sample
@@ -0,0 +1,14 @@
public class Sample {

    public static void main(String[] args) {
        System.out.println("Sample Java Start");
        try {
            // keep the JVM alive so the attached Pinpoint agent has something to report
            while (true) {
                Thread.sleep(1000);
            }
        }
        catch (java.lang.InterruptedException e) {
            System.out.println("Test Finished");
        }
    }
}
@@ -0,0 +1,22 @@
version: "2.1"

services:
  java:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}
        - AGENT_ID=${AGENT_ID}
        - APP_NAME=${APP_NAME}
        - SPRING_PROFILES=${SPRING_PROFILES}
    container_name: "java"
    image: "Your Image"

    volumes_from:
      - container:pinpoint-agent

networks:
  default:
    external:
      name: pinpoint-agent_pinpoint
@@ -0,0 +1,6 @@
### Pinpoint with Tomcat
PINPOINT_VERSION=2.0.1
SPRING_PROFILES=release
APP_PORT=8000
AGENT_ID=app-in-docker
APP_NAME=quickapp
@@ -0,0 +1,22 @@
version: "2.1"

services:
  tomcat:
    #build if needed
    # build:
    #   context: .
    #   dockerfile: Dockerfile
    container_name: "tomcat"
    image: "Your Image"
    ports:
      - "${APP_PORT:-8080}:8080"
    volumes_from:
      - container:pinpoint-agent
    environment:
      JAVA_OPTS: "-javaagent:/pinpoint-agent/pinpoint-bootstrap-${PINPOINT_VERSION}.jar -Dpinpoint.agentId=${AGENT_ID} -Dpinpoint.applicationName=${APP_NAME} -Dspring.profiles.active=${SPRING_PROFILES}"
    command: catalina.sh run

networks:
  default:
    external:
      name: pinpoint-agent_pinpoint
@@ -0,0 +1,27 @@
PINPOINT_VERSION=2.0.1
SPRING_PROFILES=release

### Pinpoint-Agent

PINPOINT_AGENT_NAME=pinpoint-agent

# network module (GRPC, THRIFT)
PROFILER_TRANSPORT_MODULE=GRPC

# collector information required
COLLECTOR_IP=
PROFILER_TRANSPORT_AGENT_COLLECTOR_PORT=9991
PROFILER_TRANSPORT_METADATA_COLLECTOR_PORT=9991
PROFILER_TRANSPORT_STAT_COLLECTOR_PORT=9992
PROFILER_TRANSPORT_SPAN_COLLECTOR_PORT=9993
COLLECTOR_TCP_PORT=9994
COLLECTOR_STAT_PORT=9995
COLLECTOR_SPAN_PORT=9996

# Set the sampling rate. If you set it to N, 1 out of N transactions will be sampled.
PROFILER_SAMPLING_RATE=1

AGENT_ID=app-in-docker
APP_NAME=quickapp

AGENT_DEBUG_LEVEL=INFO
@@ -0,0 +1,24 @@
FROM alpine:3.7

LABEL maintainer="Roy Kim <roy.kim@navercorp.com>"

ARG PINPOINT_VERSION=${PINPOINT_VERSION:-2.0.1}
ARG INSTALL_URL=https://github.com/naver/pinpoint/releases/download/v${PINPOINT_VERSION}/pinpoint-agent-${PINPOINT_VERSION}.tar.gz

COPY /build/scripts/configure-agent.sh /usr/local/bin/

RUN apk add --update curl bash \
    && chmod a+x /usr/local/bin/configure-agent.sh \
    && mkdir -p /pinpoint-agent \
    && chmod -R o+x /pinpoint-agent \
    && curl -SL ${INSTALL_URL} -o pinpoint-agent.tar.gz \
    && gunzip pinpoint-agent.tar.gz \
    && tar -xf pinpoint-agent.tar --strip 1 -C /pinpoint-agent \
    && rm pinpoint-agent.tar \
    && apk del curl \
    && rm /var/cache/apk/*

VOLUME ["/pinpoint-agent"]

ENTRYPOINT ["/usr/local/bin/configure-agent.sh"]
CMD ["tail", "-f", "/dev/null"]
@@ -0,0 +1,44 @@

## Pinpoint Agent

This Docker image contains the Pinpoint Agent component of the Pinpoint application monitoring system.

## Supported Tags

- 2.0.1
- 1.8.5
- 1.8.4
- 1.8.3
- 1.8.2
- 1.8.1
- 1.8.0
- 1.7.3
- 1.7.2

Please see the [Pinpoint-Docker GitHub repository](https://github.com/naver/pinpoint-docker) for further information on how to run, configure and build this image.

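For a quick start, the prebuilt image can be pulled straight from Docker Hub (any tag from the list above); configuration happens through the environment variables shown in the repository's docker-compose files:

```
docker pull pinpointdocker/pinpoint-agent:2.0.1
```
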
## Any Issues or Suggestions?

Feel free to share any problems and suggestions via the [Pinpoint GitHub Issue page](https://github.com/naver/pinpoint/issues).
Contributions to the pinpoint-docker image are also always welcome.

## License

Pinpoint is licensed under the Apache License, Version 2.0.
See [LICENSE](https://github.com/naver/pinpoint/blob/master/LICENSE) for the full license text.

```
Copyright 2018 NAVER Corp.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
@@ -0,0 +1,25 @@
#!/bin/bash
set -e
set -x

# select the network transport module (GRPC or THRIFT)
sed -i "/profiler.transport.module=/ s/=.*/=${PROFILER_TRANSPORT_MODULE}/" /pinpoint-agent/pinpoint.config
sed -i "/profiler.transport.module=/ s/=.*/=${PROFILER_TRANSPORT_MODULE}/" /pinpoint-agent/profiles/${SPRING_PROFILES}/pinpoint-env.config

# thrift collector address and ports
sed -i "/profiler.collector.ip=/ s/=.*/=${COLLECTOR_IP}/" /pinpoint-agent/pinpoint.config
sed -i "/profiler.collector.tcp.port=/ s/=.*/=${COLLECTOR_TCP_PORT}/" /pinpoint-agent/pinpoint.config
sed -i "/profiler.collector.stat.port=/ s/=.*/=${COLLECTOR_STAT_PORT}/" /pinpoint-agent/pinpoint.config
sed -i "/profiler.collector.span.port=/ s/=.*/=${COLLECTOR_SPAN_PORT}/" /pinpoint-agent/pinpoint.config

# grpc collector address, ports and sampling rate
sed -i "/profiler.transport.grpc.collector.ip=/ s/=.*/=${COLLECTOR_IP}/" /pinpoint-agent/pinpoint.config
sed -i "/profiler.transport.grpc.collector.ip=/ s/=.*/=${COLLECTOR_IP}/" /pinpoint-agent/profiles/${SPRING_PROFILES}/pinpoint-env.config
sed -i "/profiler.transport.grpc.agent.collector.port=/ s/=.*/=${PROFILER_TRANSPORT_AGENT_COLLECTOR_PORT}/" /pinpoint-agent/pinpoint.config
sed -i "/profiler.transport.grpc.metadata.collector.port=/ s/=.*/=${PROFILER_TRANSPORT_METADATA_COLLECTOR_PORT}/" /pinpoint-agent/pinpoint.config
sed -i "/profiler.transport.grpc.stat.collector.port=/ s/=.*/=${PROFILER_TRANSPORT_STAT_COLLECTOR_PORT}/" /pinpoint-agent/pinpoint.config
sed -i "/profiler.transport.grpc.span.collector.port=/ s/=.*/=${PROFILER_TRANSPORT_SPAN_COLLECTOR_PORT}/" /pinpoint-agent/pinpoint.config
sed -i "/profiler.sampling.rate=/ s/=.*/=${PROFILER_SAMPLING_RATE}/" /pinpoint-agent/profiles/${SPRING_PROFILES}/pinpoint-env.config

# set the agent log level
sed -i "/level value=/ s/=.*/=\"${DEBUG_LEVEL}\"\/>/g" /pinpoint-agent/profiles/${SPRING_PROFILES}/log4j.xml

#ln -s /pinpoint-agent /pinpoint-agent

exec "$@"
@@ -0,0 +1,40 @@
version: "3.6"

#pinpoint-agent image will not usually be used alone
services:
  pinpoint-agent:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: "${PINPOINT_AGENT_NAME}"
    image: "pinpointdocker/pinpoint-agent:${PINPOINT_VERSION}"

    restart: unless-stopped

    volumes:
      # assumed anonymous volume path, matching the Dockerfile's VOLUME declaration;
      # it holds the unpacked agent and is shared via volumes_from
      - /pinpoint-agent
    environment:
      - SPRING_PROFILES=${SPRING_PROFILES}
      - COLLECTOR_IP=${COLLECTOR_IP}
      - PROFILER_TRANSPORT_AGENT_COLLECTOR_PORT=${PROFILER_TRANSPORT_AGENT_COLLECTOR_PORT}
      - PROFILER_TRANSPORT_METADATA_COLLECTOR_PORT=${PROFILER_TRANSPORT_METADATA_COLLECTOR_PORT}
      - PROFILER_TRANSPORT_STAT_COLLECTOR_PORT=${PROFILER_TRANSPORT_STAT_COLLECTOR_PORT}
      - PROFILER_TRANSPORT_SPAN_COLLECTOR_PORT=${PROFILER_TRANSPORT_SPAN_COLLECTOR_PORT}
      - PROFILER_SAMPLING_RATE=${PROFILER_SAMPLING_RATE}
      - DEBUG_LEVEL=${AGENT_DEBUG_LEVEL}
      - PROFILER_TRANSPORT_MODULE=${PROFILER_TRANSPORT_MODULE}
    networks:
      - pinpoint

#networks:
#  default:
#    external:
#      name: pinpoint

networks:
  pinpoint:
    driver: bridge

@@ -0,0 +1,28 @@
PINPOINT_VERSION=2.0.1
SPRING_PROFILES=release

### Pinpoint-Collector

CLUSTER_ENABLE=true
# zookeeper information required
CLUSTER_ZOOKEEPER_ADDRESS=

# hbase information required
HBASE_HOST=
HBASE_PORT=

PINPOINT_COLLECTOR_NAME=pinpoint-collector

COLLECTOR_RECEIVER_AGENT_PORT=9991
COLLECTOR_RECEIVER_METADATA_PORT=9991
COLLECTOR_RECEIVER_STAT_PORT=9992
COLLECTOR_RECEIVER_SPAN_PORT=9993
COLLECTOR_RECEIVER_BASE_PORT=9994
COLLECTOR_RECEIVER_STAT_UDP_PORT=9995
COLLECTOR_RECEIVER_SPAN_UDP_PORT=9996

FLINK_CLUSTER_ENABLE=true
FLINK_CLUSTER_ZOOKEEPER_ADDRESS=zoo1

COLLECTOR_DEBUG_LEVEL=INFO

@@ -0,0 +1,23 @@
FROM tomcat:8-jre8

LABEL maintainer="Roy Kim <roy.kim@navercorp.com>"

ARG PINPOINT_VERSION=${PINPOINT_VERSION:-2.0.1}

ARG INSTALL_URL=https://github.com/naver/pinpoint/releases/download/v${PINPOINT_VERSION}/pinpoint-collector-${PINPOINT_VERSION}.war

COPY /build/scripts/start-collector.sh /usr/local/bin/

RUN chmod a+x /usr/local/bin/start-collector.sh \
    && curl -SL ${INSTALL_URL} -o pinpoint-collector.war \
    && rm -rf /usr/local/tomcat/webapps \
    && mkdir -p /usr/local/tomcat/webapps \
    && unzip pinpoint-collector.war -d /usr/local/tomcat/webapps/ROOT \
    && rm -rf pinpoint-collector.war \
    && sed -i "s/8005/9005/g" /usr/local/tomcat/conf/server.xml \
    && sed -i "s/8080/9080/g" /usr/local/tomcat/conf/server.xml \
    && sed -i "s/8009/9009/g" /usr/local/tomcat/conf/server.xml \
    && sed -i "s/8443/9443/g" /usr/local/tomcat/conf/server.xml

ENTRYPOINT ["/usr/local/bin/start-collector.sh"]

@@ -0,0 +1,44 @@

## Pinpoint Collector

This Docker image contains the Pinpoint Collector component of the Pinpoint application monitoring system.

## Supported Tags

- 2.0.1
- 1.8.5
- 1.8.4
- 1.8.3
- 1.8.2
- 1.8.1
- 1.8.0
- 1.7.3
- 1.7.2

Please see the [Pinpoint-Docker GitHub repository](https://github.com/naver/pinpoint-docker) for further information on how to run, configure and build this image.

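The prebuilt image is likewise available on Docker Hub:

```
docker pull pinpointdocker/pinpoint-collector:2.0.1
```
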
## Any Issues or Suggestions?

Feel free to share any problems and suggestions via the [Pinpoint GitHub Issue page](https://github.com/naver/pinpoint/issues).
Contributions to the pinpoint-docker image are also always welcome.

## License

Pinpoint is licensed under the Apache License, Version 2.0.
See [LICENSE](https://github.com/naver/pinpoint/blob/master/LICENSE) for the full license text.

```
Copyright 2018 NAVER Corp.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
@@ -0,0 +1,15 @@
#!/bin/bash
set -e
set -x

# collector/web cluster settings
sed -i "/cluster.enable=/ s/=.*/=${CLUSTER_ENABLE}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-collector.properties
sed -i "/cluster.zookeeper.address=/ s/=.*/=${CLUSTER_ZOOKEEPER_ADDRESS}/g" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-collector.properties
sed -i "/flink.cluster.enable=/ s/=.*/=${FLINK_CLUSTER_ENABLE}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-collector.properties
sed -i "/flink.cluster.zookeeper.address=/ s/=.*/=${FLINK_CLUSTER_ZOOKEEPER_ADDRESS}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-collector.properties

# hbase connection
sed -i "/hbase.client.host=/ s/=.*/=${HBASE_HOST}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/profiles/${SPRING_PROFILES}/hbase-env.properties
sed -i "/hbase.client.port=/ s/=.*/=${HBASE_PORT}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/profiles/${SPRING_PROFILES}/hbase-env.properties

# log level
sed -i "/level value=/ s/=.*/=\"${DEBUG_LEVEL}\"\/>/g" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/profiles/${SPRING_PROFILES}/log4j.xml

exec /usr/local/tomcat/bin/catalina.sh run
@@ -0,0 +1,45 @@
version: "3.6"

services:
  pinpoint-collector:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: "${PINPOINT_COLLECTOR_NAME}"
    image: "pinpointdocker/pinpoint-collector:${PINPOINT_VERSION}"

    restart: always
    expose:
      - "9991"
      - "9992"
      - "9993"
      - "9994"
      - "9995"
      - "9996"
    ports:
      - "${COLLECTOR_RECEIVER_AGENT_PORT:-9991}:9991/udp"
      - "${COLLECTOR_RECEIVER_METADATA_PORT:-9991}:9991/tcp"
      - "${COLLECTOR_RECEIVER_STAT_PORT:-9992}:9992/tcp"
      - "${COLLECTOR_RECEIVER_SPAN_PORT:-9993}:9993/tcp"
      - "${COLLECTOR_RECEIVER_BASE_PORT:-9994}:9994"
      - "${COLLECTOR_RECEIVER_STAT_UDP_PORT:-9995}:9995/tcp"
      - "${COLLECTOR_RECEIVER_SPAN_UDP_PORT:-9996}:9996/tcp"
      - "${COLLECTOR_RECEIVER_STAT_UDP_PORT:-9995}:9995/udp"
      - "${COLLECTOR_RECEIVER_SPAN_UDP_PORT:-9996}:9996/udp"

    environment:
      - SPRING_PROFILES=${SPRING_PROFILES}
      - CLUSTER_ENABLE=${CLUSTER_ENABLE}
      - CLUSTER_ZOOKEEPER_ADDRESS=${CLUSTER_ZOOKEEPER_ADDRESS}
      - HBASE_HOST=${HBASE_HOST}
      - HBASE_PORT=${HBASE_PORT}
      - FLINK_CLUSTER_ENABLE=${FLINK_CLUSTER_ENABLE}
      - FLINK_CLUSTER_ZOOKEEPER_ADDRESS=${FLINK_CLUSTER_ZOOKEEPER_ADDRESS}
      - DEBUG_LEVEL=${COLLECTOR_DEBUG_LEVEL}

networks:
  pinpoint:
    driver: bridge
@@ -0,0 +1,7 @@
PINPOINT_VERSION=2.0.1

### Pinpoint-flink

PINPOINT_FLINK_NAME=pinpoint-flink
FLINK_WEB_PORT=8099

@@ -0,0 +1,45 @@
#
# Copyright 2017 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# local
hbase.client.host=pinpoint-hbase
hbase.client.port=2181

# hbase default:/hbase
hbase.zookeeper.znode.parent=/hbase

# hbase namespace to use, default:default
hbase.namespace=default

# hbase timeout options ==================================================================================
# hbase default:true
hbase.ipc.client.tcpnodelay=true
# hbase default:60000
hbase.rpc.timeout=10000
# hbase default:Integer.MAX_VALUE
hbase.client.operation.timeout=10000

# hbase socket read timeout. default: 200000
hbase.ipc.client.socket.timeout.read=20000
# socket write timeout. hbase default: 600000
hbase.ipc.client.socket.timeout.write=30000

#==================================================================================
# hbase client thread pool options
hbase.client.thread.max=128
hbase.client.threadPool.queueSize=5120
# prestartAllCoreThreads
hbase.client.threadPool.prestart=false
@@ -0,0 +1,62 @@
# configure the l4 ip address to ignore health check logs
collector.l4.ip=

# base data receiver config ---------------------------------------------------------------------
collector.receiver.base.ip=pinpoint-flink-taskmanager
collector.receiver.base.port=19994

# number of tcp worker threads
collector.receiver.base.worker.threadSize=8
# capacity of tcp worker queue
collector.receiver.base.worker.queueSize=1024
# monitoring for tcp worker
collector.receiver.base.worker.monitor=true

# change OS-level read/write socket buffer size (for linux)
#sudo sysctl -w net.core.rmem_max=
#sudo sysctl -w net.core.wmem_max=
# check current values using:
#$ /sbin/sysctl -a | grep -e rmem -e wmem

# number of agent event worker threads
collector.agentEventWorker.threadSize=4
# capacity of agent event worker queue
collector.agentEventWorker.queueSize=1024

statistics.flushPeriod=1000

# -------------------------------------------------------------------------------------------------
# The cluster-related options are used to establish connections between the agent, collector, and web in order to send/receive data between them in real time.
# You may enable additional features using these options (e.g. the RealTime Active Thread Chart).
# -------------------------------------------------------------------------------------------------
# Usage: Set the following options for collector/web components that reside in the same cluster in order to enable this feature.
# 1. cluster.enable (pinpoint-web.properties, pinpoint-flink.properties) - "true" to enable
# 2. cluster.zookeeper.address (pinpoint-web.properties, pinpoint-flink.properties) - address of the ZooKeeper instance that will be used to manage the cluster
# 3. cluster.web.tcp.port (pinpoint-web.properties) - any available port number (used to establish the connection between web and collector)
# -------------------------------------------------------------------------------------------------
# Please be aware of the following:
# 1. If the network between web, collector, and the agents is not stable, it is advisable not to use this feature.
# 2. We recommend using the cluster.web.tcp.port option. However, in cases where the collector is unable to establish a connection to the web, you may reverse this and make the web establish the connection to the collector.
#    In this case, you must set cluster.connect.address (pinpoint-web.properties), and cluster.listen.ip, cluster.listen.port (pinpoint-flink.properties) accordingly.
cluster.enable=true
cluster.zookeeper.address=zoo1
cluster.zookeeper.sessiontimeout=30000
cluster.listen.ip=
cluster.listen.port=

#collector.admin.password=
#collector.admin.api.rest.active=
#collector.admin.api.jmx.active=

collector.spanEvent.sequence.limit=10000

# flink cluster
flink.cluster.enable=true
flink.cluster.zookeeper.address=zoo1
flink.cluster.zookeeper.sessiontimeout=3000
flink.cluster.zookeeper.retry.interval=5000
flink.cluster.tcp.port=19994

# flink env init
flink.StreamExecutionEnvironment=
flink.sourceFunction.Parallel=1
@@ -0,0 +1,45 @@
version: "3.6"
##
# Using this file separately requires a pinpoint-flink-job-{version}.jar built
# from the pinpoint-flink module with correct properties in hbase.properties and pinpoint-flink.properties.
# Sample configs used to build pinpoint-flink/build/pinpoint-flink-job-{version}.jar are under the build folder.
##
services:
  ##flink
  jobmanager:
    container_name: "${PINPOINT_FLINK_NAME}-jobmanager"
    image: flink:1.3.1
    expose:
      - "6123"
    ports:
      - "${FLINK_WEB_PORT:-8099}:8099"
    command: jobmanager
    environment:
      - JOB_MANAGER_RPC_ADDRESS=jobmanager
    networks:
      - pinpoint

  taskmanager:
    container_name: "${PINPOINT_FLINK_NAME}-taskmanager"
    image: flink:1.3.1
    expose:
      - "6121"
      - "6122"
      - "19994"
    ports:
      - "6121:6121"
      - "6122:6122"
      - "19994:19994"
    depends_on:
      - jobmanager
    command: taskmanager
    links:
      - "jobmanager:jobmanager"
    environment:
      - JOB_MANAGER_RPC_ADDRESS=jobmanager
    networks:
      - pinpoint

networks:
  pinpoint:
    driver: bridge
@@ -0,0 +1,8 @@
PINPOINT_VERSION=2.0.1

### Pinpoint-Hbase

PINPOINT_HBASE_NAME=pinpoint-hbase
# config for hbase in external docker
EXTERNAL_HBASE_PORT=2180

@@ -0,0 +1,32 @@
FROM java:8-jdk

LABEL maintainer="Roy Kim <roy.kim@navercorp.com>"

ARG PINPOINT_VERSION=${PINPOINT_VERSION:-2.0.1}

ENV HBASE_REPOSITORY=http://apache.mirrors.pair.com/hbase
ENV HBASE_SUB_REPOSITORY=http://archive.apache.org/dist/hbase

ENV HBASE_VERSION=1.2.6
ENV BASE_DIR=/opt/hbase
ENV HBASE_HOME=${BASE_DIR}/hbase-${HBASE_VERSION}


COPY hbase-site.xml hbase-site.xml

RUN mkdir -p ${BASE_DIR} \
    && cd ${BASE_DIR} \
    && curl -fSL "${HBASE_REPOSITORY}/${HBASE_VERSION}/hbase-${HBASE_VERSION}-bin.tar.gz" -o hbase.tar.gz || curl -fSL "${HBASE_SUB_REPOSITORY}/${HBASE_VERSION}/hbase-${HBASE_VERSION}-bin.tar.gz" -o hbase.tar.gz \
    && tar xfvz hbase.tar.gz \
    && mv ../../hbase-site.xml ../../${HBASE_HOME}/conf/hbase-site.xml \
    && curl -SL "https://raw.githubusercontent.com/naver/pinpoint/v${PINPOINT_VERSION}/hbase/scripts/hbase-create.hbase" -o ${BASE_DIR}/hbase-create.hbase \
    && ${HBASE_HOME}/bin/start-hbase.sh \
    && sleep 10 \
    && ${HBASE_HOME}/bin/hbase shell ${BASE_DIR}/hbase-create.hbase \
    && ${HBASE_HOME}/bin/stop-hbase.sh \
    && rm ${BASE_DIR}/hbase-create.hbase \
    && rm -rf hbase.tar.gz

VOLUME ["/home/pinpoint/hbase", "/home/pinpoint/zookeeper"]

ENTRYPOINT ${BASE_DIR}/hbase-${HBASE_VERSION}/bin/hbase master start
@@ -0,0 +1,44 @@

## Pinpoint Hbase

This Docker image contains the Pinpoint Hbase component of the Pinpoint application monitoring system.

## Supported Tags

- 2.0.1
- 1.8.5
- 1.8.4
- 1.8.3
- 1.8.2
- 1.8.1
- 1.8.0
- 1.7.3
- 1.7.2

Please see the [Pinpoint-Docker GitHub repository](https://github.com/naver/pinpoint-docker) for further information on how to run, configure and build this image.

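The prebuilt image can be pulled from Docker Hub; the ports it exposes (ZooKeeper 2181, HBase master and regionserver ports) are listed in the docker-compose file in this folder:

```
docker pull pinpointdocker/pinpoint-hbase:2.0.1
```
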
## Any Issues or Suggestions?

Feel free to share any problems and suggestions via the [Pinpoint GitHub Issue page](https://github.com/naver/pinpoint/issues).
Contributions to the pinpoint-docker image are also always welcome.

## License

Pinpoint is licensed under the Apache License, Version 2.0.
See [LICENSE](https://github.com/naver/pinpoint/blob/master/LICENSE) for the full license text.

```
Copyright 2018 NAVER Corp.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
@@ -0,0 +1,34 @@
version: "3.6"

services:
  pinpoint-hbase:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: "${PINPOINT_HBASE_NAME}"
    image: "pinpointdocker/pinpoint-hbase:${PINPOINT_VERSION}"

    volumes:
      - /home/pinpoint/hbase
      - /home/pinpoint/zookeeper
    expose:
      # zookeeper
      - "2181"
      # HBase Master API port
      - "60000"
      # HBase Master Web UI
      - "16010"
      # Regionserver API port
      - "60020"
      # HBase Regionserver web UI
      - "16030"
    ports:
      - "${EXTERNAL_HBASE_PORT:-2181}:2181"
      - "60000:60000"
      - "16010:16010"
      - "60020:60020"
      - "16030:16030"
    restart: always
@@ -0,0 +1,18 @@
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>file:///home/pinpoint/hbase</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/home/pinpoint/zookeeper</value>
  </property>
  <property>
    <name>hbase.master.port</name>
    <value>60000</value>
  </property>
  <property>
    <name>hbase.regionserver.port</name>
    <value>60020</value>
  </property>
</configuration>
@@ -0,0 +1,7 @@
PINPOINT_VERSION=2.0.1

### Pinpoint-mysql
MYSQL_ROOT_PASSWORD=root123
MYSQL_USER=admin
MYSQL_PASSWORD=admin
MYSQL_DATABASE=pinpoint
@@ -0,0 +1,11 @@
FROM mysql:5.7

ARG PINPOINT_VERSION=${PINPOINT_VERSION:-2.0.1}

RUN apt update \
    && apt-get install -y --no-install-recommends ca-certificates wget \
    && wget -O /docker-entrypoint-initdb.d/CreateTableStatement-mysql.sql "https://raw.githubusercontent.com/naver/pinpoint/v$PINPOINT_VERSION/web/src/main/resources/sql/CreateTableStatement-mysql.sql" \
    && wget -O /docker-entrypoint-initdb.d/SpringBatchJobRepositorySchema-mysql.sql "https://raw.githubusercontent.com/naver/pinpoint/v$PINPOINT_VERSION/web/src/main/resources/sql/SpringBatchJobRepositorySchema-mysql.sql" \
    && sed -i '/^--/d' /docker-entrypoint-initdb.d/CreateTableStatement-mysql.sql \
    && sed -i '/^--/d' /docker-entrypoint-initdb.d/SpringBatchJobRepositorySchema-mysql.sql \
    && apt-get purge -y --auto-remove ca-certificates wget
@@ -0,0 +1,44 @@

## Pinpoint Mysql

This Docker image contains the Pinpoint Mysql component of the Pinpoint application monitoring system.

## Supported Tags

- 2.0.1
- 1.8.5
- 1.8.4
- 1.8.3
- 1.8.2
- 1.8.1
- 1.8.0
- 1.7.3
- 1.7.2

Please see the [Pinpoint-Docker GitHub repository](https://github.com/naver/pinpoint-docker) for further information on how to run, configure and build this image.

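As a sketch, the image can also be pulled and run on its own; the credentials below simply mirror the defaults from this folder's *.env* file:

```
docker pull pinpointdocker/pinpoint-mysql:2.0.1
docker run -d --name pinpoint-mysql -p 13306:3306 \
  -e MYSQL_ROOT_PASSWORD=root123 -e MYSQL_USER=admin \
  -e MYSQL_PASSWORD=admin -e MYSQL_DATABASE=pinpoint \
  pinpointdocker/pinpoint-mysql:2.0.1
```
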
## Any Issues or Suggestions?

Feel free to share any problems and suggestions via the [Pinpoint GitHub Issue page](https://github.com/naver/pinpoint/issues).
Contributions to the pinpoint-docker image are also always welcome.

## License

Pinpoint is licensed under the Apache License, Version 2.0.
See [LICENSE](https://github.com/naver/pinpoint/blob/master/LICENSE) for the full license text.

```
Copyright 2018 NAVER Corp.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
@@ -0,0 +1,25 @@
version: "3.6"

services:
  pinpoint-mysql:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: pinpoint-mysql
    restart: always
    image: "pinpointdocker/pinpoint-mysql:${PINPOINT_VERSION}"

    hostname: pinpoint-mysql
    ports:
      - "13306:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
      - MYSQL_USER=${MYSQL_USER}
      - MYSQL_PASSWORD=${MYSQL_PASSWORD}
      - MYSQL_DATABASE=${MYSQL_DATABASE}

    volumes:
      # Anonymous volume so MySQL data survives container recreation
      - /var/lib/mysql
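A usage sketch, assuming a local mysql client is installed; the port and credentials come from the compose file and .env above:

```
docker-compose up -d
# Confirm the Pinpoint schema was created at first startup
mysql -h 127.0.0.1 -P 13306 -uadmin -padmin -e 'SHOW TABLES;' pinpoint
```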
@@ -0,0 +1,49 @@
PINPOINT_VERSION=2.0.1
SPRING_PROFILES=release

### Pinpoint-Web

PINPOINT_WEB_NAME=pinpoint-web
SPRING_PROFILES=release

WEB_PAGE_PORT=8079

CLUSTER_ENABLE=true
#zookeeper information required
CLUSTER_ZOOKEEPER_ADDRESS=

ADMIN_PASSWORD=admin

ANALYTICS=true

#hbase information required
HBASE_HOST=
HBASE_PORT=

WEB_DEBUG_LEVEL=INFO

#flink server information required if used
BATCH_ENABLE=false
BATCH_SERVER_IP=127.0.0.127
BATCH_FLINK_SERVER=pinpoint-flink-jobmanager

CONFIG_SHOW_APPLICATIONSTAT=true

#mysql information required
JDBC_DRIVER=com.mysql.jdbc.Driver
JDBC_URL=jdbc:mysql://pinpoint-mysql:3306/pinpoint?characterEncoding=UTF-8
JDBC_USERNAME=
JDBC_PASSWORD=

#mail server information required
MAIL_HOST=
MAIL_PORT=
MAIL_USERNAME=
MAIL_PASSWORD=
MAIL_PROPERTIES_MAIL_TRANSPORT_PROTOCOL=
MAIL_PROPERTIES_MAIL_SMTP_AUTH=
MAIL_PROPERTIES_MAIL_SMTP_PORT=
MAIL_PROPERTIES_MAIL_SMTP_FROM=
MAIL_PROPERTIES_MAIL_STARTTLS_ENABLE=
MAIL_PROPERTIES_MAIL_STARTTLS_REQUIRED=
MAIL_PROPERTIES_MAIL_DEBUG=
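The blank values (CLUSTER_ZOOKEEPER_ADDRESS, HBASE_HOST, HBASE_PORT, JDBC_USERNAME, JDBC_PASSWORD, and the MAIL_* block if mail is wanted) must be filled in before the web container will start cleanly. A sketch with assumed values pointing at the zookeeper and mysql services defined elsewhere in this commit; these are illustrations, not mandated settings:

```
# Assumed hostname of an HBase container and its zookeeper client port
HBASE_HOST=pinpoint-hbase
HBASE_PORT=2181
CLUSTER_ZOOKEEPER_ADDRESS=zoo1
JDBC_USERNAME=admin
JDBC_PASSWORD=admin
```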
@@ -0,0 +1,21 @@
FROM tomcat:8-jre8

LABEL maintainer="Roy Kim <roy.kim@navercorp.com>"

ARG PINPOINT_VERSION=${PINPOINT_VERSION:-2.0.1}
ARG INSTALL_URL=https://github.com/naver/pinpoint/releases/download/v${PINPOINT_VERSION}/pinpoint-web-${PINPOINT_VERSION}.war

COPY /build/scripts/start-web.sh /usr/local/bin/
COPY /build/mail.zip /assets/mail.zip

RUN chmod a+x /usr/local/bin/start-web.sh \
    && curl -SL ${INSTALL_URL} -o pinpoint-web.war \
    && rm -rf /usr/local/tomcat/webapps \
    && mkdir -p /usr/local/tomcat/webapps \
    && unzip pinpoint-web.war -d /usr/local/tomcat/webapps/ROOT \
    && rm -rf pinpoint-web.war \
    && curl -SL https://maven.java.net/content/repositories/releases/com/sun/mail/javax.mail/1.5.2/javax.mail-1.5.2.jar -o /usr/local/tomcat/webapps/ROOT/WEB-INF/lib/javax.mail-1.5.2.jar \
    && unzip /assets/mail.zip -d /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/ \
    && rm -rf /assets/mail.zip

ENTRYPOINT ["/usr/local/bin/start-web.sh"]
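Note that the COPY lines above expect build/scripts/start-web.sh and build/mail.zip to exist relative to the build context. A hedged build sketch (the tag name is an example):

```
# Sketch: build the web image for a specific Pinpoint release
docker build --build-arg PINPOINT_VERSION=2.0.1 -t pinpointdocker/pinpoint-web:2.0.1 .
```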
@@ -0,0 +1,44 @@

## Pinpoint Web

This Docker image contains the Pinpoint Web component of the Pinpoint application monitoring system.

## Supported Tags

- 2.0.1
- 1.8.5
- 1.8.4
- 1.8.3
- 1.8.2
- 1.8.1
- 1.8.0
- 1.7.3
- 1.7.2

Please see the [Pinpoint-Docker GitHub repository](https://github.com/naver/pinpoint-docker) for further information on how to run, configure and build this image.

## Any Issues or Suggestions?

Feel free to share any problems and suggestions via the [Pinpoint GitHub Issue page](https://github.com/naver/pinpoint/issues).
Contributions to the pinpoint-docker image are also always welcome.

## License

Pinpoint is licensed under the Apache License, Version 2.0.
See [LICENSE](https://github.com/naver/pinpoint/blob/master/LICENSE) for full license text.

```
Copyright 2018 NAVER Corp.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
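A quick liveness sketch once the container is running, assuming the WEB_PAGE_PORT=8079 mapping from the .env file above:

```
curl -sf http://localhost:8079/ > /dev/null && echo "pinpoint-web is up"
```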
Binary file not shown.
@@ -0,0 +1,85 @@
#!/bin/bash
set -e
set -x

# Patch the packaged pinpoint-web.properties with values from the environment.
sed -i "/cluster.enable=/ s/=.*/=${CLUSTER_ENABLE}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-web.properties
sed -i "/cluster.zookeeper.address=/ s/=.*/=${CLUSTER_ZOOKEEPER_ADDRESS}/g" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-web.properties
#sed -i "/cluster.web.tcp.port=/ s/=.*/=${CLUSTER_WEB_TCP_PORT}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-web.properties
sed -i "/admin.password=/ s/=.*/=${ADMIN_PASSWORD}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-web.properties
sed -i "/config.sendUsage=/ s/=.*/=${ANALYTICS}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-web.properties
sed -i "/config.show.applicationStat=/ s/=.*/=${CONFIG_SHOW_APPLICATIONSTAT}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/pinpoint-web.properties

# HBase connection settings for the active Spring profile.
sed -i "/hbase.client.host=/ s/=.*/=${HBASE_HOST}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/profiles/${SPRING_PROFILES}/hbase-env.properties
sed -i "/hbase.client.port=/ s/=.*/=${HBASE_PORT}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/profiles/${SPRING_PROFILES}/hbase-env.properties

# Flink batch settings.
sed -i "/batch.enable=/ s/=.*/=${BATCH_ENABLE}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/batch.properties
sed -i "/batch.server.ip=/ s/=.*/=${BATCH_SERVER_IP}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/batch.properties
sed -i "/batch.flink.server=/ s/=.*/=${BATCH_FLINK_SERVER}/" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/batch.properties

# Log level.
sed -i "/level value=/ s/=.*/=\"${DEBUG_LEVEL}\"\/>/g" /usr/local/tomcat/webapps/ROOT/WEB-INF/classes/profiles/${SPRING_PROFILES}/log4j.xml

WEB_INF_CLASSES_DIR=/usr/local/tomcat/webapps/ROOT/WEB-INF/classes
APPLICATION_CONTEXT_WEB_FILE=${WEB_INF_CLASSES_DIR}/applicationContext-web.xml
APPLICATION_CONTEXT_MAIL_FILE=${WEB_INF_CLASSES_DIR}/applicationContext-mail.xml

# Generate jdbc.properties from the environment, with local defaults.
echo -e "
jdbc.driverClassName=${JDBC_DRIVER:-com.mysql.jdbc.Driver}
jdbc.url=${JDBC_URL:-jdbc:mysql://localhost:13306/pinpoint?characterEncoding=UTF-8}
jdbc.username=${JDBC_USERNAME:-admin}
jdbc.password=${JDBC_PASSWORD:-admin}
" > ${WEB_INF_CLASSES_DIR}/jdbc.properties

# Mail is optional: only import the mail context when MAIL_HOST is set.
sed -i '/classpath:applicationContext-mail.xml/d' ${APPLICATION_CONTEXT_WEB_FILE}
if [ "$MAIL_HOST" != "" ]; then
    sed -i 's/<\/beans>/    <import resource="classpath:applicationContext-mail.xml" \/>\
<\/beans>/' ${APPLICATION_CONTEXT_WEB_FILE}

    sed -i "/name=\"host\"/c\    <property name=\"host\" value=\"${MAIL_HOST}\" />" ${APPLICATION_CONTEXT_MAIL_FILE}
    sed -i "/name=\"port\"/c\    <property name=\"port\" value=\"${MAIL_PORT}\" />" ${APPLICATION_CONTEXT_MAIL_FILE}
    sed -i "/name=\"username\"/c\    <property name=\"username\" value=\"${MAIL_USERNAME}\" />" ${APPLICATION_CONTEXT_MAIL_FILE}
    sed -i "/name=\"password\"/c\    <property name=\"password\" value=\"${MAIL_PASSWORD}\" />" ${APPLICATION_CONTEXT_MAIL_FILE}

    # Each optional mail property is either set, or commented out with a sample value.
    if [ "$MAIL_PROPERTIES_MAIL_TRANSPORT_PROTOCOL" != "" ]; then
        sed -i "/prop key=\"mail.transport.protocol\"/c\    <prop key=\"mail.transport.protocol\">${MAIL_PROPERTIES_MAIL_TRANSPORT_PROTOCOL}</prop>" ${APPLICATION_CONTEXT_MAIL_FILE}
    else
        sed -i "/prop key=\"mail.transport.protocol\"/c\    <!-- <prop key=\"mail.transport.protocol\">smtp</prop> -->" ${APPLICATION_CONTEXT_MAIL_FILE}
    fi

    if [ "$MAIL_PROPERTIES_MAIL_SMTP_PORT" != "" ]; then
        sed -i "/prop key=\"mail.smtp.port\"/c\    <prop key=\"mail.smtp.port\">${MAIL_PROPERTIES_MAIL_SMTP_PORT}</prop>" ${APPLICATION_CONTEXT_MAIL_FILE}
    else
        sed -i "/prop key=\"mail.smtp.port\"/c\    <!-- <prop key=\"mail.smtp.port\">25</prop> -->" ${APPLICATION_CONTEXT_MAIL_FILE}
    fi

    if [ "$MAIL_PROPERTIES_MAIL_SMTP_AUTH" != "" ]; then
        sed -i "/prop key=\"mail.smtp.auth\"/c\    <prop key=\"mail.smtp.auth\">${MAIL_PROPERTIES_MAIL_SMTP_AUTH}</prop>" ${APPLICATION_CONTEXT_MAIL_FILE}
    else
        sed -i "/prop key=\"mail.smtp.auth\"/c\    <!-- <prop key=\"mail.smtp.auth\">true</prop> -->" ${APPLICATION_CONTEXT_MAIL_FILE}
    fi

    if [ "$MAIL_PROPERTIES_MAIL_STARTTLS_ENABLE" != "" ]; then
        sed -i "/prop key=\"mail.smtp.starttls.enable\"/c\    <prop key=\"mail.smtp.starttls.enable\">${MAIL_PROPERTIES_MAIL_STARTTLS_ENABLE}</prop>" ${APPLICATION_CONTEXT_MAIL_FILE}
    else
        sed -i "/prop key=\"mail.smtp.starttls.enable\"/c\    <!-- <prop key=\"mail.smtp.starttls.enable\">true</prop> -->" ${APPLICATION_CONTEXT_MAIL_FILE}
    fi

    if [ "$MAIL_PROPERTIES_MAIL_STARTTLS_REQUIRED" != "" ]; then
        sed -i "/prop key=\"mail.smtp.starttls.required\"/c\    <prop key=\"mail.smtp.starttls.required\">${MAIL_PROPERTIES_MAIL_STARTTLS_REQUIRED}</prop>" ${APPLICATION_CONTEXT_MAIL_FILE}
    else
        sed -i "/prop key=\"mail.smtp.starttls.required\"/c\    <!-- <prop key=\"mail.smtp.starttls.required\">true</prop> -->" ${APPLICATION_CONTEXT_MAIL_FILE}
    fi

    if [ "$MAIL_PROPERTIES_MAIL_DEBUG" != "" ]; then
        sed -i "/prop key=\"mail.debug\"/c\    <prop key=\"mail.debug\">${MAIL_PROPERTIES_MAIL_DEBUG}</prop>" ${APPLICATION_CONTEXT_MAIL_FILE}
    else
        sed -i "/prop key=\"mail.debug\"/c\    <!-- <prop key=\"mail.debug\">true</prop> -->" ${APPLICATION_CONTEXT_MAIL_FILE}
    fi

    if [ "$MAIL_PROPERTIES_MAIL_SMTP_FROM" != "" ]; then
        sed -i "/prop key=\"mail.smtp.from\"/c\    <prop key=\"mail.smtp.from\">${MAIL_PROPERTIES_MAIL_SMTP_FROM}</prop>" ${APPLICATION_CONTEXT_MAIL_FILE}
    else
        sed -i "/prop key=\"mail.smtp.from\"/c\    <!-- <prop key=\"mail.smtp.from\">abc@example.com</prop> -->" ${APPLICATION_CONTEXT_MAIL_FILE}
    fi
fi

exec /usr/local/tomcat/bin/catalina.sh run
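The script's core trick is patching the packaged property files in place at container start. A self-contained sketch of the sed idiom it uses throughout:

```
# "s/=.*/=VALUE/" on lines matching the key replaces everything after "="
echo 'cluster.enable=false' > /tmp/demo.properties
CLUSTER_ENABLE=true
sed -i "/cluster.enable=/ s/=.*/=${CLUSTER_ENABLE}/" /tmp/demo.properties
cat /tmp/demo.properties   # prints: cluster.enable=true
```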
@@ -0,0 +1,50 @@
version: "3.6"

services:
  pinpoint-web:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - PINPOINT_VERSION=${PINPOINT_VERSION}

    container_name: "${PINPOINT_WEB_NAME}"
    image: "pinpointdocker/pinpoint-web:${PINPOINT_VERSION}"

    restart: always
    expose:
      - "8080"
      - "9997"
    ports:
      - "9997:9997"
      - "${WEB_PAGE_PORT:-8080}:8080"
    environment:
      - JAVA_OPTS="-Dspring.profiles.active=${SPRING_PROFILES}"
      - SPRING_PROFILES=${SPRING_PROFILES}
      - CLUSTER_ENABLE=${CLUSTER_ENABLE}
      - CLUSTER_ZOOKEEPER_ADDRESS=${CLUSTER_ZOOKEEPER_ADDRESS}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD}
      - ANALYTICS=${ANALYTICS}
      - HBASE_HOST=${HBASE_HOST}
      - HBASE_PORT=${HBASE_PORT}
      - DEBUG_LEVEL=${WEB_DEBUG_LEVEL}
      - CONFIG_SHOW_APPLICATIONSTAT=${CONFIG_SHOW_APPLICATIONSTAT}
      - BATCH_ENABLE=${BATCH_ENABLE}
      - BATCH_SERVER_IP=${BATCH_SERVER_IP}
      - BATCH_FLINK_SERVER=${BATCH_FLINK_SERVER}
      - JDBC_DRIVER=${JDBC_DRIVER}
      - JDBC_URL=${JDBC_URL}
      - JDBC_USERNAME=${JDBC_USERNAME}
      - JDBC_PASSWORD=${JDBC_PASSWORD}
      - MAIL_HOST=${MAIL_HOST}
      - MAIL_PORT=${MAIL_PORT}
      - MAIL_USERNAME=${MAIL_USERNAME}
      - MAIL_PASSWORD=${MAIL_PASSWORD}
      - MAIL_PROPERTIES_MAIL_TRANSPORT_PROTOCOL=${MAIL_PROPERTIES_MAIL_TRANSPORT_PROTOCOL}
      - MAIL_PROPERTIES_MAIL_SMTP_AUTH=${MAIL_PROPERTIES_MAIL_SMTP_AUTH}
      - MAIL_PROPERTIES_MAIL_SMTP_PORT=${MAIL_PROPERTIES_MAIL_SMTP_PORT}
      - MAIL_PROPERTIES_MAIL_SMTP_FROM=${MAIL_PROPERTIES_MAIL_SMTP_FROM}
      - MAIL_PROPERTIES_MAIL_STARTTLS_ENABLE=${MAIL_PROPERTIES_MAIL_STARTTLS_ENABLE}
      - MAIL_PROPERTIES_MAIL_STARTTLS_REQUIRED=${MAIL_PROPERTIES_MAIL_STARTTLS_REQUIRED}
      - MAIL_PROPERTIES_MAIL_DEBUG=${MAIL_PROPERTIES_MAIL_DEBUG}
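A usage sketch; all variables are resolved from the .env file shown earlier:

```
docker-compose up -d
docker-compose logs -f pinpoint-web
```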
@@ -0,0 +1,38 @@
version: "3.6"

services:
  #zookeepers
  zoo1:
    image: zookeeper:3.4
    restart: always
    hostname: zoo1
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      - pinpoint

  zoo2:
    image: zookeeper:3.4
    restart: always
    hostname: zoo2
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zoo3:2888:3888
    networks:
      - pinpoint

  zoo3:
    image: zookeeper:3.4
    restart: always
    hostname: zoo3
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=0.0.0.0:2888:3888
    networks:
      - pinpoint

networks:
  pinpoint:
    driver: bridge
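Once the three nodes are up, the ensemble should elect a leader. A verification sketch, assuming zkServer.sh is on the image's PATH as in the official zookeeper image:

```
docker-compose up -d
# Each node should report Mode: leader or Mode: follower
docker-compose exec zoo1 zkServer.sh status
```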