Sync 1.9.0

Jerry
2021-09-01 20:08:27 +08:00
parent 6e84b4234c
commit 4f4995e6f0
198 changed files with 4017 additions and 1500 deletions

View File

@@ -0,0 +1,7 @@
PINPOINT_VERSION=2.0.1
### Pinpoint-flink
PINPOINT_FLINK_NAME=pinpoint-flink
FLINK_WEB_PORT=8099
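Docker Compose reads this .env file from the directory it is launched in and substitutes the values into the compose file. A quick sanity check (assuming this .env sits next to the docker-compose.yml shown at the end of this commit):

$ docker-compose config   # prints the compose file with ${PINPOINT_FLINK_NAME} and ${FLINK_WEB_PORT} already resolved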

View File

@@ -0,0 +1,45 @@
#
# Copyright 2017 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# local
hbase.client.host=pinpoint-hbase
hbase.client.port=2181
# hbase default:/hbase
hbase.zookeeper.znode.parent=/hbase
# hbase namespace to use default:default
hbase.namespace=default
# hbase timeout option==================================================================================
# hbase default:true
hbase.ipc.client.tcpnodelay=true
# hbase default:60000
hbase.rpc.timeout=10000
# hbase default:Integer.MAX_VALUE
hbase.client.operation.timeout=10000
# hbase socket read timeout. default: 200000
hbase.ipc.client.socket.timeout.read=20000
# socket write timeout. hbase default: 600000
hbase.ipc.client.socket.timeout.write=30000
#==================================================================================
# hbase client thread pool option
hbase.client.thread.max=128
hbase.client.threadPool.queueSize=5120
# prestartAllCoreThreads
hbase.client.threadPool.prestart=false
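Here hbase.client.host/hbase.client.port point the flink job's HBase client at the ZooKeeper quorum of the HBase container (pinpoint-hbase:2181), and the remaining options shorten the default RPC/socket timeouts. A rough reachability check from inside the taskmanager container (this assumes nc is installed in the image and that the ZooKeeper build still answers four-letter-word commands):

$ docker exec pinpoint-flink-taskmanager sh -c 'echo ruok | nc pinpoint-hbase 2181'   # "imok" means the quorum is reachable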

View File

@@ -0,0 +1,62 @@
# configure l4 ip address to ignore health check logs
collector.l4.ip=
# base data receiver config ---------------------------------------------------------------------
collector.receiver.base.ip=pinpoint-flink-taskmanager
collector.receiver.base.port=19994
# number of tcp worker threads
collector.receiver.base.worker.threadSize=8
# capacity of tcp worker queue
collector.receiver.base.worker.queueSize=1024
# monitoring for tcp worker
collector.receiver.base.worker.monitor=true
# change OS level read/write socket buffer size (for linux)
#sudo sysctl -w net.core.rmem_max=
#sudo sysctl -w net.core.wmem_max=
# check current values using:
#$ /sbin/sysctl -a | grep -e rmem -e wmem
# number of agent event worker threads
collector.agentEventWorker.threadSize=4
# capacity of agent event worker queue
collector.agentEventWorker.queueSize=1024
statistics.flushPeriod=1000
# -------------------------------------------------------------------------------------------------
# The cluster-related options are used to establish connections between the agent, collector, and web in order to send/receive data between them in real time.
# You may enable additional features using this option (Ex : RealTime Active Thread Chart).
# -------------------------------------------------------------------------------------------------
# Usage: Set the following options for collector/web components that reside in the same cluster in order to enable this feature.
# 1. cluster.enable (pinpoint-web.properties, pinpoint-flink.properties) - "true" to enable
# 2. cluster.zookeeper.address (pinpoint-web.properties, pinpoint-flink.properties) - address of the ZooKeeper instance that will be used to manage the cluster
# 3. cluster.web.tcp.port (pinpoint-web.properties) - any available port number (used to establish connection between web and collector)
# -------------------------------------------------------------------------------------------------
# Please be aware of the following:
#1. If the network between the web, collector, and agents is not stable, it is advisable not to use this feature.
#2. We recommend using the cluster.web.tcp.port option. However, if the collector is unable to establish a connection to the web, you may reverse this and have the web establish the connection to the collector.
# In this case, you must set cluster.connect.address (pinpoint-web.properties), and cluster.listen.ip, cluster.listen.port (pinpoint-flink.properties) accordingly.
cluster.enable=true
cluster.zookeeper.address=zoo1
cluster.zookeeper.sessiontimeout=30000
cluster.listen.ip=
cluster.listen.port=
#collector.admin.password=
#collector.admin.api.rest.active=
#collector.admin.api.jmx.active=
collector.spanEvent.sequence.limit=10000
# flink cluster
flink.cluster.enable=true
flink.cluster.zookeeper.address=zoo1
flink.cluster.zookeeper.sessiontimeout=3000
flink.cluster.zookeeper.retry.interval=5000
flink.cluster.tcp.port=19994
# flink env init
flink.StreamExecutionEnvironment=
flink.sourceFunction.Parallel=1
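The cluster.* and flink.cluster.* entries wire the flink job into the Pinpoint cluster through ZooKeeper (zoo1) and expose port 19994 on the taskmanager as the base data receiver (collector.receiver.base.ip/port above). Two quick connectivity probes, assuming nc is available where you run them and ZooKeeper listens on its default client port 2181:

$ docker exec pinpoint-flink-taskmanager sh -c 'nc -z zoo1 2181 && echo zookeeper reachable'
$ nc -z localhost 19994 && echo receiver port reachable   # 19994 is published to the host by the compose file below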

View File

@@ -0,0 +1,45 @@
version: "3.6"
##
# To use this compose file on its own, you need pinpoint-flink-job-{version}.jar built
# from the pinpoint-flink module with the matching hbase.properties and pinpoint-flink.properties.
# The sample configs used to build pinpoint-flink/build/pinpoint-flink-job-{version}.jar are under the build folder.
##
services:
  ##flink
  jobmanager:
    container_name: "${PINPOINT_FLINK_NAME}-jobmanager"
    image: flink:1.3.1
    expose:
      - "6123"
    ports:
      - "${FLINK_WEB_PORT:-8099}:8099"
    command: jobmanager
    environment:
      - JOB_MANAGER_RPC_ADDRESS=jobmanager
    networks:
      - pinpoint

  taskmanager:
    container_name: "${PINPOINT_FLINK_NAME}-taskmanager"
    image: flink:1.3.1
    expose:
      - "6121"
      - "6122"
      - "19994"
    ports:
      - "6121:6121"
      - "6122:6122"
      - "19994:19994"
    depends_on:
      - jobmanager
    command: taskmanager
    links:
      - "jobmanager:jobmanager"
    environment:
      - JOB_MANAGER_RPC_ADDRESS=jobmanager
    networks:
      - pinpoint

networks:
  pinpoint:
    driver: bridge
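To bring the flink services up and submit the streaming job, a typical sequence looks like the following. The jar name/version and in-container paths are illustrative, not part of this commit; in the official flink image the CLI lives under /opt/flink/bin if it is not already on PATH:

$ docker-compose up -d
$ docker cp pinpoint-flink-job-2.0.1.jar pinpoint-flink-jobmanager:/tmp/
$ docker exec pinpoint-flink-jobmanager flink run -d /tmp/pinpoint-flink-job-2.0.1.jar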