SkyWalking Environment Setup

Install Elasticsearch

Install via Docker
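If Elasticsearch is not already running, a minimal single-node ES 7 container is enough for a test setup. A sketch (the image tag and heap size here are assumptions; if you need the elastic user/password referenced below, X-Pack security must also be enabled):

docker run -d --name elasticsearch \
  -p 9200:9200 -p 9300:9300 \
  -e "discovery.type=single-node" \
  -e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
  elasticsearch:7.6.2

# verify ES is up
curl http://localhost:9200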

Install OAP and UI

docker-compose.yml

version: '3.3'
services:
  oap:
    image: apache/skywalking-oap-server:7.0.0-es7
    container_name: skywalking-oap
    #restart: always
    #command: sleep 36000  # debugging aid only: keeps the container alive without starting OAP; leave commented out for a normal startup
    ports:
      - 11800:11800
      - 12800:12800
    environment:
      SW_STORAGE: elasticsearch7
      SW_ES_USER: elastic
      SW_ES_PASSWORD: XXXX
      SW_STORAGE_ES_CLUSTER_NODES: 192.168.1.XX:9200
      SW_NAMESPACE: sz_skywalking_index
      SW_STORAGE_ES_BULK_ACTIONS: 4000
      SW_STORAGE_ES_FLUSH_INTERVAL: 30
      SW_STORAGE_ES_CONCURRENT_REQUESTS: 4
      SW_STORAGE_ES_QUERY_MAX_SIZE: 8000
      SW_STORAGE_ES_RECORD_DATA_TTL: 3
      SW_STORAGE_ES_OTHER_METRIC_DATA_TTL: 3
      SW_STORAGE_ES_MONTH_METRIC_DATA_TTL: 1
    volumes:
      - ./application.yml:/skywalking/config/application.yml  
 
  ui:
    image: apache/skywalking-ui:7.0.0
    container_name: skywalking-ui
    depends_on:
      - oap
    links:
      - oap
    ports:
      - 8080:8080
    environment:
      SW_OAP_ADDRESS: oap:12800
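With docker-compose.yml and application.yml in place, bring the stack up and check it roughly like this:

docker-compose up -d
docker-compose ps    # oap should expose 11800 (gRPC) and 12800 (REST), ui 8080

The UI is then reachable at http://<host>:8080.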

application.yml

The OAP configuration file application.yml lives in the /skywalking/config directory.
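Rather than writing application.yml from scratch, one option is to copy the default file out of the running container (container name as in the compose file above) and edit that:

docker cp skywalking-oap:/skywalking/config/application.yml ./application.yml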

cluster:
  selector: ${SW_CLUSTER:standalone}
  standalone:
  # Please check your ZooKeeper is 3.5+, However, it is also compatible with ZooKeeper 3.4.x. Replace the ZooKeeper 3.5+
  # library the oap-libs folder with your ZooKeeper 3.4.x library.
  zookeeper:
    nameSpace: ${SW_NAMESPACE:""}
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
    # Retry Policy
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
    # Enable ACL
    enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
    schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
  kubernetes:
    watchTimeoutSeconds: ${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
    namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
    labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
    uidEnvName: ${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
  consul:
    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
    # Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
    hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
    aclToken: ${SW_CLUSTER_CONSUL_ACLTOKEN:""}
  nacos:
    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
    hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
    # Nacos Configuration namespace
    namespace: ${SW_CLUSTER_NACOS_NAMESPACE:"public"}
  etcd:
    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
    # etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
    hostPort: ${SW_CLUSTER_ETCD_HOST_PORT:localhost:2379}

core:
  selector: ${SW_CORE:default}
  default:
    # Mixed: Receive agent data, Level 1 aggregate, Level 2 aggregate
    # Receiver: Receive agent data, Level 1 aggregate
    # Aggregator: Level 2 aggregate
    role: ${SW_CORE_ROLE:Mixed} # Mixed/Receiver/Aggregator
    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
    restPort: ${SW_CORE_REST_PORT:12800}
    restContextPath: ${SW_CORE_REST_CONTEXT_PATH:/}
    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
    gRPCSslEnabled: ${SW_CORE_GRPC_SSL_ENABLED:false}
    gRPCSslKeyPath: ${SW_CORE_GRPC_SSL_KEY_PATH:""}
    gRPCSslCertChainPath: ${SW_CORE_GRPC_SSL_CERT_CHAIN_PATH:""}
    gRPCSslTrustedCAPath: ${SW_CORE_GRPC_SSL_TRUSTED_CA_PATH:""}
    downsampling:
      - Hour
      - Day
      - Month
    # Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.
    enableDataKeeperExecutor: ${SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR:true} # Turn it off then automatically metrics data delete will be close.
    dataKeeperExecutePeriod: ${SW_CORE_DATA_KEEPER_EXECUTE_PERIOD:5} # How often the data keeper executor runs periodically, unit is minute
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:90} # Unit is minute
    minuteMetricsDataTTL: ${SW_CORE_MINUTE_METRIC_DATA_TTL:90} # Unit is minute
    hourMetricsDataTTL: ${SW_CORE_HOUR_METRIC_DATA_TTL:36} # Unit is hour
    dayMetricsDataTTL: ${SW_CORE_DAY_METRIC_DATA_TTL:45} # Unit is day
    monthMetricsDataTTL: ${SW_CORE_MONTH_METRIC_DATA_TTL:18} # Unit is month
    # Cache metric data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute,
    # the metrics may not be accurate within that minute.
    enableDatabaseSession: ${SW_CORE_ENABLE_DATABASE_SESSION:true}
    topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10} # top_n record worker report cycle, unit is minute
    # Extra model column are the column defined by in the codes, These columns of model are not required logically in aggregation or further query,
    # and it will cause more load for memory, network of OAP and storage.
    # But, being activated, user could see the name in the storage entities, which make users easier to use 3rd party tool, such as Kibana->ES, to query the data by themselves.
    activeExtraModelColumns: ${SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS:false}

storage:
  selector: ${SW_STORAGE:h2}
  elasticsearch:
    nameSpace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
    trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
    trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
    # Those data TTL settings will override the same settings in core module.
    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
    advanced: ${SW_STORAGE_ES_ADVANCED:""}
  elasticsearch7:
    nameSpace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
    # trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
    # trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
    # Those data TTL settings will override the same settings in core module.
    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
    advanced: ${SW_STORAGE_ES_ADVANCED:""}
  h2:
    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
    user: ${SW_STORAGE_H2_USER:sa}
    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
  mysql:
    properties:
      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
      dataSource.user: ${SW_DATA_SOURCE_USER:root}
      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
  influxdb:
    # Metadata storage provider configuration
    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
    h2Props:
      dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
      dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
      dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
    mysqlProps:
      jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
      dataSource.user: ${SW_STORAGE_METABASE_USER:root}
      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
      dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
      dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
      dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
      dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
    metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
    # InfluxDB configuration
    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
    user: ${SW_STORAGE_INFLUXDB_USER:root}
    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request

receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    authentication: ${SW_AUTHENTICATION:""}
receiver-register:
  selector: ${SW_RECEIVER_REGISTER:default}
  default:

receiver-trace:
  selector: ${SW_RECEIVER_TRACE:default}
  default:
    bufferPath: ${SW_RECEIVER_BUFFER_PATH:../trace-buffer/}  # Path to trace buffer files, suggest to use absolute path
    bufferOffsetMaxFileSize: ${SW_RECEIVER_BUFFER_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
    bufferDataMaxFileSize: ${SW_RECEIVER_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
    bufferFileCleanWhenRestart: ${SW_RECEIVER_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
    sampleRate: ${SW_TRACE_SAMPLE_RATE:10000} # The sample rate precision is 1/10000. 10000 means 100% sample in default.
    slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100} # The slow database access thresholds. Unit ms.

receiver-jvm:
  selector: ${SW_RECEIVER_JVM:default}
  default:

receiver-clr:
  selector: ${SW_RECEIVER_CLR:default}
  default:

receiver-profile:
  selector: ${SW_RECEIVER_PROFILE:default}
  default:

service-mesh:
  selector: ${SW_SERVICE_MESH:default}
  default:
    bufferPath: ${SW_SERVICE_MESH_BUFFER_PATH:../mesh-buffer/}  # Path to trace buffer files, suggest to use absolute path
    bufferOffsetMaxFileSize: ${SW_SERVICE_MESH_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
    bufferDataMaxFileSize: ${SW_SERVICE_MESH_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
    bufferFileCleanWhenRestart: ${SW_SERVICE_MESH_BUFFER_FILE_CLEAN_WHEN_RESTART:false}

istio-telemetry:
  selector: ${SW_ISTIO_TELEMETRY:default}
  default:

envoy-metric:
  selector: ${SW_ENVOY_METRIC:default}
  default:
    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:""}

receiver_zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:-}
  default:
    host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
    port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
    contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}

receiver_jaeger:
  selector: ${SW_RECEIVER_JAEGER:-}
  default:
    gRPCHost: ${SW_RECEIVER_JAEGER_HOST:0.0.0.0}
    gRPCPort: ${SW_RECEIVER_JAEGER_PORT:14250}

query:
  selector: ${SW_QUERY:graphql}
  graphql:
    path: ${SW_QUERY_GRAPHQL_PATH:/graphql}

alarm:
  selector: ${SW_ALARM:default}
  default:

telemetry:
  selector: ${SW_TELEMETRY:none}
  none:
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
  so11y:
    prometheusExporterEnabled: ${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
    prometheusExporterHost: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    prometheusExporterPort: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}

receiver-so11y:
  selector: ${SW_RECEIVER_SO11Y:-}
  default:

configuration:
  selector: ${SW_CONFIGURATION:none}
  none:
  apollo:
    apolloMeta: http://106.12.25.204:8080
    apolloCluster: default
    apolloEnv: ""
    appId: skywalking
    period: 5
  nacos:
    # Nacos Server Host
    serverAddr: 127.0.0.1
    # Nacos Server Port
    port: 8848
    # Nacos Configuration Group
    group: 'skywalking'
    # Nacos Configuration namespace
    namespace: ''
    # Unit seconds, sync period. Default fetch every 60 seconds.
    period : 60
    # the name of current cluster, set the name if you want to upstream system known.
    clusterName: "default"
  zookeeper:
    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
    nameSpace: /default
    hostPort: localhost:2181
    # Retry Policy
    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
    maxRetries: 3 # max number of times to retry
  etcd:
    period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
    group :  'skywalking'
    serverAddr: localhost:2379
    clusterName: "default"
  consul:
    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
    hostAndPorts: ${consul.address}
    # Sync period in seconds. Defaults to 60 seconds.
    period: 1
    # Consul aclToken
    #aclToken: ${consul.aclToken}

exporter:
  selector: ${SW_EXPORTER:-}
  grpc:
    targetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
    targetPort: ${SW_EXPORTER_GRPC_PORT:9870}
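Once OAP starts with the elasticsearch7 storage, it creates its indices in ES. A quick sanity check (credentials and address as configured earlier; the index names should carry the configured nameSpace as a prefix):

curl -u elastic:XXXX 'http://192.168.1.XX:9200/_cat/indices?v'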

Configuration Notes

The configuration consists of the following main parts (an example of switching providers follows the lists):

1. Cluster coordination

  • standalone (default)
  • zookeeper
  • kubernetes
  • consul
  • nacos
  • etcd

2. core

3. Storage

  • elasticsearch
  • elasticsearch7
  • h2 (default)
  • mysql
  • influxdb

4. Receivers

5. Dynamic configuration

  • none (default)
  • apollo
  • nacos
  • zookeeper
  • etcd
  • consul
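Each module's active provider is chosen by its selector entry, which resolves an environment variable with a fallback default (e.g. ${SW_STORAGE:h2}). Switching providers is therefore just a matter of setting variables before OAP starts, or in the compose environment block as shown earlier, for example:

export SW_CLUSTER=zookeeper        # cluster coordination via ZooKeeper
export SW_STORAGE=elasticsearch7   # store metrics and traces in Elasticsearch 7
export SW_CONFIGURATION=nacos      # pull dynamic configuration from Nacos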

Cluster Installation

Installing the Agent on Application Servers

To use the agent probe, copy the apache-skywalking-apm-bin/agent directory onto each server where an instrumented service runs.
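For example, from the machine holding the distribution (host and target directory are placeholders):

scp -r apache-skywalking-apm-bin/agent user@app-server:/opt/skywalking-agent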

The directory structure is as follows:

.
├── activations     
│   ├── apm-toolkit-log4j-1.x-activation-6.6.0.jar
│   ├── apm-toolkit-log4j-2.x-activation-6.6.0.jar
│   ├── apm-toolkit-logback-1.x-activation-6.6.0.jar
│   ├── apm-toolkit-opentracing-activation-6.6.0.jar
│   └── apm-toolkit-trace-activation-6.6.0.jar
├── bootstrap-plugins
│   ├── apm-jdk-http-plugin-6.6.0.jar
│   └── apm-jdk-threading-plugin-6.6.0.jar
├── config
│   └── agent.config    # configuration file used by the agent
├── logs
├── optional-plugins   # optional plugins
│   ├── apm-armeria-0.85.x-plugin-6.6.0.jar
│   ├── apm-customize-enhance-plugin-6.6.0.jar
│   ├── apm-gson-2.x-plugin-6.6.0.jar
│   ├── apm-lettuce-5.x-plugin-6.6.0.jar
│   ├── apm-play-2.x-plugin-6.6.0.jar
│   ├── apm-spring-annotation-plugin-6.6.0.jar
│   ├── apm-spring-cloud-gateway-2.x-plugin-6.6.0.jar
│   ├── apm-spring-tx-plugin-6.6.0.jar
│   ├── apm-spring-webflux-5.x-plugin-6.6.0.jar
│   ├── apm-trace-ignore-plugin-6.6.0.jar
│   └── apm-zookeeper-3.4.x-plugin-6.6.0.jar
├── plugins     # plugins enabled by default
│   ├── apm-activemq-5.x-plugin-6.6.0.jar
│   ├── ... ...
│   └── tomcat-7.x-8.x-plugin-6.6.0.jar
└── skywalking-agent.jar

Some plugins can affect performance, so they ship under /optional-plugins; to use one, copy it into the /plugins folder.
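For example, to enable the Spring Cloud Gateway plugin listed above, run from the agent directory:

cp optional-plugins/apm-spring-cloud-gateway-2.x-plugin-6.6.0.jar plugins/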

agent.config

# The agent namespace
# agent.namespace=${SW_AGENT_NAMESPACE:default-namespace}

# The service name in UI
agent.service_name=${SW_AGENT_NAME:Your_ApplicationName}

# Number of traces sampled every 3 seconds. -1 means 100% sampling.
# agent.sample_n_per_3_secs=${SW_AGENT_SAMPLE:-1}

# Authentication active is based on backend setting, see application.yml for more details.
# agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx}

# Max number of spans in a single segment; useful for estimating the application's memory usage.
# agent.span_limit_per_segment=${SW_AGENT_SPAN_LIMIT:300}

# Ignore the segments if their operation names end with these suffix.
# agent.ignore_suffix=${SW_AGENT_IGNORE_SUFFIX:.jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg}

# If true, SkyWalking agent will save all instrumented classes files in `/debugging` folder.
# SkyWalking team may ask for these files in order to resolve compatible problem.
# agent.is_open_debugging_class = ${SW_AGENT_OPEN_DEBUG:true}

# The operationName max length
# agent.operation_name_threshold=${SW_AGENT_OPERATION_NAME_THRESHOLD:500}

# Backend service addresses.
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}

# Logging file_name
logging.file_name=${SW_LOGGING_FILE_NAME:skywalking-api.log}

# Logging level
logging.level=${SW_LOGGING_LEVEL:DEBUG}

# Logging dir
# logging.dir=${SW_LOGGING_DIR:""}

# Logging max_file_size, default: 300 * 1024 * 1024 = 314572800
# logging.max_file_size=${SW_LOGGING_MAX_FILE_SIZE:314572800}

# The max history log files. When rollover happened, if log files exceed this number,
# then the oldest file will be deleted. Negative or zero means off, by default.
# logging.max_history_files=${SW_LOGGING_MAX_HISTORY_FILES:-1}

# mysql plugin configuration
# plugin.mysql.trace_sql_parameters=${SW_MYSQL_TRACE_SQL_PARAMETERS:false}

Using the Agent

Running on a Server

# SkyWalking agent configuration
export SW_AGENT_NAME=demo-application # Agent (service) name. Typically just reuse the Spring Boot project's `spring.application.name`.
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800 # Collector (OAP) address.
export SW_AGENT_SPAN_LIMIT=2000 # Max spans per trace segment. Usually the default of 300 is enough; raise it mainly for projects newly onboarded to the agent whose code may produce very long traces.
export JAVA_AGENT=-javaagent:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar # Path to the SkyWalking agent jar.

# Start the jar
java $JAVA_AGENT -jar lab-39-demo-2.2.2.RELEASE.jar

Running in Docker

Configure the agent through environment variables (these must, of course, match the variables referenced in the configuration file and in the java launch command; see the sketch after this block):

    environment:
      - SW_AGENT_NAME=XXXXXX
      - SW_AGENT_COLLECTOR_BACKEND_SERVICES=192.168.1.xxx:11800
      - TRACK=-javaagent:/opt/skywalking-agent.jar
      - JAVA_OPTS=-Xms256m -Xmx512m -XX:SurvivorRatio=8 -XX:+UseConcMarkSweepGC
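A minimal sketch of the matching container start command that consumes these variables (app.jar is a placeholder for your application jar):

java $JAVA_OPTS $TRACK -jar app.jar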

IDEA Development

(Screenshot: IDEA interface)
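To trace an application launched from the IDE, attach the same agent through the run configuration's VM options. A sketch, assuming the agent was unpacked to /opt/skywalking-agent (agent settings can also be overridden as system properties using the skywalking. prefix):

-javaagent:/opt/skywalking-agent/skywalking-agent.jar
-Dskywalking.agent.service_name=demo-application
-Dskywalking.collector.backend_service=127.0.0.1:11800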

Reposted from blog.csdn.net/demon7552003/article/details/112598869