安装ES

docker安装

安装OAP和UI

docker-compose.yml

version: '3.3'
services:oap:image: apache/skywalking-oap-server:7.0.0-es7container_name: skywalking-oap#restart: alwayscommand: sleep 36000ports:- 11800:11800- 12800:12800environment:SW_STORAGE: elasticsearch7SW_ES_USER: elasticSW_ES_PASSWORD: XXXXSW_STORAGE_ES_CLUSTER_NODES: 192.168.1.XX:9200SW_NAMESPACE: sz_skywalking_indexSW_STORAGE_ES_BULK_ACTIONS: 4000SW_STORAGE_ES_FLUSH_INTERVAL: 30SW_STORAGE_ES_CONCURRENT_REQUESTS: 4SW_STORAGE_ES_QUERY_MAX_SIZE: 8000SW_STORAGE_ES_RECORD_DATA_TTL: 3SW_STORAGE_ES_OTHER_METRIC_DATA_TTL: 3SW_STORAGE_ES_MONTH_METRIC_DATA_TTL: 1volumes:- ./application.yml:/skywalking/config/application.yml  ui:image: apache/skywalking-ui:7.0.0container_name: skywalking-uidepends_on:- oaplinks:- oapports:- 8080:8080environment:SW_OAP_ADDRESS: oap:12800

application.yml

OAP的配置在 /skywalking/config 目录下,

cluster:
  selector: ${SW_CLUSTER:standalone}
  standalone:
  # Please check your ZooKeeper is 3.5+, However, it is also compatible with ZooKeeper 3.4.x. Replace the ZooKeeper 3.5+
  # library the oap-libs folder with your ZooKeeper 3.4.x library.
  zookeeper:
    nameSpace: ${SW_NAMESPACE:""}
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
    # Retry Policy
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
    # Enable ACL
    enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
    schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
  kubernetes:
    watchTimeoutSeconds: ${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
    namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
    labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
    uidEnvName: ${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
  consul:
    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
    # Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
    hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
    aclToken: ${SW_CLUSTER_CONSUL_ACLTOKEN:""}
  nacos:
    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
    hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
    # Nacos Configuration namespace
    namespace: ${SW_CLUSTER_NACOS_NAMESPACE:"public"}
  etcd:
    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
    # etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
    hostPort: ${SW_CLUSTER_ETCD_HOST_PORT:localhost:2379}
core:
  selector: ${SW_CORE:default}
  default:
    # Mixed: Receive agent data, Level 1 aggregate, Level 2 aggregate
    # Receiver: Receive agent data, Level 1 aggregate
    # Aggregator: Level 2 aggregate
    role: ${SW_CORE_ROLE:Mixed} # Mixed/Receiver/Aggregator
    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
    restPort: ${SW_CORE_REST_PORT:12800}
    restContextPath: ${SW_CORE_REST_CONTEXT_PATH:/}
    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
    gRPCSslEnabled: ${SW_CORE_GRPC_SSL_ENABLED:false}
    gRPCSslKeyPath: ${SW_CORE_GRPC_SSL_KEY_PATH:""}
    gRPCSslCertChainPath: ${SW_CORE_GRPC_SSL_CERT_CHAIN_PATH:""}
    gRPCSslTrustedCAPath: ${SW_CORE_GRPC_SSL_TRUSTED_CA_PATH:""}
    downsampling:
      - Hour
      - Day
      - Month
    # Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.
    enableDataKeeperExecutor: ${SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR:true} # Turn it off then automatically metrics data delete will be close.
    dataKeeperExecutePeriod: ${SW_CORE_DATA_KEEPER_EXECUTE_PERIOD:5} # How often the data keeper executor runs periodically, unit is minute
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:90} # Unit is minute
    minuteMetricsDataTTL: ${SW_CORE_MINUTE_METRIC_DATA_TTL:90} # Unit is minute
    hourMetricsDataTTL: ${SW_CORE_HOUR_METRIC_DATA_TTL:36} # Unit is hour
    dayMetricsDataTTL: ${SW_CORE_DAY_METRIC_DATA_TTL:45} # Unit is day
    monthMetricsDataTTL: ${SW_CORE_MONTH_METRIC_DATA_TTL:18} # Unit is month
    # Cache metric data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute,
    # the metrics may not be accurate within that minute.
    enableDatabaseSession: ${SW_CORE_ENABLE_DATABASE_SESSION:true}
    topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10} # top_n record worker report cycle, unit is minute
    # Extra model column are the column defined by in the codes, These columns of model are not required logically in aggregation or further query,
    # and it will cause more load for memory, network of OAP and storage.
    # But, being activated, user could see the name in the storage entities, which make users easier to use 3rd party tool, such as Kibana->ES, to query the data by themselves.
    activeExtraModelColumns: ${SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS:false}
storage:
  selector: ${SW_STORAGE:h2}
  elasticsearch:
    nameSpace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
    trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
    trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
    # Those data TTL settings will override the same settings in core module.
    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
    advanced: ${SW_STORAGE_ES_ADVANCED:""}
  elasticsearch7:
    nameSpace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
    # trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
    # trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
    enablePackedDownsampling: ${SW_STORAGE_ENABLE_PACKED_DOWNSAMPLING:true} # Hour and Day metrics will be merged into minute index.
    dayStep: ${SW_STORAGE_DAY_STEP:1} # Represent the number of days in the one minute/hour/day index.
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:""} # Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
    # Those data TTL settings will override the same settings in core module.
    recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
    otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
    monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
    # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
    resultWindowMaxSize: ${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}
    metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
    segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
    profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
    advanced: ${SW_STORAGE_ES_ADVANCED:""}
  h2:
    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
    user: ${SW_STORAGE_H2_USER:sa}
    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
  mysql:
    properties:
      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
      dataSource.user: ${SW_DATA_SOURCE_USER:root}
      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
  influxdb:
    # Metadata storage provider configuration
    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are 2 options as Metabase provider, H2 or MySQL.
    h2Props:
      dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
      dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
      dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
    mysqlProps:
      jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
      dataSource.user: ${SW_STORAGE_METABASE_USER:root}
      dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
      dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
      dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
      dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
      dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
    metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
    # InfluxDB configuration
    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
    user: ${SW_STORAGE_INFLUXDB_USER:root}
    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of fetch task log in a request
receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    authentication: ${SW_AUTHENTICATION:""}
receiver-register:
  selector: ${SW_RECEIVER_REGISTER:default}
  default:
receiver-trace:
  selector: ${SW_RECEIVER_TRACE:default}
  default:
    bufferPath: ${SW_RECEIVER_BUFFER_PATH:../trace-buffer/}  # Path to trace buffer files, suggest to use absolute path
    bufferOffsetMaxFileSize: ${SW_RECEIVER_BUFFER_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
    bufferDataMaxFileSize: ${SW_RECEIVER_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
    bufferFileCleanWhenRestart: ${SW_RECEIVER_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
    sampleRate: ${SW_TRACE_SAMPLE_RATE:10000} # The sample rate precision is 1/10000. 10000 means 100% sample in default.
    slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100} # The slow database access thresholds. Unit ms.
receiver-jvm:
  selector: ${SW_RECEIVER_JVM:default}
  default:
receiver-clr:
  selector: ${SW_RECEIVER_CLR:default}
  default:
receiver-profile:
  selector: ${SW_RECEIVER_PROFILE:default}
  default:
service-mesh:
  selector: ${SW_SERVICE_MESH:default}
  default:
    bufferPath: ${SW_SERVICE_MESH_BUFFER_PATH:../mesh-buffer/}  # Path to trace buffer files, suggest to use absolute path
    bufferOffsetMaxFileSize: ${SW_SERVICE_MESH_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
    bufferDataMaxFileSize: ${SW_SERVICE_MESH_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
    bufferFileCleanWhenRestart: ${SW_SERVICE_MESH_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
istio-telemetry:
  selector: ${SW_ISTIO_TELEMETRY:default}
  default:
envoy-metric:
  selector: ${SW_ENVOY_METRIC:default}
  default:
    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:""}
receiver_zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:-}
  default:
    host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
    port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
    contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
receiver_jaeger:
  selector: ${SW_RECEIVER_JAEGER:-}
  default:
    gRPCHost: ${SW_RECEIVER_JAEGER_HOST:0.0.0.0}
    gRPCPort: ${SW_RECEIVER_JAEGER_PORT:14250}
query:
  selector: ${SW_QUERY:graphql}
  graphql:
    path: ${SW_QUERY_GRAPHQL_PATH:/graphql}
alarm:
  selector: ${SW_ALARM:default}
  default:
telemetry:
  selector: ${SW_TELEMETRY:none}
  none:
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
  so11y:
    prometheusExporterEnabled: ${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
    prometheusExporterHost: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    prometheusExporterPort: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
receiver-so11y:
  selector: ${SW_RECEIVER_SO11Y:-}
  default:
configuration:
  selector: ${SW_CONFIGURATION:none}
  none:
  apollo:
    apolloMeta: http://106.12.25.204:8080
    apolloCluster: default
    apolloEnv: ""
    appId: skywalking
    period: 5
  nacos:
    # Nacos Server Host
    serverAddr: 127.0.0.1
    # Nacos Server Port
    port: 8848
    # Nacos Configuration Group
    group: 'skywalking'
    # Nacos Configuration namespace
    namespace: ''
    # Unit seconds, sync period. Default fetch every 60 seconds.
    period: 60
    # the name of current cluster, set the name if you want to upstream system known.
    clusterName: "default"
  zookeeper:
    period: 60 # Unit seconds, sync period. Default fetch every 60 seconds.
    nameSpace: /default
    hostPort: localhost:2181
    # Retry Policy
    baseSleepTimeMs: 1000 # initial amount of time to wait between retries
    maxRetries: 3 # max number of times to retry
  etcd:
    period: 60 # Unit seconds, sync period. Default fetch every 60 seconds.
    group: 'skywalking'
    serverAddr: localhost:2379
    clusterName: "default"
  consul:
    # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
    hostAndPorts: ${consul.address}
    # Sync period in seconds. Defaults to 60 seconds.
    period: 1
    # Consul aclToken
    # aclToken: ${consul.aclToken}
exporter:
  selector: ${SW_EXPORTER:-}
  grpc:
    targetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
    targetPort: ${SW_EXPORTER_GRPC_PORT:9870}

配置说明

配置主要包括以下几部分:

1、集群控制

  • standalone (默认值)
  • zookeeper
  • kubernetes
  • consul
  • nacos
  • etcd

2、core

3、存储

  • elasticsearch
  • elasticsearch7
  • h2 (默认值)
  • mysql
  • influxdb

4、receiver

5、配置

  • none (默认值)
  • apollo
  • nacos
  • zookeeper
  • etcd
  • consul

集群安装

服务器上安装agent

使用agent探针 需要把 apache-skywalking-apm-bin/agent 拷贝到服务运行服务器中。

目录结构如下:

.
├── activations
│   ├── apm-toolkit-log4j-1.x-activation-6.6.0.jar
│   ├── apm-toolkit-log4j-2.x-activation-6.6.0.jar
│   ├── apm-toolkit-logback-1.x-activation-6.6.0.jar
│   ├── apm-toolkit-opentracing-activation-6.6.0.jar
│   └── apm-toolkit-trace-activation-6.6.0.jar
├── bootstrap-plugins
│   ├── apm-jdk-http-plugin-6.6.0.jar
│   └── apm-jdk-threading-plugin-6.6.0.jar
├── config
│   └── agent.config    #agent使用的配置文件
├── logs
├── optional-plugins   #可选的插件。
│   ├── apm-armeria-0.85.x-plugin-6.6.0.jar
│   ├── apm-customize-enhance-plugin-6.6.0.jar
│   ├── apm-gson-2.x-plugin-6.6.0.jar
│   ├── apm-lettuce-5.x-plugin-6.6.0.jar
│   ├── apm-play-2.x-plugin-6.6.0.jar
│   ├── apm-spring-annotation-plugin-6.6.0.jar
│   ├── apm-spring-cloud-gateway-2.x-plugin-6.6.0.jar
│   ├── apm-spring-tx-plugin-6.6.0.jar
│   ├── apm-spring-webflux-5.x-plugin-6.6.0.jar
│   ├── apm-trace-ignore-plugin-6.6.0.jar
│   └── apm-zookeeper-3.4.x-plugin-6.6.0.jar
├── plugins     #
│   ├── apm-activemq-5.x-plugin-6.6.0.jar
│   ├── ... ...
│   └── tomcat-7.x-8.x-plugin-6.6.0.jar
└── skywalking-agent.jar

有些会对性能有影响的插件放在 /optional-plugins 文件夹下,想要使用的话,需要复制到 /plugins 文件夹下。

agent.config

# The agent namespace
# agent.namespace=${SW_AGENT_NAMESPACE:default-namespace}

# The service name in UI
agent.service_name=${SW_AGENT_NAME:Your_ApplicationName}

# Number of samples per 3 seconds. -1 means 100% sampling.
# agent.sample_n_per_3_secs=${SW_AGENT_SAMPLE:-1}

# Authentication active is based on backend setting, see application.yml for more details.
# agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx}

# Max number of spans in a single segment; can be used to estimate application memory usage.
# agent.span_limit_per_segment=${SW_AGENT_SPAN_LIMIT:300}

# Ignore the segments if their operation names end with these suffix.
# agent.ignore_suffix=${SW_AGENT_IGNORE_SUFFIX:.jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg}

# If true, SkyWalking agent will save all instrumented classes files in `/debugging` folder.
# SkyWalking team may ask for these files in order to resolve compatible problem.
# agent.is_open_debugging_class = ${SW_AGENT_OPEN_DEBUG:true}

# The operationName max length
# agent.operation_name_threshold=${SW_AGENT_OPERATION_NAME_THRESHOLD:500}

# Backend service addresses.
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}

# Logging file_name
logging.file_name=${SW_LOGGING_FILE_NAME:skywalking-api.log}

# Logging level
logging.level=${SW_LOGGING_LEVEL:DEBUG}

# Logging dir
# logging.dir=${SW_LOGGING_DIR:""}

# Logging max_file_size, default: 300 * 1024 * 1024 = 314572800
# logging.max_file_size=${SW_LOGGING_MAX_FILE_SIZE:314572800}

# The max history log files. When rollover happened, if log files exceed this number,
# then the oldest file will be delete. Negative or zero means off, by default.
# logging.max_history_files=${SW_LOGGING_MAX_HISTORY_FILES:-1}

# mysql plugin configuration
# plugin.mysql.trace_sql_parameters=${SW_MYSQL_TRACE_SQL_PARAMETERS:false}

使用agent

服务器运行

# SkyWalking Agent configuration.
export SW_AGENT_NAME=demo-application # Agent (service) name; usually the Spring Boot project's `spring.application.name`.
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800 # OAP collector address.
export SW_AGENT_SPAN_LIMIT=2000 # Max spans per trace segment; default is 300, usually no need to change.
export JAVA_AGENT=-javaagent:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar # Path to the SkyWalking agent jar.

# Launch the application jar with the agent attached.
java $JAVA_AGENT -jar lab-39-demo-2.2.2.RELEASE.jar

docker运行

通过环境变量设置(当然要匹配配置文件变量和 java 启动运行命令)

    environment:
      - SW_AGENT_NAME=XXXXXX
      - SW_AGENT_COLLECTOR_BACKEND_SERVICES=192.168.1.xxx:11800
      - TRACK=-javaagent:/opt/skywalking-agent.jar
      - JAVA_OPTS=-Xms256m -Xmx512m -XX:SurvivorRatio=8 -XX:+UseConcMarkSweepGC

idea开发

skywalking环境搭建相关推荐

  1. SkyWalking环境搭建与使用

    一.SkyWalking简介 SkyWalking是一个国产开源框架,并加入Apache孵化器.简介来自官方文档. SkyWalking是 一个开源的可观测平台,用于从服务和云原生基础设施收集,分析 ...

  2. Skywalking环境搭建及demo实战

    在微服务应用非常广的今天,面临必要的问题 一个大型的互联网项目,采用微服务的架构设计,可能保证整个项目的完整运营就需要几十上百个服务的互相协作,那如果某个服务突然宕机或出现死锁等bug,怎么办?这个时 ...

  3. skywalking环境搭建及使用

    1.创建目录 mkdir /usr/local/skywalking 建议将虚拟机内存设置为3G并将CPU设置为2核,防止资 源不足. 2.将资源目录中的elasticsearch和skywalkin ...

  4. SkyWalking环境搭建(elasticsearch7)

    主要有部分内容 oap服务:接收和保存采集的数据,同时个UI展示 SkyWalking Website:展示oap服务的数据 SkyWalking agent:采集应用数据 下载安装包 下载地址:ht ...

  5. SkyWalking Liunx 环境搭建NetCore接入

    背景 前两天看见有小哥介绍windows下安装skywalking的介绍地址. 正好最近也在搭建linux环境的SkyWalking,顺便把linux环境搭建的经验分享下,帮助下使用linux部署Do ...

  6. 普罗米修斯监控系统_基于Prometheus和Grafana的监控平台 - 环境搭建

    导读 微服务中的监控分根据作用领域分为三大类,Logging,Tracing,Metrics. Logging - 用于记录离散的事件.例如,应用程序的调试信息或错误信息.它是我们诊断问题的依据.比如 ...

  7. Anaconda3+python3.7.10+TensorFlow2.3.0+PyQt5环境搭建

    Anaconda3+python3.7.10+TensorFlow2.3.0+PyQt5环境搭建 一.Anaconda 创建 python3.7环境 1.进入 C:\Users\用户名 目录下,找到 ...

  8. Windows10+Anaconda3+Pycharm环境搭建

    Windows10+Anaconda3+Pycharm环境搭建 一.安装Anaconda3 1.进入 anconda官网,下载Anaconda3 2.安装,选项默认继续安装(可以自行更改安装位置),等 ...

  9. SpringCloud Alibaba微服务实战(一) - 基础环境搭建

    说在前面 Spring Cloud Alibaba 致力于提供微服务开发的一站式解决方案.此项目包含开发分布式应用微服务的必需组件,方便开发者通过 Spring Cloud 编程模型轻松使用这些组件来 ...

最新文章

  1. 爱奇艺才被做空又爆裁员,技术研发为裁员重灾区
  2. java B2B2C Springboot仿淘宝电子商城系统-负载均衡之ribbon+feign
  3. 阻碍GIS产业发展的三大问题
  4. boost::parameter::python相关的测试程序
  5. ZooKeeper 集群:集群概念、选举流程、机器数量
  6. 【ST表】【单调队列】Window(jzoj 1326)
  7. Android MVP模式简单易懂的介绍方式 (一)
  8. index.wxss 导入不显示结果
  9. python list中append()与extend()用法
  10. 风云的银光志Silverlight4.0教程之遍历访问客户端用户的本地文件
  11. css position, display, float 内联元素、块级元素
  12. Dubbo源码解读:appendAnnotation [01]
  13. UrlRewrite重写url
  14. Android根据经纬度计算距离
  15. 智慧商圈支付宝小程序(ISV系统服务商开发模式)
  16. Vue element 自定义表单验证(验证联系方式、邮箱、邮政编码)
  17. HTML协议目标端口和源端口,协议:TCP源IP:源端口:80目的IP:目的端口:4049TT? 爱问知识人...
  18. 蓝桥杯——青蛙过河(JAVA)
  19. C++的STL库,vector sort排序时间复杂度 及常见容器比较
  20. 区块链落地就看这一“会”了

热门文章

  1. delphi开发日志——基窗体,使用面向对象编程的编程思想创建基类
  2. 有关数据库表被锁定的问题
  3. POJ 1611 The Suspects (并查集)
  4. js 获取时间戳的方法
  5. python3之日期和时间(转载)
  6. Docker服务器的图形显示方案
  7. linux文件读写 文件锁、select、poll【转】
  8. 从 github 执行 git clone 一个大的项目时提示 error: RPC failed
  9. android旋转动画和平移动画具体解释,补充说一下假设制作gif动画放到csdn博客上...
  10. [Everyday Mathematics]20150221