In this blog I will explain how to enable AWS CloudWatch logging for your Mule CloudHub application. AWS provides the CloudWatch Logs service so that you can manage your logs better, and it is cheaper than Splunk. Because CloudHub automatically rolls over logs larger than 100 MB, we need a mechanism to manage logs more effectively. For this we created the custom appender below, which ships the logs to CloudWatch.

package com.javaroots.appenders;

import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.toList;

import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Formatter;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.Filter;
import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginElement;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;
import org.apache.logging.log4j.status.StatusLogger;

import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.model.CreateLogGroupRequest;
import com.amazonaws.services.logs.model.CreateLogStreamRequest;
import com.amazonaws.services.logs.model.CreateLogStreamResult;
import com.amazonaws.services.logs.model.DataAlreadyAcceptedException;
import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;
import com.amazonaws.services.logs.model.DescribeLogStreamsRequest;
import com.amazonaws.services.logs.model.InputLogEvent;
import com.amazonaws.services.logs.model.InvalidSequenceTokenException;
import com.amazonaws.services.logs.model.LogGroup;
import com.amazonaws.services.logs.model.LogStream;
import com.amazonaws.services.logs.model.PutLogEventsRequest;
import com.amazonaws.services.logs.model.PutLogEventsResult;

@Plugin(name = "CLOUDW", category = "Core", elementType = "appender", printObject = true)
public class CloudwatchAppender extends AbstractAppender {

    private static final long serialVersionUID = 12321345L;

    private static Logger logger2 = LogManager.getLogger(CloudwatchAppender.class);

    private final Boolean DEBUG_MODE = System.getProperty("log4j.debug") != null;

    /**
     * Used to make sure that on close() our daemon thread isn't also trying to sendMessage()s
     */
    private Object sendMessagesLock = new Object();

    /**
     * The queue used to buffer log entries
     */
    private LinkedBlockingQueue<LogEvent> loggingEventsQueue;

    /**
     * The AWS Cloudwatch Logs API client
     */
    private AWSLogs awsLogsClient;

    private AtomicReference<String> lastSequenceToken = new AtomicReference<>();

    /**
     * The AWS Cloudwatch Log group name
     */
    private String logGroupName;

    /**
     * The AWS Cloudwatch Log stream name
     */
    private String logStreamName;

    /**
     * The queue / buffer size
     */
    private int queueLength = 1024;

    /**
     * The maximum number of log entries to send in one go to the AWS Cloudwatch Log service
     */
    private int messagesBatchSize = 128;

    private AtomicBoolean cloudwatchAppenderInitialised = new AtomicBoolean(false);

    private CloudwatchAppender(final String name,
                               final Layout<? extends Serializable> layout,
                               final Filter filter,
                               final boolean ignoreExceptions,
                               String logGroupName,
                               String logStreamName,
                               Integer queueLength,
                               Integer messagesBatchSize) {
        super(name, filter, layout, ignoreExceptions);
        this.logGroupName = logGroupName;
        this.logStreamName = logStreamName;
        this.queueLength = queueLength;
        this.messagesBatchSize = messagesBatchSize;
        this.activateOptions();
    }

    @Override
    public void append(LogEvent event) {
        if (cloudwatchAppenderInitialised.get()) {
            loggingEventsQueue.offer(event);
        } else {
            // just do nothing
        }
    }

    public void activateOptions() {
        if (isBlank(logGroupName) || isBlank(logStreamName)) {
            logger2.error("Could not initialise CloudwatchAppender because either or both LogGroupName(" + logGroupName + ") and LogStreamName(" + logStreamName + ") are null or empty");
            this.stop();
        } else {
            // below lines work with aws version 1.9.40 for local build
            // this.awsLogsClient = new AWSLogsClient();
            // awsLogsClient.setRegion(Region.getRegion(Regions.AP_SOUTHEAST_2));
            this.awsLogsClient = com.amazonaws.services.logs.AWSLogsClientBuilder.standard()
                    .withRegion(Regions.AP_SOUTHEAST_2)
                    .build();
            loggingEventsQueue = new LinkedBlockingQueue<>(queueLength);
            try {
                initializeCloudwatchResources();
                initCloudwatchDaemon();
                cloudwatchAppenderInitialised.set(true);
            } catch (Exception e) {
                logger2.error("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName, e);
                if (DEBUG_MODE) {
                    System.err.println("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName);
                    e.printStackTrace();
                }
            }
        }
    }

    private void initCloudwatchDaemon() {
        Thread t = new Thread(() -> {
            while (true) {
                try {
                    if (loggingEventsQueue.size() > 0) {
                        sendMessages();
                    }
                    Thread.sleep(20L);
                } catch (InterruptedException e) {
                    if (DEBUG_MODE) {
                        e.printStackTrace();
                    }
                }
            }
        });
        t.setName("CloudwatchThread");
        t.setDaemon(true);
        t.start();
    }

    private void sendMessages() {
        synchronized (sendMessagesLock) {
            LogEvent polledLoggingEvent;
            final Layout<? extends Serializable> layout = getLayout();
            List<LogEvent> loggingEvents = new ArrayList<>();
            try {
                while ((polledLoggingEvent = loggingEventsQueue.poll()) != null
                        && loggingEvents.size() <= messagesBatchSize) {
                    loggingEvents.add(polledLoggingEvent);
                }
                List<InputLogEvent> inputLogEvents = loggingEvents.stream()
                        .map(loggingEvent -> new InputLogEvent()
                                .withTimestamp(loggingEvent.getTimeMillis())
                                .withMessage(layout == null
                                        ? loggingEvent.getMessage().getFormattedMessage()
                                        : new String(layout.toByteArray(loggingEvent), StandardCharsets.UTF_8)))
                        .sorted(comparing(InputLogEvent::getTimestamp))
                        .collect(toList());
                if (!inputLogEvents.isEmpty()) {
                    PutLogEventsRequest putLogEventsRequest = new PutLogEventsRequest(
                            logGroupName,
                            logStreamName,
                            inputLogEvents);
                    try {
                        putLogEventsRequest.setSequenceToken(lastSequenceToken.get());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                    } catch (DataAlreadyAcceptedException dataAlreadyAcceptedExcepted) {
                        putLogEventsRequest.setSequenceToken(dataAlreadyAcceptedExcepted.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            dataAlreadyAcceptedExcepted.printStackTrace();
                        }
                    } catch (InvalidSequenceTokenException invalidSequenceTokenException) {
                        putLogEventsRequest.setSequenceToken(invalidSequenceTokenException.getExpectedSequenceToken());
                        PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);
                        lastSequenceToken.set(result.getNextSequenceToken());
                        if (DEBUG_MODE) {
                            invalidSequenceTokenException.printStackTrace();
                        }
                    }
                }
            } catch (Exception e) {
                if (DEBUG_MODE) {
                    logger2.error(" error inserting cloudwatch:", e);
                    e.printStackTrace();
                }
            }
        }
    }

    private void initializeCloudwatchResources() {
        DescribeLogGroupsRequest describeLogGroupsRequest = new DescribeLogGroupsRequest();
        describeLogGroupsRequest.setLogGroupNamePrefix(logGroupName);
        Optional<LogGroup> logGroupOptional = awsLogsClient
                .describeLogGroups(describeLogGroupsRequest)
                .getLogGroups()
                .stream()
                .filter(logGroup -> logGroup.getLogGroupName().equals(logGroupName))
                .findFirst();
        if (!logGroupOptional.isPresent()) {
            CreateLogGroupRequest createLogGroupRequest = new CreateLogGroupRequest().withLogGroupName(logGroupName);
            awsLogsClient.createLogGroup(createLogGroupRequest);
        }
        DescribeLogStreamsRequest describeLogStreamsRequest = new DescribeLogStreamsRequest()
                .withLogGroupName(logGroupName)
                .withLogStreamNamePrefix(logStreamName);
        Optional<LogStream> logStreamOptional = awsLogsClient
                .describeLogStreams(describeLogStreamsRequest)
                .getLogStreams()
                .stream()
                .filter(logStream -> logStream.getLogStreamName().equals(logStreamName))
                .findFirst();
        if (!logStreamOptional.isPresent()) {
            CreateLogStreamRequest createLogStreamRequest = new CreateLogStreamRequest()
                    .withLogGroupName(logGroupName)
                    .withLogStreamName(logStreamName);
            CreateLogStreamResult o = awsLogsClient.createLogStream(createLogStreamRequest);
        }
    }

    private boolean isBlank(String string) {
        return null == string || string.trim().length() == 0;
    }

    protected String getSimpleStacktraceAsString(final Throwable thrown) {
        final StringBuilder stackTraceBuilder = new StringBuilder();
        for (StackTraceElement stackTraceElement : thrown.getStackTrace()) {
            new Formatter(stackTraceBuilder).format("%s.%s(%s:%d)%n",
                    stackTraceElement.getClassName(),
                    stackTraceElement.getMethodName(),
                    stackTraceElement.getFileName(),
                    stackTraceElement.getLineNumber());
        }
        return stackTraceBuilder.toString();
    }

    @Override
    public void start() {
        super.start();
    }

    @Override
    public void stop() {
        super.stop();
        while (loggingEventsQueue != null && !loggingEventsQueue.isEmpty()) {
            this.sendMessages();
        }
    }

    @Override
    public String toString() {
        return CloudwatchAppender.class.getSimpleName() + "{"
                + "name=" + getName()
                + " loggroupName=" + logGroupName
                + " logstreamName=" + logStreamName;
    }

    @PluginFactory
    @SuppressWarnings("unused")
    public static CloudwatchAppender createCloudWatchAppender(
            @PluginAttribute(value = "queueLength") Integer queueLength,
            @PluginElement("Layout") Layout<? extends Serializable> layout,
            @PluginAttribute(value = "logGroupName") String logGroupName,
            @PluginAttribute(value = "logStreamName") String logStreamName,
            @PluginAttribute(value = "name") String name,
            @PluginAttribute(value = "ignoreExceptions", defaultBoolean = false) Boolean ignoreExceptions,
            @PluginAttribute(value = "messagesBatchSize") Integer messagesBatchSize) {
        return new CloudwatchAppender(name, layout, null, ignoreExceptions, logGroupName, logStreamName, queueLength, messagesBatchSize);
    }
}
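The appender builds its AWSLogs client with the SDK's default credential provider chain (for example the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables), so before deploying it is worth confirming that the credentials and region the application will run with can actually reach CloudWatch Logs. The following is a minimal sketch, not part of the appender itself; the class name and the log-group prefix are illustrative assumptions, and it reuses the same SDK calls the appender makes.

package com.javaroots.appenders;

import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.AWSLogsClientBuilder;
import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;

// Hypothetical one-off connectivity check: it only lists log groups and prints their names,
// so it fails fast if the credentials, region, or IAM permissions are wrong.
public class CloudwatchConnectivityCheck {

    public static void main(String[] args) {
        AWSLogs client = AWSLogsClientBuilder.standard()
                .withRegion(Regions.AP_SOUTHEAST_2) // same region the appender is hard-coded to use
                .build();

        client.describeLogGroups(new DescribeLogGroupsRequest().withLogGroupNamePrefix("test-log-stream"))
              .getLogGroups()
              .forEach(group -> System.out.println("Found log group: " + group.getLogGroupName()));
    }
}

If this prints the expected log groups (or none, without throwing), the appender should be able to create and write to its log stream with the same credentials.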

We add the required dependencies to our pom.xml file.

<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-logs</artifactId>
    <!-- for local 3.8.5 we need to use this version; cloudhub 3.8.5 has jackson 2.6.6 -->
    <!-- <version>1.9.40</version> -->
    <version>1.11.105</version>
    <exclusions>
        <exclusion>
            <!-- declare the exclusion here -->
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-1.2-api</artifactId>
        </exclusion>
        <exclusion>
            <!-- declare the exclusion here -->
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-core</artifactId>
        </exclusion>
        <exclusion>
            <!-- declare the exclusion here -->
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-api -->
<dependency>
    <groupId>org.apache.logging.log4j</groupId>
    <artifactId>log4j-api</artifactId>
    <version>2.5</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core -->
<dependency>
    <groupId>org.apache.logging.log4j</groupId>
    <artifactId>log4j-core</artifactId>
    <version>2.5</version>
</dependency>

Now we need to modify our log4j2.xml: add the custom CloudWatch appender as well as the CloudHub log appender, so that the logs still show up in CloudHub too. Note that the packages attribute of the Configuration element must include the package that contains the custom appender (com.javaroots.appenders for the class above).

<?xml version="1.0" encoding="utf-8"?>
<Configuration status="trace" packages="com.javaroots.appenders,com.mulesoft.ch.logging.appender">

    <!-- These are some of the loggers you can enable. There are several more you can find in the documentation.
         Besides this log4j configuration, you can also use Java VM environment variables to enable other logs
         like network (-Djavax.net.debug=ssl or all) and Garbage Collector (-XX:+PrintGC). These will be appended
         to the console, so you will see them in the mule_ee.log file. -->

    <Appenders>
        <CLOUDW name="CloudW"
                logGroupName="test-log-stream"
                logStreamName="test44"
                messagesBatchSize="${sys:cloudwatch.msg.batch.size}"
                queueLength="${sys:cloudwatch.queue.length}">
            <PatternLayout pattern="%d [%t] %-5p %c - %m%n"/>
        </CLOUDW>

        <Log4J2CloudhubLogAppender name="CLOUDHUB"
                addressProvider="com.mulesoft.ch.logging.DefaultAggregatorAddressProvider"
                applicationContext="com.mulesoft.ch.logging.DefaultApplicationContext"
                appendRetryIntervalMs="${sys:logging.appendRetryInterval}"
                appendMaxAttempts="${sys:logging.appendMaxAttempts}"
                batchSendIntervalMs="${sys:logging.batchSendInterval}"
                batchMaxRecords="${sys:logging.batchMaxRecords}"
                memBufferMaxSize="${sys:logging.memBufferMaxSize}"
                journalMaxWriteBatchSize="${sys:logging.journalMaxBatchSize}"
                journalMaxFileSize="${sys:logging.journalMaxFileSize}"
                clientMaxPacketSize="${sys:logging.clientMaxPacketSize}"
                clientConnectTimeoutMs="${sys:logging.clientConnectTimeout}"
                clientSocketTimeoutMs="${sys:logging.clientSocketTimeout}"
                serverAddressPollIntervalMs="${sys:logging.serverAddressPollInterval}"
                serverHeartbeatSendIntervalMs="${sys:logging.serverHeartbeatSendIntervalMs}"
                statisticsPrintIntervalMs="${sys:logging.statisticsPrintIntervalMs}">
            <PatternLayout pattern="[%d{MM-dd HH:mm:ss}] %-5p %c{1} [%t] CUSTOM: %m%n"/>
        </Log4J2CloudhubLogAppender>
    </Appenders>

    <Loggers>
        <!-- Http Logger shows wire traffic on DEBUG -->
        <AsyncLogger name="org.mule.module.http.internal.HttpMessageLogger" level="WARN"/>
        <!-- JDBC Logger shows queries and parameter values on DEBUG -->
        <AsyncLogger name="com.mulesoft.mule.transport.jdbc" level="WARN"/>
        <!-- CXF is used heavily by Mule for web services -->
        <AsyncLogger name="org.apache.cxf" level="WARN"/>
        <!-- Apache Commons tend to make a lot of noise which can clutter the log -->
        <AsyncLogger name="org.apache" level="WARN"/>
        <!-- Reduce startup noise -->
        <AsyncLogger name="org.springframework.beans.factory" level="WARN"/>
        <!-- Mule classes -->
        <AsyncLogger name="org.mule" level="INFO"/>
        <AsyncLogger name="com.mulesoft" level="INFO"/>
        <!-- Reduce DM verbosity -->
        <AsyncLogger name="org.jetel" level="WARN"/>
        <AsyncLogger name="Tracking" level="WARN"/>

        <AsyncRoot level="INFO">
            <AppenderRef ref="CLOUDHUB" level="INFO"/>
            <AppenderRef ref="CloudW" level="INFO"/>
        </AsyncRoot>
    </Loggers>
</Configuration>
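The ${sys:cloudwatch.queue.length} and ${sys:cloudwatch.msg.batch.size} placeholders are resolved from Java system properties. As an illustrative example (the property names simply match the placeholders above, and the values mirror the defaults in the appender class), for a local test run they could be passed as JVM arguments; on CloudHub they would typically be supplied as application properties in Runtime Manager:

-Dcloudwatch.queue.length=1024
-Dcloudwatch.msg.batch.size=128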

Finally, we need to disable CloudHub logs for the application in CloudHub Runtime Manager so that this custom log4j2 configuration takes effect.

This works with CloudHub Mule runtime version 3.8.4. There are some issues with CloudHub runtime 3.8.5: the appender initializes correctly and sends logs, but some events and messages go missing.

Translated from: https://www.javacodegeeks.com/2017/10/integrate-cloudwatch-logs-cloudhub-mule.html
