Elasticsearch 2.3.5 source package structure (screenshot):

Let's first look at the Node class; Sonar starts Elasticsearch by calling Node's start() method.
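
The class Javadoc below points to NodeBuilder for creating a node and reminds callers to close() it when finished. As a rough orientation before reading the listing, here is a minimal sketch of how an embedding application would typically build, start, and shut down a 2.3.x node. It is illustrative only, not Sonar's actual bootstrap code; the cluster name and paths are made-up values.

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;

public class EmbeddedNodeSketch {
    public static void main(String[] args) {
        // Made-up settings for illustration; a real embedder derives these from its own configuration.
        Settings settings = Settings.settingsBuilder()
                .put("cluster.name", "demo-cluster")
                .put("path.home", "/tmp/es-home")
                .put("http.enabled", true)
                .build();

        // NodeBuilder.node() runs the Node constructor shown below and then calls start();
        // build() would return the node without starting it.
        Node node = NodeBuilder.nodeBuilder().settings(settings).node();
        try {
            Client client = node.client(); // entry point for requests; see the usage sketch after the listing
        } finally {
            node.close(); // stops the node if started, then releases every service
        }
    }
}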

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.node;

import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClientModule;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterNameModule;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.NodeEnvironmentModule;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerModule;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.index.search.shape.ShapeModule;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.memory.IndexingMemoryController;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.monitor.MonitorModule;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.percolator.PercolatorModule;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsModule;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestModule;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.snapshots.SnapshotShardsService;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolModule;
import org.elasticsearch.transport.TransportModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.tribe.TribeModule;
import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.watcher.ResourceWatcherModule;
import org.elasticsearch.watcher.ResourceWatcherService;

import java.io.BufferedWriter;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;

/**
 * A node represent a node within a cluster (<tt>cluster.name</tt>). The {@link #client()} can be used
 * in order to use a {@link Client} to perform actions/operations against the cluster.
 * <p>In order to create a node, the {@link NodeBuilder} can be used. When done with it, make sure to
 * call {@link #close()} on it.
 */
public class Node implements Releasable {

    private static final String CLIENT_TYPE = "node";
    public static final String HTTP_ENABLED = "http.enabled";
    private final Lifecycle lifecycle = new Lifecycle();
    private final Injector injector;
    private final Settings settings;
    private final Environment environment;
    private final PluginsService pluginsService;
    private final Client client;

    /**
     * Constructs a node with the given settings.
     *
     * @param preparedSettings Base settings to configure the node with
     */
    public Node(Settings preparedSettings) {
        this(InternalSettingsPreparer.prepareEnvironment(preparedSettings, null), Version.CURRENT, Collections.<Class<? extends Plugin>>emptyList());
    }

    protected Node(Environment tmpEnv, Version version, Collection<Class<? extends Plugin>> classpathPlugins) {
        Settings tmpSettings = settingsBuilder().put(tmpEnv.settings())
                .put(Client.CLIENT_TYPE_SETTING, CLIENT_TYPE).build();
        tmpSettings = TribeService.processSettings(tmpSettings);

        ESLogger logger = Loggers.getLogger(Node.class, tmpSettings.get("name"));
        logger.info("version[{}], pid[{}], build[{}/{}]", version, JvmInfo.jvmInfo().pid(), Build.CURRENT.hashShort(), Build.CURRENT.timestamp());
        logger.info("initializing ...");

        if (logger.isDebugEnabled()) {
            logger.debug("using config [{}], data [{}], logs [{}], plugins [{}]",
                    tmpEnv.configFile(), Arrays.toString(tmpEnv.dataFiles()), tmpEnv.logsFile(), tmpEnv.pluginsFile());
        }

        this.pluginsService = new PluginsService(tmpSettings, tmpEnv.modulesFile(), tmpEnv.pluginsFile(), classpathPlugins);
        this.settings = pluginsService.updatedSettings();
        // create the environment based on the finalized (processed) view of the settings
        this.environment = new Environment(this.settings());

        final NodeEnvironment nodeEnvironment;
        try {
            nodeEnvironment = new NodeEnvironment(this.settings, this.environment);
        } catch (IOException ex) {
            throw new IllegalStateException("Failed to created node environment", ex);
        }

        final ThreadPool threadPool = new ThreadPool(settings);
        NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();

        boolean success = false;
        try {
            ModulesBuilder modules = new ModulesBuilder();
            modules.add(new Version.Module(version));
            modules.add(new CircuitBreakerModule(settings));
            // plugin modules must be added here, before others or we can get crazy injection errors...
            for (Module pluginModule : pluginsService.nodeModules()) {
                modules.add(pluginModule);
            }
            modules.add(new PluginsModule(pluginsService));
            modules.add(new SettingsModule(this.settings));
            modules.add(new NodeModule(this));
            modules.add(new NetworkModule(namedWriteableRegistry));
            modules.add(new ScriptModule(this.settings));
            modules.add(new EnvironmentModule(environment));
            modules.add(new NodeEnvironmentModule(nodeEnvironment));
            modules.add(new ClusterNameModule(this.settings));
            modules.add(new ThreadPoolModule(threadPool));
            modules.add(new DiscoveryModule(this.settings));
            modules.add(new ClusterModule(this.settings));
            modules.add(new RestModule(this.settings));
            modules.add(new TransportModule(settings, namedWriteableRegistry));
            if (settings.getAsBoolean(HTTP_ENABLED, true)) {
                modules.add(new HttpServerModule(settings));
            }
            modules.add(new IndicesModule());
            modules.add(new SearchModule());
            modules.add(new ActionModule(false));
            modules.add(new MonitorModule(settings));
            modules.add(new GatewayModule(settings));
            modules.add(new NodeClientModule());
            modules.add(new ShapeModule());
            modules.add(new PercolatorModule());
            modules.add(new ResourceWatcherModule());
            modules.add(new RepositoriesModule());
            modules.add(new TribeModule());

            pluginsService.processModules(modules);

            injector = modules.createInjector();

            client = injector.getInstance(Client.class);
            threadPool.setNodeSettingsService(injector.getInstance(NodeSettingsService.class));

            success = true;
        } finally {
            if (!success) {
                nodeEnvironment.close();
                ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
            }
        }

        logger.info("initialized");
    }

    /**
     * The settings that were used to create the node.
     */
    public Settings settings() {
        return this.settings;
    }

    /**
     * A client that can be used to execute actions (operations) against the cluster.
     */
    public Client client() {
        return client;
    }

    /**
     * Start the node. If the node is already started, this method is no-op.
     */
    public Node start() {
        if (!lifecycle.moveToStarted()) {
            return this;
        }

        ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
        logger.info("starting ...");

        // hack around dependency injection problem (for now...)
        injector.getInstance(Discovery.class).setRoutingService(injector.getInstance(RoutingService.class));

        for (Class<? extends LifecycleComponent> plugin : pluginsService.nodeServices()) {
            injector.getInstance(plugin).start();
        }

        injector.getInstance(MappingUpdatedAction.class).setClient(client);
        injector.getInstance(IndicesService.class).start();
        injector.getInstance(IndexingMemoryController.class).start();
        injector.getInstance(IndicesClusterStateService.class).start();
        injector.getInstance(IndicesTTLService.class).start();
        injector.getInstance(SnapshotsService.class).start();
        injector.getInstance(SnapshotShardsService.class).start();
        injector.getInstance(RoutingService.class).start();
        injector.getInstance(SearchService.class).start();
        injector.getInstance(MonitorService.class).start();
        injector.getInstance(RestController.class).start();

        // TODO hack around circular dependencies problems
        injector.getInstance(GatewayAllocator.class).setReallocation(injector.getInstance(ClusterService.class), injector.getInstance(RoutingService.class));

        injector.getInstance(ResourceWatcherService.class).start();
        injector.getInstance(GatewayService.class).start();

        // Start the transport service now so the publish address will be added to the local disco node in ClusterService
        TransportService transportService = injector.getInstance(TransportService.class);
        transportService.start();
        injector.getInstance(ClusterService.class).start();

        // start after cluster service so the local disco is known
        DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
        transportService.acceptIncomingRequests();
        discoService.joinClusterAndWaitForInitialState();

        if (settings.getAsBoolean("http.enabled", true)) {
            injector.getInstance(HttpServer.class).start();
        }

        injector.getInstance(TribeService.class).start();

        if (settings.getAsBoolean("node.portsfile", false)) {
            if (settings.getAsBoolean("http.enabled", true)) {
                HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
                writePortsFile("http", http.boundAddress());
            }
            TransportService transport = injector.getInstance(TransportService.class);
            writePortsFile("transport", transport.boundAddress());
        }

        logger.info("started");

        return this;
    }

    private Node stop() {
        if (!lifecycle.moveToStopped()) {
            return this;
        }
        ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
        logger.info("stopping ...");

        injector.getInstance(TribeService.class).stop();
        injector.getInstance(ResourceWatcherService.class).stop();
        if (settings.getAsBoolean("http.enabled", true)) {
            injector.getInstance(HttpServer.class).stop();
        }

        injector.getInstance(SnapshotsService.class).stop();
        injector.getInstance(SnapshotShardsService.class).stop();
        // stop any changes happening as a result of cluster state changes
        injector.getInstance(IndicesClusterStateService.class).stop();
        // we close indices first, so operations won't be allowed on it
        injector.getInstance(IndexingMemoryController.class).stop();
        injector.getInstance(IndicesTTLService.class).stop();
        injector.getInstance(RoutingService.class).stop();
        injector.getInstance(ClusterService.class).stop();
        injector.getInstance(DiscoveryService.class).stop();
        injector.getInstance(MonitorService.class).stop();
        injector.getInstance(GatewayService.class).stop();
        injector.getInstance(SearchService.class).stop();
        injector.getInstance(RestController.class).stop();
        injector.getInstance(TransportService.class).stop();

        for (Class<? extends LifecycleComponent> plugin : pluginsService.nodeServices()) {
            injector.getInstance(plugin).stop();
        }
        // we should stop this last since it waits for resources to get released
        // if we had scroll searchers etc or recovery going on we wait for to finish.
        injector.getInstance(IndicesService.class).stop();
        logger.info("stopped");

        return this;
    }

    // During concurrent close() calls we want to make sure that all of them return after the node has completed it's shutdown cycle.
    // If not, the hook that is added in Bootstrap#setup() will be useless: close() might not be executed, in case another (for example api) call
    // to close() has already set some lifecycles to stopped. In this case the process will be terminated even if the first call to close() has not finished yet.
    @Override
    public synchronized void close() {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.moveToClosed()) {
            return;
        }

        ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
        logger.info("closing ...");

        StopWatch stopWatch = new StopWatch("node_close");
        stopWatch.start("tribe");
        injector.getInstance(TribeService.class).close();
        stopWatch.stop().start("http");
        if (settings.getAsBoolean("http.enabled", true)) {
            injector.getInstance(HttpServer.class).close();
        }

        stopWatch.stop().start("snapshot_service");
        injector.getInstance(SnapshotsService.class).close();
        injector.getInstance(SnapshotShardsService.class).close();
        stopWatch.stop().start("client");
        Releasables.close(injector.getInstance(Client.class));
        stopWatch.stop().start("indices_cluster");
        injector.getInstance(IndicesClusterStateService.class).close();
        stopWatch.stop().start("indices");
        injector.getInstance(IndexingMemoryController.class).close();
        injector.getInstance(IndicesTTLService.class).close();
        injector.getInstance(IndicesService.class).close();
        // close filter/fielddata caches after indices
        injector.getInstance(IndicesQueryCache.class).close();
        injector.getInstance(IndicesFieldDataCache.class).close();
        injector.getInstance(IndicesStore.class).close();
        stopWatch.stop().start("routing");
        injector.getInstance(RoutingService.class).close();
        stopWatch.stop().start("cluster");
        injector.getInstance(ClusterService.class).close();
        stopWatch.stop().start("discovery");
        injector.getInstance(DiscoveryService.class).close();
        stopWatch.stop().start("monitor");
        injector.getInstance(MonitorService.class).close();
        stopWatch.stop().start("gateway");
        injector.getInstance(GatewayService.class).close();
        stopWatch.stop().start("search");
        injector.getInstance(SearchService.class).close();
        stopWatch.stop().start("rest");
        injector.getInstance(RestController.class).close();
        stopWatch.stop().start("transport");
        injector.getInstance(TransportService.class).close();
        stopWatch.stop().start("percolator_service");
        injector.getInstance(PercolatorService.class).close();

        for (Class<? extends LifecycleComponent> plugin : pluginsService.nodeServices()) {
            stopWatch.stop().start("plugin(" + plugin.getName() + ")");
            injector.getInstance(plugin).close();
        }

        stopWatch.stop().start("script");
        try {
            injector.getInstance(ScriptService.class).close();
        } catch (IOException e) {
            logger.warn("ScriptService close failed", e);
        }

        stopWatch.stop().start("thread_pool");
        // TODO this should really use ThreadPool.terminate()
        injector.getInstance(ThreadPool.class).shutdown();
        try {
            injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignore
        }
        stopWatch.stop().start("thread_pool_force_shutdown");
        try {
            injector.getInstance(ThreadPool.class).shutdownNow();
        } catch (Exception e) {
            // ignore
        }
        stopWatch.stop();

        if (logger.isTraceEnabled()) {
            logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
        }

        injector.getInstance(NodeEnvironment.class).close();
        injector.getInstance(PageCacheRecycler.class).close();

        logger.info("closed");
    }

    /**
     * Returns <tt>true</tt> if the node is closed.
     */
    public boolean isClosed() {
        return lifecycle.closed();
    }

    public Injector injector() {
        return this.injector;
    }

    /** Writes a file to the logs dir containing the ports for the given transport type */
    private void writePortsFile(String type, BoundTransportAddress boundAddress) {
        Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp");
        try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) {
            for (TransportAddress address : boundAddress.boundAddresses()) {
                InetAddress inetAddress = InetAddress.getByName(address.getAddress());
                if (inetAddress instanceof Inet6Address && inetAddress.isLinkLocalAddress()) {
                    // no link local, just causes problems
                    continue;
                }
                writer.write(NetworkAddress.formatAddress(new InetSocketAddress(inetAddress, address.getPort())) + "\n");
            }
        } catch (IOException e) {
            throw new RuntimeException("Failed to write ports file", e);
        }
        Path portsFile = environment.logsFile().resolve(type + ".ports");
        try {
            Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE);
        } catch (IOException e) {
            throw new RuntimeException("Failed to rename ports file", e);
        }
    }
}
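
Once start() has returned, everything goes through the injector-backed client(). The sketch below is illustrative only (the health check and the printed message are arbitrary examples, not anything Sonar specifically does); it leans on two behaviours visible in the listing above: start() is a no-op when the lifecycle is already started, and close() first calls stop() before releasing the services.

import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.node.Node;

public class NodeClientSketch {
    // 'node' is assumed to come from a NodeBuilder sketch like the one shown earlier.
    static void checkClusterAndShutdown(Node node) {
        node.start();                     // no-op if the node is already in the "started" state
        Client client = node.client();
        ClusterHealthResponse health = client.admin().cluster()
                .prepareHealth()
                .setWaitForYellowStatus() // block until shards are at least yellow
                .get();
        System.out.println("cluster [" + health.getClusterName() + "] status: " + health.getStatus());
        node.close();                     // moves the lifecycle through stopped to closed
    }
}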
