core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property><name>fs.defaultFS</name><value>hdfs://bigdata/</value></property>
  <property><name>ha.zookeeper.quorum</name><value>192.168.56.70:2181,192.168.56.71:2181,192.168.56.72:2181</value></property>
  <property><name>hadoop.tmp.dir</name><value>/export/data/hadoop/tmp</value></property>
  <property><name>fs.trash.interval</name><value>1440</value></property>
  <property><name>io.file.buffer.size</name><value>131072</value></property>
  <!-- Compression codecs -->
  <property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.BZip2Codec</value></property>
  <property><name>net.topology.script.file.name</name><value>/export/common/hadoop/conf/topo.sh</value></property>
  <property><name>net.topology.script.number.args</name><value>1</value></property>
  <property><name>ha.health-monitor.rpc-timeout.ms</name><value>180000</value></property>
  <property><name>hadoop.security.authorization</name><value>true</value></property>
  <property><name>hadoop.security.authentication</name><value>kerberos</value></property>
  <property><name>dfs.permissions.enabled</name><value>true</value></property>
  <property><name>dfs.namenode.acls.enabled</name><value>true</value></property>
  <property><name>ipc.maximum.data.length</name><value>268435456</value></property>
  <property><name>hadoop.proxyuser.httpfs.hosts</name><value>*</value></property>
  <property><name>hadoop.proxyuser.httpfs.groups</name><value>*</value></property>
  <property><name>hadoop.proxyuser.hdfs.hosts</name><value>*</value></property>
  <property><name>hadoop.proxyuser.hdfs.groups</name><value>*</value></property>
  <property><name>hadoop.proxyuser.hue.hosts</name><value>*</value></property>
  <property><name>hadoop.proxyuser.hue.groups</name><value>*</value></property>
  <property><name>hadoop.proxyuser.hive.hosts</name><value>*</value></property>
  <property><name>hadoop.proxyuser.hive.groups</name><value>*</value></property>
  <property><name>hadoop.proxyuser.spark.hosts</name><value>*</value></property>
  <property><name>hadoop.proxyuser.spark.groups</name><value>*</value></property>
  <property><name>hadoop.proxyuser.dwetl.hosts</name><value>*</value></property>
  <property><name>hadoop.proxyuser.dwetl.groups</name><value>*</value></property>
  <property><name>hadoop.proxyuser.hbase.hosts</name><value>*</value></property>
  <property><name>hadoop.proxyuser.hbase.groups</name><value>*</value></property>
</configuration>
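
The rack-awareness script referenced by net.topology.script.file.name is not listed here. A minimal sketch of what such a script might look like, assuming a static IP-to-rack mapping (the rack names below are illustrative):

#!/bin/bash
# Rack-awareness script: the NameNode calls it with one host/IP per invocation
# (net.topology.script.number.args=1) and reads the rack path from stdout.
case "$1" in
  192.168.56.70|192.168.56.71) echo "/rack01" ;;
  192.168.56.72)               echo "/rack02" ;;
  *)                           echo "/default-rack" ;;
esac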

hadoop-env.sh

export JAVA_HOME=/export/java
export HADOOP_COMMON_LIB_NATIVE_DIR="${HADOOP_HOME}/lib/native"
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native/"for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; doif [ "$HADOOP_CLASSPATH" ]; thenexport HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$felseexport HADOOP_CLASSPATH=$ffi
done# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=2048
export HADOOP_HEAPSIZE=4096# Extra Java runtime options.  Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_NAMENODE_OPTS="-Xmx85g -Xms85g -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}"#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
export HADOOP_DATANODE_OPTS="-server -XX:+UseConcMarkSweepGC -XX:SurvivorRatio=3 -XX:MaxTenuringThreshold=10 -XX:CMSInitiatingOccupancyFraction=80 -XX:+ExplicitGCInvokesConcurrent -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCApplicationConcurrentTime -Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANOODE_OPTS"
#export HADOOP_DATANODE_OPTS="-Xmx8g -Xms8g ${HADOOP_DATANODE_OPTS}"
export HADOOP_DATANODE_OPTS="-Xmx16g -Xms16g ${HADOOP_DATANODE_OPTS}"export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}export HADOOP_PID_DIR=/export/hadoop/pids
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USERexport LD_LIBRARY_PATH=/export/hadoop/lib
export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/export/hadoop/lib
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/export/hadoop/lib/*TEZ_CONF_DIR=/export/common/hadoop/conf/tez-site.xml
TEZ_JARS=/export/tez
export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${TEZ_CONF_DIR}:${TEZ_JARS}/*:${TEZ_JARS}/lib/*
export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:/export/hadoop/lib/native/"
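
After editing hadoop-env.sh, a quick sanity check that the native libraries under /export/hadoop/lib/native and the Tez jars actually land on the runtime path (standard Hadoop 2.x client commands):

hadoop version          # confirms JAVA_HOME resolves and shows the build in use
hadoop checknative -a   # verifies libhadoop and the gzip/bzip2 codecs load
hadoop classpath | tr ':' '\n' | grep -E 'tez|/export/hadoop/lib'   # Tez and extra-lib entries present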

HDFS

hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property><name>dfs.nameservices</name><value>bigdata</value></property>
  <property><name>dfs.ha.namenodes.bigdata</name><value>nn1,nn2</value></property>
  <property><name>dfs.namenode.rpc-address.bigdata.nn1</name><value>192.168.56.71:8020</value></property>
  <property><name>dfs.namenode.rpc-address.bigdata.nn2</name><value>192.168.56.72:8020</value></property>
  <property><name>dfs.namenode.http-address.bigdata.nn1</name><value>192.168.56.71:50070</value></property>
  <property><name>dfs.namenode.http-address.bigdata.nn2</name><value>192.168.56.72:50070</value></property>
  <property><name>dfs.namenode.shared.edits.dir</name><value>qjournal://192.168.56.71:8485;192.168.56.72:8485;192.168.56.70:8485/bigdata</value></property>
  <property><name>dfs.journalnode.edits.dir</name><value>/export/data/hadoop/journal</value></property>
  <property><name>dfs.ha.automatic-failover.enabled</name><value>true</value></property>
  <property><name>dfs.client.failover.proxy.provider.bigdata</name><value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value></property>
  <property><name>dfs.namenode.name.dir</name><value>/export/data/hadoop/namenode</value></property>
  <property><name>dfs.datanode.data.dir</name><value>/export/grid/01/hadoop/hdfs/data,/export/grid/02/hadoop/hdfs/data</value></property>
  <property><name>dfs.datanode.address</name><value>0.0.0.0:50010</value></property>
  <property><name>dfs.datanode.http.address</name><value>0.0.0.0:50075</value></property>
  <property><name>dfs.datanode.ipc.address</name><value>0.0.0.0:50020</value></property>
  <property><name>dfs.replication</name><value>3</value></property>
  <property><name>dfs.permissions</name><value>true</value></property>
  <property><name>dfs.webhdfs.enabled</name><value>true</value></property>
  <property><name>dfs.blocksize</name><value>134217728</value></property>
  <property><name>dfs.datanode.balance.bandwidthPerSec</name><value>41943040</value></property>
  <property><name>dfs.datanode.max.transfer.threads</name><value>8192</value></property>
  <!-- NameNode handler thread pool: roughly 20 * ln(n) for a cluster of n nodes -->
  <!-- In Python: import math; print(int(20 * math.log(n))) -->
  <property><name>dfs.namenode.handler.count</name><value>200</value></property>
  <!-- DataNode handler thread pool -->
  <property><name>dfs.datanode.handler.count</name><value>100</value></property>
  <property><name>dfs.datanode.max.xcievers</name><value>65535</value></property>
  <property><name>dfs.namenode.name.dir.restore</name><value>false</value></property>
  <property><name>dfs.namenode.checkpoint.period</name><value>6000</value></property>
  <property><name>dfs.hosts</name><value>/export/common/hadoop/conf/allowed_hosts</value></property>
  <property><name>dfs.hosts.exclude</name><value>/export/common/hadoop/conf/exclude_datanode_hosts</value></property>
  <property><name>dfs.webhdfs.enabled</name><value>true</value></property>
  <property><name>dfs.qjournal.write-txns.timeout.ms</name><value>60000</value></property>
  <property><name>dfs.permissions.enabled</name><value>true</value></property>
  <property><name>dfs.namenode.acls.enabled</name><value>true</value></property>
  <property><name>dfs.ha.fencing.methods</name><value>shell(/bin/true)</value></property>
  <property><name>dfs.client.block.write.replace-datanode-on-failure.enable</name><value>true</value></property>
  <property><name>dfs.client.block.write.replace-datanode-on-failure.policy</name><value>DEFAULT</value></property>
  <property><name>dfs.block.access.token.enable</name><value>true</value></property>
  <property><name>dfs.namenode.keytab.file</name><value>/export/common/hadoop/conf/hdfs.keytab</value></property>
  <property><name>dfs.namenode.kerberos.principal</name><value>hdfs/_HOST@BIGDATA.COM</value></property>
  <property><name>dfs.namenode.kerberos.internal.spnego.principal</name><value>HTTP/_HOST@BIGDATA.COM</value></property>
  <property><name>dfs.datanode.data.dir.perm</name><value>700</value></property>
  <property><name>dfs.datanode.address</name><value>0.0.0.0:2828</value></property>
  <property><name>dfs.datanode.http.address</name><value>0.0.0.0:2829</value></property>
  <property><name>dfs.datanode.keytab.file</name><value>/export/common/hadoop/conf/hdfs.keytab</value></property>
  <property><name>dfs.datanode.kerberos.principal</name><value>hdfs/_HOST@BIGDATA.COM</value></property>
  <!-- JournalNode (HDFS HA) -->
  <property><name>dfs.journalnode.keytab.file</name><value>/export/common/hadoop/conf/hdfs.keytab</value></property>
  <property><name>dfs.journalnode.kerberos.principal</name><value>hdfs/_HOST@BIGDATA.COM</value></property>
  <property><name>dfs.journalnode.kerberos.internal.spnego.principal</name><value>HTTP/_HOST@BIGDATA.COM</value></property>
  <!-- Secure WebHDFS -->
  <property><name>dfs.web.authentication.kerberos.principal</name><value>HTTP/_HOST@BIGDATA.COM</value></property>
  <property><name>dfs.web.authentication.kerberos.keytab</name><value>/export/common/hadoop/conf/hdfs.keytab</value></property>
  <property><name>dfs.http.policy</name><value>HTTPS_ONLY</value></property>
  <property><name>dfs.data.transfer.protection</name><value>integrity</value></property>
  <property><name>dfs.encrypt.data.transfer</name><value>true</value></property>
  <property><name>mapreduce.jobtracker.handler.count</name><value>40</value></property>
  <!-- Enable HDFS path-level permissions -->
  <property><name>dfs.permissions.enabled</name><value>true</value></property>
  <property><name>jdjr.hadoop.path.permission.enable</name><value>true</value></property>
  <property><name>dfs.namenode.inode.attributes.provider.class</name><value>com.jdjr.flowyed.hadoop.permission.JdjrHdfsAuthorizer</value></property>
  <property><name>jdjr.hadoop.path.permission.file.path</name><value>/export/common/hadoop/conf/hdfs-policies.json</value></property>
  <property><name>jdjr.hadoop.cluster.name</name><value>agent-hadoop-dev</value></property>
  <!-- End of HDFS path-level permissions -->
</configuration>
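
With Kerberos enabled, client commands need a ticket first. A quick check of the HA pair using the keytab and principals configured above (nn1/nn2 are the IDs from dfs.ha.namenodes.bigdata; the principal instance is the local FQDN):

kinit -kt /export/common/hadoop/conf/hdfs.keytab hdfs/$(hostname -f)@BIGDATA.COM
hdfs haadmin -getServiceState nn1    # expect one active...
hdfs haadmin -getServiceState nn2    # ...and one standby
hdfs dfsadmin -report | head -n 20   # DataNodes registered and capacity visible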

allowed_hosts

(DataNode IP addresses)

192.168.56.70
192.168.56.71
192.168.56.72
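
After editing allowed_hosts (dfs.hosts) or exclude_datanode_hosts (dfs.hosts.exclude), the NameNode can pick up the new lists without a restart:

hdfs dfsadmin -refreshNodes   # re-reads dfs.hosts / dfs.hosts.exclude
hdfs dfsadmin -report         # confirm the expected DataNodes are live (or decommissioning)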

YARN

yarn-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property><name>yarn.resourcemanager.ha.enabled</name><value>true</value></property>
  <property><name>yarn.resourcemanager.cluster-id</name><value>bigdata</value></property>
  <property><name>yarn.resourcemanager.ha.rm-ids</name><value>rm1,rm2</value></property>
  <property><name>yarn.resourcemanager.hostname.rm1</name><value>192.168.56.71</value></property>
  <property><name>yarn.resourcemanager.hostname.rm2</name><value>192.168.56.72</value></property>
  <property><name>yarn.resourcemanager.webapp.address.rm1</name><value>192.168.56.71:8088</value></property>
  <property><name>yarn.resourcemanager.webapp.address.rm2</name><value>192.168.56.72:8088</value></property>
  <property><name>yarn.resourcemanager.zk-address</name><value>192.168.56.71:2181,192.168.56.72:2181,192.168.56.70:2181</value></property>
  <property><name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name><value>/hadoop-yarn-ha</value></property>
  <property><name>yarn.resourcemanager.ha.automatic-failover.enabled</name><value>true</value></property>
  <property><name>yarn.resourcemanager.address.rm1</name><value>192.168.56.71:8132</value></property>
  <property><name>yarn.resourcemanager.address.rm2</name><value>192.168.56.72:8132</value></property>
  <property><name>yarn.resourcemanager.scheduler.address.rm1</name><value>192.168.56.71:8130</value></property>
  <property><name>yarn.resourcemanager.scheduler.address.rm2</name><value>192.168.56.72:8130</value></property>
  <property><name>yarn.resourcemanager.resource-tracker.address.rm1</name><value>192.168.56.71:8131</value></property>
  <property><name>yarn.resourcemanager.resource-tracker.address.rm2</name><value>192.168.56.72:8131</value></property>
  <property><name>yarn.resourcemanager.webapp.address.rm1</name><value>192.168.56.71:8088</value></property>
  <property><name>yarn.resourcemanager.webapp.address.rm2</name><value>192.168.56.72:8088</value></property>
  <property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
  <property><name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name><value>org.apache.hadoop.mapred.ShuffleHandler</value></property>
  <!-- NodeManager memory; increase for production -->
  <property><name>yarn.nodemanager.resource.memory-mb</name><value>78848</value></property>
  <property><name>yarn.nodemanager.vmem-pmem-ratio</name><value>10</value></property>
  <property><name>yarn.scheduler.minimum-allocation-mb</name><value>1024</value></property>
  <property><name>yarn.scheduler.maximum-allocation-mb</name><value>78848</value></property>
  <property><name>yarn.app.mapreduce.am.resource.mb</name><value>4096</value></property>
  <property><name>yarn.app.mapreduce.am.command-opts</name><value>-Xmx3584M</value></property>
  <!-- NodeManager CPU vcores; increase for production -->
  <property><name>yarn.nodemanager.resource.cpu-vcores</name><value>76</value></property>
  <property><name>yarn.nodemanager.log-dirs</name><value>/export/grid/01/hadoop/yarn/log,/export/grid/02/hadoop/yarn/log,/export/grid/03/hadoop/yarn/log,/export/grid/04/hadoop/yarn/log,/export/grid/05/hadoop/yarn/log,/export/grid/06/hadoop/yarn/log,/export/grid/07/hadoop/yarn/log,/export/grid/08/hadoop/yarn/log,/export/grid/09/hadoop/yarn/log,/export/grid/10/hadoop/yarn/log,/export/grid/11/hadoop/yarn/log,/export/grid/12/hadoop/yarn/log</value></property>
  <property><name>yarn.acl.enable</name><value>false</value></property>
  <property><name>yarn.admin.acl</name><value>*</value></property>
  <property><name>yarn.nodemanager.local-dirs</name><value>/export/grid/01/hadoop/yarn/local,/export/grid/02/hadoop/yarn/local</value></property>
  <property><name>yarn.log.server.url</name><value>http://192.168.56.70:19888/jobhistory/logs</value></property>
  <property><name>yarn.log-aggregation-enable</name><value>true</value></property>
  <property><name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name><value>95</value></property>
  <property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value></property>
  <property><name>yarn.scheduler.fair.preemption</name><value>true</value></property>
  <property><name>yarn.nodemanager.address</name><value>${yarn.nodemanager.hostname}:65033</value></property>
  <property><name>yarn.resourcemanager.nodes.exclude-path</name><value>/export/common/hadoop/conf/exclude_nodemanager_hosts</value></property>
  <property><name>yarn.nodemanager.vmem-check-enabled</name><value>false</value></property>
  <property><name>yarn.resourcemanager.keytab</name><value>/export/common/hadoop/conf/hdfs.keytab</value></property>
  <property><name>yarn.resourcemanager.principal</name><value>hdfs/_HOST@BIGDATA.COM</value></property>
  <!-- NodeManager security configs -->
  <property><name>yarn.nodemanager.keytab</name><value>/export/common/hadoop/conf/hdfs.keytab</value></property>
  <property><name>yarn.nodemanager.principal</name><value>hdfs/_HOST@BIGDATA.COM</value></property>
  <property><name>yarn.nodemanager.linux-container-executor.group</name><value>hadoop</value></property>
  <property><name>yarn.nodemanager.container-executor.class</name><value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value></property>
  <property><name>yarn.nodemanager.aux-services</name><value>spark_shuffle,mapreduce_shuffle</value></property>
  <property><name>yarn.nodemanager.aux-services.spark_shuffle.class</name><value>org.apache.spark.network.yarn.YarnShuffleService</value></property>
  <property><name>yarn.scheduler.fair.user-as-default-queue</name><value>false</value></property>
  <!-- Preserve running applications across ResourceManager restarts -->
  <property><name>yarn.resourcemanager.recovery.enabled</name><value>true</value></property>
  <property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value></property>
  <property><name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name><value>10000</value></property>
</configuration>
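
A quick check that ResourceManager HA and the NodeManagers came up as configured (rm1/rm2 are the IDs from yarn.resourcemanager.ha.rm-ids):

yarn rmadmin -getServiceState rm1   # expect one active...
yarn rmadmin -getServiceState rm2   # ...and one standby
yarn node -list                     # NodeManagers registered with the active RM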

fair-scheduler.xml

<?xml version="1.0" encoding="utf-8"?>
<allocations>
  <userMaxAppsDefault>100</userMaxAppsDefault>
  <fairSharePreemptionTimeout>3000</fairSharePreemptionTimeout>
  <queue name="root">
    <aclSubmitApps>hdfs</aclSubmitApps>
    <aclAdministerApps>hdfs</aclAdministerApps>
    <queue name="default">
      <!-- maxResources: the most resources the queue may use -->
      <maxResources>249311 mb, 147 vcores</maxResources>
      <!-- minResources: guaranteed minimum, in the form "X mb, Y vcores". A queue whose
           guaranteed minimum is not yet satisfied gets resources before its sibling queues.
           Under the fair policy only memory is considered: a queue using more memory than
           its minimum is treated as satisfied. Under the drf policy the dominant resource
           is considered instead. -->
      <minResources>1024 mb, 1 vcores</minResources>
      <!-- maxRunningApps: maximum number of applications running at the same time; limiting
           this prevents the intermediate output of too many concurrent map tasks from
           filling the disks. -->
      <maxRunningApps>1000</maxRunningApps>
      <weight>1.0</weight>
      <!-- aclSubmitApps: Linux users or groups allowed to submit to this queue; defaults to
           *, meaning anyone may submit -->
      <aclSubmitApps>hbase,hive</aclSubmitApps>
      <!-- aclAdministerApps: administrators of this queue, who can manage its resources and
           applications, e.g. kill any application -->
      <aclAdministerApps>hbase,hive</aclAdministerApps>
    </queue>
    <queue name="etl">
      <maxResources>598346 mb, 352 vcores</maxResources>
      <minResources>1024 mb, 1 vcores</minResources>
      <maxRunningApps>1000</maxRunningApps>
      <weight>1.0</weight>
      <aclSubmitApps>*</aclSubmitApps>
      <aclAdministerApps>hdfs</aclAdministerApps>
    </queue>
    <queue name="personal">
      <maxResources>149586 mb, 88 vcores</maxResources>
      <minResources>1024 mb, 1 vcores</minResources>
      <maxRunningApps>50</maxRunningApps>
      <weight>1.0</weight>
      <aclSubmitApps>*</aclSubmitApps>
      <aclAdministerApps>hdfs</aclAdministerApps>
    </queue>
  </queue>
</allocations>
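
Assuming the allocation file is deployed as fair-scheduler.xml in the Hadoop conf directory (the default name when yarn.scheduler.fair.allocation.file is not set), queue changes can be reloaded and inspected without restarting the ResourceManager:

yarn rmadmin -refreshQueues   # reload the allocation file on the active RM
mapred queue -list            # list queues and their state as the scheduler sees them

Since yarn.scheduler.fair.user-as-default-queue is false, jobs that do not pass -Dmapreduce.job.queuename=etl (or personal) land in the default queue.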

mapred-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property><name>mapreduce.framework.name</name><value>yarn</value></property>
  <property><name>mapreduce.jobhistory.address</name><value>192.168.56.70:10020</value></property>
  <property><name>mapreduce.jobhistory.webapp.address</name><value>192.168.56.70:19888</value></property>
  <property><name>mapreduce.map.memory.mb</name><value>2048</value></property>
  <property><name>mapreduce.reduce.memory.mb</name><value>4096</value></property>
  <property><name>mapred.child.java.opts</name><value>-Xmx4096M</value></property>
  <property><name>mapreduce.map.java.opts</name><value>-Xmx1536M</value></property>
  <property><name>mapreduce.reduce.java.opts</name><value>-Xmx3276M</value></property>
  <property><name>mapreduce.map.output.compress</name><value>true</value></property>
  <property><name>mapreduce.task.io.sort.mb</name><value>200</value></property>
  <property><name>mapreduce.task.io.sort.factor</name><value>50</value></property>
  <property><name>mapreduce.reduce.shuffle.parallelcopies</name><value>50</value></property>
  <property><name>mapreduce.job.reduce.slowstart.completedmaps</name><value>0.3</value></property>
  <property><name>mapred.job.reuse.jvm.num.tasks</name><value>5</value></property>
  <property><name>mapreduce.job.counters.group.name.max</name><value>100000</value></property>
  <property><name>mapreduce.job.counters.counter.name.max</name><value>100000</value></property>
  <property><name>mapreduce.job.counters.groups.max</name><value>100000</value></property>
  <property><name>mapreduce.job.counters.max</name><value>100000</value></property>
  <property><name>mapreduce.jobhistory.keytab</name><value>/export/common/hadoop/conf/hdfs.keytab</value></property>
  <property><name>mapreduce.jobhistory.principal</name><value>hdfs/_HOST@{KDC_REALM}</value></property>
  <property><name>yarn.app.mapreduce.am.env</name><value>HADOOP_MAPRED_HOME=/export/hadoop</value></property>
  <property><name>mapreduce.map.env</name><value>HADOOP_MAPRED_HOME=/export/hadoop</value></property>
  <property><name>mapreduce.reduce.env</name><value>HADOOP_MAPRED_HOME=/export/hadoop</value></property>
  <property><name>mapreduce.tasktracker.map.tasks.maximum</name><value>34</value></property>
  <property><name>mapreduce.tasktracker.reduce.tasks.maximum</name><value>18</value></property>
</configuration>
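
With the JobHistory server addresses pointing at 192.168.56.70, a smoke test is to start the history server on that host and run one of the bundled example jobs; the paths below assume a standard Hadoop 2.x layout under /export/hadoop:

/export/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver   # on 192.168.56.70
hadoop jar /export/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 5 100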

log4j.properties

hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log
log4j.rootLogger=${hadoop.root.logger}, EventCounter
log4j.threshold=ALL
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth-${user.name}.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
log4j.logger.com.amazonaws=ERROR
log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
log4j.appender.RMSUMMARY.MaxFileSize=256MB
log4j.appender.RMSUMMARY.MaxBackupIndex=20
log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
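
hdfs.audit.logger defaults to INFO,NullAppender here, so audit events are discarded. The NameNode options in hadoop-env.sh above read it from the HDFS_AUDIT_LOGGER environment variable, so routing audits to the RFAAUDIT appender defined in this file is a matter of exporting the variables before the NameNode starts (a sketch, to be placed in hadoop-env.sh):

# Route NameNode audit events to hdfs-audit.log (RFAAUDIT) and security events to RFAS.
export HDFS_AUDIT_LOGGER=INFO,RFAAUDIT
export HADOOP_SECURITY_LOGGER=INFO,RFAS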
