workload_functions.sh is the entry point of the benchmark scripts: it glues the monitoring program monitor.py to the main workload run scripts:

#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -u

export HIBENCH_PRINTFULLLOG=0
this="${BASH_SOURCE-$0}"
workload_func_bin=$(cd -P -- "$(dirname -- "$this")" && pwd -P)
. ${workload_func_bin}/assert.sh
. ${workload_func_bin}/color.sh

HIBENCH_CONF_FOLDER=${HIBENCH_CONF_FOLDER:-${workload_func_bin}/../../conf}

function enter_bench(){         # declare the entrance of a workload
    assert $1 "Workload name not specified."
    assert $2 "Workload config file not specified."
    assert $3 "Current workload folder not specified."
    export HIBENCH_CUR_WORKLOAD_NAME=$1
    workload_config_file=$2
    workload_folder=$3
    shift 3
    patching_args=$@
    echo "patching args=$patching_args"
    local CONF_FILE=`${workload_func_bin}/load_config.py ${HIBENCH_CONF_FOLDER} $workload_config_file $workload_folder $patching_args`
    . $CONF_FILE
}

function leave_bench(){         # declare the workload is finished
    assert $HIBENCH_CUR_WORKLOAD_NAME "BUG, HIBENCH_CUR_WORKLOAD_NAME unset."
    unset HIBENCH_CUR_WORKLOAD_NAME
}

function show_bannar(){         # print banner
    assert $HIBENCH_CUR_WORKLOAD_NAME "HIBENCH_CUR_WORKLOAD_NAME not specified."
    assert $1 "Unknown banner operation"
    echo -e "${BGreen}$1 ${Color_Off}${UGreen}$HIBENCH_CUR_WORKLOAD_NAME${Color_Off} ${BGreen}bench${Color_Off}"
}

function timestamp(){           # get current timestamp in milliseconds
    sec=`date +%s`
    nanosec=`date +%N`
    re='^[0-9]+$'
    if ! [[ $nanosec =~ $re ]] ; then
        nanosec=0
    fi
    tmp=`expr $sec \* 1000 `
    msec=`expr $nanosec / 1000000 `
    echo `expr $tmp + $msec`
}

function start_monitor(){
    MONITOR_PID=`${workload_func_bin}/monitor.py ${HIBENCH_CUR_WORKLOAD_NAME} $$ ${WORKLOAD_RESULT_FOLDER}/monitor.log ${WORKLOAD_RESULT_FOLDER}/bench.log ${WORKLOAD_RESULT_FOLDER}/monitor.html ${SLAVES} &`
#    echo "start monitor, got child pid:${MONITOR_PID}" > /dev/stderr
    echo ${MONITOR_PID}
}

function stop_monitor(){
    MONITOR_PID=$1
    assert $1 "monitor pid missing"
#    echo "stop monitor, kill ${MONITOR_PID}" > /dev/stderr
    kill ${MONITOR_PID}
}

function get_field_name() {     # print report column header
    printf "${REPORT_COLUMN_FORMATS}" Type Date Time Input_data_size "Duration(s)" "Throughput(bytes/s)" Throughput/node
}

function gen_report() {         # dump the result to report file
    assert ${HIBENCH_CUR_WORKLOAD_NAME} "HIBENCH_CUR_WORKLOAD_NAME not specified."
    local start=$1
    local end=$2
    local size=$3
    which bc > /dev/null 2>&1
    if [ $? -eq 1 ]; then
        assert 0 "\"bc\" utility missing. Please install it to generate proper report."
        return 1
    fi
    local duration=$(echo "scale=3;($end-$start)/1000"|bc)
    local tput=`echo "$size/$duration"|bc`
#    local nodes=`cat ${SPARK_HOME}/conf/slaves 2>/dev/null | grep -v '^\s*$' | sed "/^#/ d" | wc -l`
    local nodes=`echo ${SLAVES} | wc -w`
    nodes=${nodes:-1}
    if [ $nodes -eq 0 ]; then nodes=1; fi
    local tput_node=`echo "$tput/$nodes"|bc`

    REPORT_TITLE=`get_field_name`
    if [ ! -f ${HIBENCH_REPORT}/${HIBENCH_REPORT_NAME} ] ; then
        echo "${REPORT_TITLE}" > ${HIBENCH_REPORT}/${HIBENCH_REPORT_NAME}
    fi

    REPORT_LINE=$(printf "${REPORT_COLUMN_FORMATS}" ${HIBENCH_CUR_WORKLOAD_NAME} $(date +%F) $(date +%T) $size $duration $tput $tput_node)
    echo "${REPORT_LINE}" >> ${HIBENCH_REPORT}/${HIBENCH_REPORT_NAME}
    echo "# ${REPORT_TITLE}" >> ${HIBENCH_WORKLOAD_CONF}
    echo "# ${REPORT_LINE}" >> ${HIBENCH_WORKLOAD_CONF}
}

function rmr_hdfs(){            # rm -r for hdfs
    assert $1 "dir parameter missing"
    RMDIR_CMD="fs -rm -r -skipTrash"
    local CMD="$HADOOP_EXECUTABLE --config $HADOOP_CONF_DIR $RMDIR_CMD $1"
    echo -e "${BCyan}hdfs rm -r: ${Cyan}${CMD}${Color_Off}" 1>&2
    execute_withlog ${CMD}
}

function upload_to_hdfs(){
    assert $1 "local parameter missing"
    assert $2 "remote parameter missing"
    LOCAL_FILE_PATH=$1
    REMOTE_FILE_PATH=$2
    echo "REMOTE_FILE_PATH:$REMOTE_FILE_PATH" 1>&2
    if [[ `echo $REMOTE_FILE_PATH | tr A-Z a-z` = hdfs://* ]]; then # strip leading "HDFS://xxx:xxx/" string
        echo "HDFS_MASTER:$HDFS_MASTER" 1>&2
        local LEADING_HDFS_STRING_LENGTH=${#HDFS_MASTER}
        REMOTE_FILE_PATH=${REMOTE_FILE_PATH:$LEADING_HDFS_STRING_LENGTH}
        echo "stripped REMOTE_FILE_PATH:$REMOTE_FILE_PATH" 1>&2
    fi

    # clear previous package file
    local CMD="$HADOOP_EXECUTABLE --config $HADOOP_CONF_DIR fs -rm $REMOTE_FILE_PATH"
    echo -e "${BCyan}hdfs rm : ${Cyan}${CMD}${Color_Off}" 1>&2
    execute_withlog ${CMD}

    # prepare parent folder
    CMD="$HADOOP_EXECUTABLE --config $HADOOP_CONF_DIR fs -mkdir `dirname $REMOTE_FILE_PATH`"
    echo -e "${BCyan}hdfs mkdir : ${Cyan}${CMD}${Color_Off}" 1>&2
    execute_withlog ${CMD}

    # upload
    CMD="$HADOOP_EXECUTABLE --config $HADOOP_CONF_DIR fs -put $LOCAL_FILE_PATH $REMOTE_FILE_PATH"
    echo -e "${BCyan}hdfs put : ${Cyan}${CMD}${Color_Off}" 1>&2
    execute_withlog ${CMD}
}

function dus_hdfs(){            # du -s for hdfs
    assert $1 "dir parameter missing"
    DUS_CMD="fs -du -s"
    local CMD="$HADOOP_EXECUTABLE --config $HADOOP_CONF_DIR $DUS_CMD $1"
    echo -e "${BPurple}hdfs du -s: ${Purple}${CMD}${Color_Off}" 1>&2
    execute_withlog ${CMD}
}

function check_dir() {          # ensure dir is created
    local dir=$1
    assert $1 "dir parameter missing"
    if [ -z "$dir" ];then
        echo -e "${BYellow}WARN${Color_Off}: payload missing."
        return 1
    fi
    if [ ! -d "$dir" ];then
        echo -e "${BRed}ERROR${Color_Off}: directory $dir does not exist."
        exit 1
    fi
    touch "$dir"/touchtest
    if [ $? -ne 0 ]; then
        echo -e "${BRed}ERROR${Color_Off}: directory unwritable."
        exit 1
    else
        rm "$dir"/touchtest
    fi
}

function dir_size() {
    for item in $(dus_hdfs $1); do
        if [[ $item =~ ^[0-9]+$ ]]; then
            echo $item
        fi
    done
}

function run_spark_job() {
    LIB_JARS=
    while (($#)); do
        if [ "$1" = "--jars" ]; then
            LIB_JARS="--jars $2"
            shift 2
            continue
        fi
        break
    done

    CLS=$1
    shift

    export_withlog SPARKBENCH_PROPERTIES_FILES

    YARN_OPTS=""
    if [[ "$SPARK_MASTER" == yarn-* ]]; then
        export_withlog HADOOP_CONF_DIR

        YARN_OPTS="--num-executors ${YARN_NUM_EXECUTORS}"
        if [[ -n "${YARN_EXECUTOR_CORES:-}" ]]; then
            YARN_OPTS="${YARN_OPTS} --executor-cores ${YARN_EXECUTOR_CORES}"
        fi
        if [[ -n "${SPARK_YARN_EXECUTOR_MEMORY:-}" ]]; then
            YARN_OPTS="${YARN_OPTS} --executor-memory ${SPARK_YARN_EXECUTOR_MEMORY}"
        fi
        if [[ -n "${SPARK_YARN_DRIVER_MEMORY:-}" ]]; then
            YARN_OPTS="${YARN_OPTS} --driver-memory ${SPARK_YARN_DRIVER_MEMORY}"
        fi
    fi
    if [[ "$CLS" == *.py ]]; then
        LIB_JARS="$LIB_JARS --jars ${SPARKBENCH_JAR}"
        SUBMIT_CMD="${SPARK_HOME}/bin/spark-submit ${LIB_JARS} --properties-file ${SPARK_PROP_CONF} --master ${SPARK_MASTER} ${YARN_OPTS} ${CLS} $@"
    else
        SUBMIT_CMD="${SPARK_HOME}/bin/spark-submit ${LIB_JARS} --properties-file ${SPARK_PROP_CONF} --class ${CLS} --master ${SPARK_MASTER} ${YARN_OPTS} ${SPARKBENCH_JAR} $@"
    fi
    echo -e "${BGreen}Submit Spark job: ${Green}${SUBMIT_CMD}${Color_Off}"
    MONITOR_PID=`start_monitor`
    execute_withlog ${SUBMIT_CMD}
    result=$?
    stop_monitor ${MONITOR_PID}
    if [ $result -ne 0 ]
    then
        echo -e "${BRed}ERROR${Color_Off}: Spark job ${BYellow}${CLS}${Color_Off} failed to run successfully."
        echo -e "${BBlue}Hint${Color_Off}: You can goto ${BYellow}${WORKLOAD_RESULT_FOLDER}/bench.log${Color_Off} to check for detailed log.\nOpening log tail for you:\n"
        tail ${WORKLOAD_RESULT_FOLDER}/bench.log
        exit $result
    fi
}

function run_storm_job(){
    CMD="${STORM_HOME}/bin/storm jar ${STREAMBENCH_STORM_JAR} $@"
    echo -e "${BGreen}Submit Storm Job: ${Green}$CMD${Color_Off}"
    execute_withlog $CMD
}

function run_gearpump_app(){
    CMD="${GEARPUMP_HOME}/bin/gear app -executors ${STREAMBENCH_GEARPUMP_EXECUTORS} -jar ${STREAMBENCH_GEARPUMP_JAR} $@"
    echo -e "${BGreen}Submit Gearpump Application: ${Green}$CMD${Color_Off}"
    execute_withlog $CMD
}

function run_flink_job(){
    CMD="${FLINK_HOME}/bin/flink run -p ${STREAMBENCH_FLINK_PARALLELISM} -m ${HIBENCH_FLINK_MASTER} $@ ${STREAMBENCH_FLINK_JAR} ${SPARKBENCH_PROPERTIES_FILES}"
    echo -e "${BGreen}Submit Flink Job: ${Green}$CMD${Color_Off}"
    execute_withlog $CMD
}

function run_hadoop_job(){
    ENABLE_MONITOR=1
    if [ "$1" = "--without-monitor" ]; then
        ENABLE_MONITOR=0
        shift 1
    fi
    local job_jar=$1
    shift
    local job_name=$1
    shift
    local tail_arguments=$@
    local CMD="${HADOOP_EXECUTABLE} --config ${HADOOP_CONF_DIR} jar $job_jar $job_name $tail_arguments"
    echo -e "${BGreen}Submit MapReduce Job: ${Green}$CMD${Color_Off}"
    if [ ${ENABLE_MONITOR} = 1 ]; then
        MONITOR_PID=`start_monitor`
    fi
    execute_withlog ${CMD}
    result=$?
    if [ ${ENABLE_MONITOR} = 1 ]; then
        stop_monitor ${MONITOR_PID}
    fi
    if [ $result -ne 0 ]; then
        echo -e "${BRed}ERROR${Color_Off}: Hadoop job ${BYellow}${job_jar} ${job_name}${Color_Off} failed to run successfully."
        echo -e "${BBlue}Hint${Color_Off}: You can goto ${BYellow}${WORKLOAD_RESULT_FOLDER}/bench.log${Color_Off} to check for detailed log.\nOpening log tail for you:\n"
        tail ${WORKLOAD_RESULT_FOLDER}/bench.log
        exit $result
    fi
}

function ensure_hivebench_release(){
    if [ ! -e ${HIBENCH_HOME}"/hadoopbench/sql/target/"$HIVE_RELEASE".tar.gz" ]; then
        assert 0 "Error: The hive bin file hasn't be downloaded by maven, please check!"
        exit
    fi

    cd ${HIBENCH_HOME}"/hadoopbench/sql/target"
    if [ ! -d $HIVE_HOME ]; then
        tar zxf $HIVE_RELEASE".tar.gz"
    fi
    export_withlog HADOOP_EXECUTABLE
}

function ensure_mahout_release (){
    if [ ! -e ${HIBENCH_HOME}"/hadoopbench/mahout/target/"$MAHOUT_RELEASE".tar.gz" ]; then
        assert 0 "Error: The mahout bin file hasn't be downloaded by maven, please check!"
        exit
    fi

    cd ${HIBENCH_HOME}"/hadoopbench/mahout/target"
    if [ ! -d $MAHOUT_HOME ]; then
        tar zxf $MAHOUT_RELEASE".tar.gz"
    fi
    export_withlog HADOOP_EXECUTABLE
    export_withlog HADOOP_HOME
    export_withlog HADOOP_CONF_DIR
}

function execute () {
    CMD="$@"
    echo -e "${BCyan}Executing: ${Cyan}${CMD}${Color_Off}"
    $CMD
}

function printFullLog(){
    export HIBENCH_PRINTFULLLOG=1
}

function execute_withlog () {
    CMD="$@"
    if [ -t 1 ] ; then          # Terminal, beautify the output.
        ${workload_func_bin}/execute_with_log.py ${WORKLOAD_RESULT_FOLDER}/bench.log $CMD
    else                        # pipe, do nothing.
        $CMD
    fi
}

function export_withlog () {
    var_name=$1
    var_val=${!1}
    assert $1 "export without a variable name!"
    echo -e "${BCyan}Export env: ${Cyan}${var_name}${BCyan}=${Cyan}${var_val}${Color_Off}"
    export ${var_name}
}

function command_exist ()
{
    result=$(which $1)
    if [ $? -eq 0 ]
    then
        return 0
    else
        return 1
    fi
}

function ensure_nutchindexing_release () {
    if [ ! -e ${HIBENCH_HOME}"/hadoopbench/nutchindexing/target/apache-nutch-1.2-bin.tar.gz" ]; then
        assert 0 "Error: The nutch bin file hasn't be downloaded by maven, please check!"
        exit
    fi

    NUTCH_ROOT=${WORKLOAD_RESULT_FOLDER}
    cp -a $NUTCH_DIR/nutch $NUTCH_ROOT

    cd ${HIBENCH_HOME}"/hadoopbench/nutchindexing/target"
    if [ ! -d $NUTCH_HOME ]; then
        tar zxf apache-nutch-1.2-bin.tar.gz
    fi
    find $NUTCH_HOME/lib ! -name "lucene-*" -type f -exec rm -rf {} \;
    rm -rf $NUTCH_ROOT/nutch_release
    cp -a $NUTCH_HOME $NUTCH_ROOT/nutch_release
    NUTCH_HOME_WORKLOAD=$NUTCH_ROOT/nutch_release
    cp $NUTCH_ROOT/nutch/conf/nutch-site.xml $NUTCH_HOME_WORKLOAD/conf
    cp $NUTCH_ROOT/nutch/bin/nutch $NUTCH_HOME_WORKLOAD/bin

    # Patching jcl-over-slf4j version against cdh or hadoop2
    mkdir $NUTCH_HOME_WORKLOAD/temp
    unzip -q $NUTCH_HOME_WORKLOAD/nutch-1.2.job -d $NUTCH_HOME_WORKLOAD/temp
    rm -f $NUTCH_HOME_WORKLOAD/temp/lib/jcl-over-slf4j-*.jar
    rm -f $NUTCH_HOME_WORKLOAD/temp/lib/slf4j-log4j*.jar
    cp ${NUTCH_DIR}/target/dependency/jcl-over-slf4j-*.jar $NUTCH_HOME_WORKLOAD/temp/lib
    rm -f $NUTCH_HOME_WORKLOAD/nutch-1.2.job
    cd $NUTCH_HOME_WORKLOAD/temp
    zip -qr $NUTCH_HOME_WORKLOAD/nutch-1.2.job *
    rm -rf $NUTCH_HOME_WORKLOAD/temp
    echo $NUTCH_HOME_WORKLOAD
}

function prepare_sql_aggregation () {
    assert $1 "SQL file path not exist"
    HIVEBENCH_SQL_FILE=$1

    find . -name "metastore_db" -exec rm -rf "{}" \; 2>/dev/null

    cat <<EOF > ${HIVEBENCH_SQL_FILE}
USE DEFAULT;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
set ${MAP_CONFIG_NAME}=$NUM_MAPS;
set ${REDUCER_CONFIG_NAME}=$NUM_REDS;
set hive.stats.autogather=false;

DROP TABLE IF EXISTS uservisits;
CREATE EXTERNAL TABLE uservisits (sourceIP STRING,destURL STRING,visitDate STRING,adRevenue DOUBLE,userAgent STRING,countryCode STRING,languageCode STRING,searchWord STRING,duration INT ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' STORED AS  SEQUENCEFILE LOCATION '$INPUT_HDFS/uservisits';
DROP TABLE IF EXISTS uservisits_aggre;
CREATE EXTERNAL TABLE uservisits_aggre ( sourceIP STRING, sumAdRevenue DOUBLE) STORED AS  SEQUENCEFILE LOCATION '$OUTPUT_HDFS/uservisits_aggre';
INSERT OVERWRITE TABLE uservisits_aggre SELECT sourceIP, SUM(adRevenue) FROM uservisits GROUP BY sourceIP;
EOF
}

function prepare_sql_join () {
    assert $1 "SQL file path not exist"
    HIVEBENCH_SQL_FILE=$1

    find . -name "metastore_db" -exec rm -rf "{}" \; 2>/dev/null

    cat <<EOF > ${HIVEBENCH_SQL_FILE}
USE DEFAULT;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
set ${MAP_CONFIG_NAME}=$NUM_MAPS;
set ${REDUCER_CONFIG_NAME}=$NUM_REDS;
set hive.stats.autogather=false;

DROP TABLE IF EXISTS rankings;
CREATE EXTERNAL TABLE rankings (pageURL STRING, pageRank INT, avgDuration INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' STORED AS  SEQUENCEFILE LOCATION '$INPUT_HDFS/rankings';
DROP TABLE IF EXISTS uservisits_copy;
CREATE EXTERNAL TABLE uservisits_copy (sourceIP STRING,destURL STRING,visitDate STRING,adRevenue DOUBLE,userAgent STRING,countryCode STRING,languageCode STRING,searchWord STRING,duration INT ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' STORED AS  SEQUENCEFILE LOCATION '$INPUT_HDFS/uservisits';
DROP TABLE IF EXISTS rankings_uservisits_join;
CREATE EXTERNAL TABLE rankings_uservisits_join ( sourceIP STRING, avgPageRank DOUBLE, totalRevenue DOUBLE) STORED AS  SEQUENCEFILE LOCATION '$OUTPUT_HDFS/rankings_uservisits_join';
INSERT OVERWRITE TABLE rankings_uservisits_join SELECT sourceIP, avg(pageRank), sum(adRevenue) as totalRevenue FROM rankings R JOIN (SELECT sourceIP, destURL, adRevenue FROM uservisits_copy UV WHERE (datediff(UV.visitDate, '1999-01-01')>=0 AND datediff(UV.visitDate, '2000-01-01')<=0)) NUV ON (R.pageURL = NUV.destURL) group by sourceIP order by totalRevenue DESC;
EOF
}

function prepare_sql_scan () {
    assert $1 "SQL file path not exist"
    HIVEBENCH_SQL_FILE=$1

    find . -name "metastore_db" -exec rm -rf "{}" \; 2>/dev/null

    cat <<EOF > ${HIVEBENCH_SQL_FILE}
USE DEFAULT;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
set ${MAP_CONFIG_NAME}=$NUM_MAPS;
set ${REDUCER_CONFIG_NAME}=$NUM_REDS;
set hive.stats.autogather=false;

DROP TABLE IF EXISTS uservisits;
CREATE EXTERNAL TABLE uservisits (sourceIP STRING,destURL STRING,visitDate STRING,adRevenue DOUBLE,userAgent STRING,countryCode STRING,languageCode STRING,searchWord STRING,duration INT ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' STORED AS  SEQUENCEFILE LOCATION '$INPUT_HDFS/uservisits';
DROP TABLE IF EXISTS uservisits_copy;
CREATE EXTERNAL TABLE uservisits_copy (sourceIP STRING,destURL STRING,visitDate STRING,adRevenue DOUBLE,userAgent STRING,countryCode STRING,languageCode STRING,searchWord STRING,duration INT ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' STORED AS  SEQUENCEFILE LOCATION '$OUTPUT_HDFS/uservisits_copy';
INSERT OVERWRITE TABLE uservisits_copy SELECT * FROM uservisits;
EOF
}
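
To see how these helpers fit together, here is a minimal sketch of a workload driver script in the style HiBench uses: declare the workload, time it, submit the job, then write the report. The workload name, Spark class, config path, and directory layout below are illustrative assumptions, and $INPUT_HDFS / $OUTPUT_HDFS are assumed to be defined by the configuration file that enter_bench loads:

#!/bin/bash
# Hypothetical workload driver; names and paths are illustrative only.
current_dir=`dirname "$0"`
current_dir=`cd "$current_dir"; pwd`
root_dir=${current_dir}/../../../..

# Pull in the helper functions analyzed above.
. ${root_dir}/bin/functions/workload_functions.sh

# Declare the workload: name, workload config file, current folder.
enter_bench ScalaSparkWordcount ${root_dir}/conf/workloads/micro/wordcount.conf ${current_dir}
show_bannar start

# Clean the output path, measure the input size, and time the run.
rmr_hdfs $OUTPUT_HDFS || true
SIZE=`dir_size $INPUT_HDFS`
START_TIME=`timestamp`

# run_spark_job wraps spark-submit between start_monitor and stop_monitor.
run_spark_job com.intel.hibench.sparkbench.micro.ScalaWordCount $INPUT_HDFS $OUTPUT_HDFS

END_TIME=`timestamp`
gen_report ${START_TIME} ${END_TIME} ${SIZE}
show_bannar finish
leave_bench

This is also where monitor.py comes in: run_spark_job (and run_hadoop_job, unless --without-monitor is passed) brackets the actual submission with start_monitor and stop_monitor, which launch monitor.py and write monitor.log and monitor.html under ${WORKLOAD_RESULT_FOLDER}.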

Reposted from: https://www.cnblogs.com/ratels/p/11039773.html
