0. Install Docker

yum install docker -y
systemctl enable docker
systemctl start docker
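
Optionally verify Docker is up before continuing (a minimal check; docker info talks to the daemon, so it fails if the service is not running):

docker --version
docker info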

1. Create the deployment directory

mkdir -p /usr/local/module/
cd /usr/local/module/

2. Configure the environment

vi /etc/profile
# append at the end
##########################################
# java
export JAVA_HOME=/usr/local/module/java
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib
# scala
export SCALA_HOME=/usr/local/module/scala
export PATH=$SCALA_HOME/bin:$PATH
# hadoop
export HADOOP_HOME=/usr/local/module/hadoop
export PATH=$HADOOP_HOME/bin:$PATH
export HADOOP_CONF_DIR=/usr/local/module/hadoop/etc/hadoop
# hive
export HIVE_HOME=/usr/local/module/hive
export PATH=$HIVE_HOME/bin:$PATH
export HIVE_CONF_DIR=/usr/local/module/hive/conf
# spark
export SPARK_HOME=/usr/local/module/spark
export PATH=$SPARK_HOME/bin:$PATH
export SPARK_CONF_DIR=/usr/local/module/spark/conf
# required parameter for PySpark
export PYSPARK_ALLOW_INSECURE_GATEWAY=1
###########################################
# apply immediately
source /etc/profile
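
A quick check that the variables took effect in the current shell (the symlink targets are created in the steps below):

echo $JAVA_HOME $SCALA_HOME $HADOOP_HOME $HIVE_HOME $SPARK_HOME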

3. Deploy the Java environment

tar -zxvf jdk-8u181-linux-x64.tar.gz
ln -s jdk1.8.0_181 java
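
Verify the JDK (assuming /etc/profile from step 2 has been sourced):

java -version
# should report java version "1.8.0_181"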

4. Deploy the Scala environment

tar -zxvf scala-2.12.1.tgz
ln -s scala-2.12.1 scala
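
Verify Scala the same way:

scala -version
# should report version 2.12.1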

5. Deploy Hadoop

tar -zxvf hadoop-2.7.2.tar.gz
ln -s hadoop-2.7.2 hadoop
cd hadoop
# edit core-site.xml
vi etc/hadoop/core-site.xml
##############################################
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://localhost.localdomain:9000</value>
    <description>HDFS internal communication address</description>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/module/hadoopdata/</value>
    <description>hadoop data storage directory</description>
  </property>
</configuration>
###############################################
# create the hadoop data directory
mkdir -p /usr/local/module/hadoopdata/
# edit hdfs-site.xml
vi etc/hadoop/hdfs-site.xml
##############################################
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
</configuration>
##############################################
# edit yarn-site.xml
vi etc/hadoop/yarn-site.xml
##############################################
<configuration>
  <!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>localhost</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.pmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
</configuration>
##############################################
cp etc/hadoop/mapred-site.xml.template etc/hadoop/mapred-site.xml
# edit mapred-site.xml
vi etc/hadoop/mapred-site.xml
##############################################
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
###############################################
# configure passwordless SSH login
# generate a key pair
ssh-keygen -t rsa
# send the public key to the host
ssh-copy-id -i ~/.ssh/id_rsa.pub root@localhost
###############################################
vi /usr/local/module/hadoop/etc/hadoop/hadoop-env.sh
###############################################
# change the corresponding line
export JAVA_HOME=/usr/local/module/java
################################################
# format the namenode
hdfs namenode -format
# start
sh sbin/start-all.sh
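
Optionally confirm the daemons came up; jps should list NameNode, DataNode, SecondaryNameNode, ResourceManager and NodeManager, and a trivial HDFS command makes a good smoke test:

jps
hadoop fs -ls /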

6. Deploy MySQL

mkdir -p /usr/local/module/mysql/conf
mkdir -p /usr/local/module/mysql/data
mkdir -p /usr/local/module/mysql/logs
mkdir -p /usr/local/module/mysql/mysql-files
# edit my.cnf
vi /usr/local/module/mysql/conf/my.cnf
############################################
[mysqld]
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
############################################
docker run -d \
  -p 3306:3306 \
  --name mysql \
  --restart always \
  --privileged=true \
  -v /usr/local/module/mysql/conf:/etc/mysql \
  -v /usr/local/module/mysql/logs:/var/log/mysql \
  -v /usr/local/module/mysql/data:/var/lib/mysql \
  -v /usr/local/module/mysql/mysql-files:/var/lib/mysql-files/ \
  -e MYSQL_ROOT_PASSWORD=root123 \
  -e TZ=Asia/Shanghai \
  mysql:5.7
# install the client
yum -y install http://dev.mysql.com/get/mysql57-community-release-el7-10.noarch.rpm
yum -y install mysql
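
Optionally verify the container and client connectivity (uses the root password set in the docker run above):

docker ps | grep mysql
mysql -h 127.0.0.1 -u root -proot123 -e "select version();"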

7. Deploy Hive

# create the hive user, database, etc. in mysql
mysql -h 192.168.172.134 -u root -p
create user 'hive' identified by 'hive';
create database hive;
grant all on hive.* to hive@'%'  identified by 'hive';
grant all on hive.* to hive@'localhost'  identified by 'hive';
flush privileges;
exit;
tar -zxvf apache-hive-1.2.2-bin.tar.gz
ln -s apache-hive-1.2.2-bin hive
# copy the mysql jar
tar -zxvf mysql-connector-java-5.1.49.tar.gz
cp mysql-connector-java-5.1.49/mysql-connector-java-5.1.49-bin.jar hive/lib
cd hive
cp conf/hive-env.sh.template  conf/hive-env.sh
vi conf/hive-env.sh
# edit hive-env.sh and append the following
############################################
export HADOOP_HOME=/usr/local/module/hadoop
export HIVE_CONF_DIR=/usr/local/module/hive/conf
############################################
# edit hive-site.xml
vi conf/hive-site.xml
############################################
<configuration>
  <property>
    <name>hive.users.in.admin.role</name>
    <value>root</value>
  </property>
  <property>
    <name>hive.security.authorization.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.security.authorization.createtable.owner.grants</name>
    <value>ALL</value>
  </property>
  <property>
    <name>hive.security.authorization.task.factory</name>
    <value>org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://127.0.0.1:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
    <description>JDBC connect string for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
    <description>username to use against metastore database</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hive</value>
    <description>password to use against metastore database</description>
  </property>
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/user/hive/warehouse</value>
    <description>location of default database for the warehouse</description>
  </property>
  <property>
    <name>hive.metastore.schema.verification</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.cli.print.header</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.cli.print.current.db</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://127.0.0.1:9083</value>
  </property>
</configuration>
#############################################
# create the hive directories in hdfs
hadoop fs -mkdir       /tmp
hadoop fs -mkdir  -p     /user/hive/warehouse
hadoop fs -chmod g+w   /tmp
hadoop fs -chmod g+w   /user/hive/warehouse
# initialize the metastore database
schematool -initSchema -dbType mysql
# start
nohup hive --service metastore > metastore.log 2>&1 &
nohup hive --service hiveserver2 > hiveserver2.log 2>&1 &
# configure permissions
hive
#set role admin;
grant all to user root;
# create databases or tables and grant them to the root user (only granted tables or databases can be found by qualitis) -- note this!!!
GRANT ALL ON DATABASE xxxx TO USER root;
GRANT ALL ON TABLE xxxx TO USER root;
# shut down
ps -ef | grep hive
kill -9 pid
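
Before shutting down, a minimal smoke test of the metastore and hiveserver2 (beeline ships with hive; the URL assumes the default hiveserver2 port 10000 on this host):

hive -e "show databases;"
beeline -u jdbc:hive2://127.0.0.1:10000 -n root -e "show databases;"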

8. Deploy Spark

tar -zxvf spark-2.4.8-bin-hadoop2.7.tar.gz
ln -s spark-2.4.8-bin-hadoop2.7 spark
# copy the hadoop configuration
cp hadoop/etc/hadoop/core-site.xml spark/conf/
cp hadoop/etc/hadoop/hdfs-site.xml spark/conf/
# copy the hive configuration
cp hive/conf/hive-site.xml spark/conf/
# copy the mysql driver
cp mysql-connector-java-5.1.49/mysql-connector-java-5.1.49-bin.jar spark/jars/
# edit the config
cd spark
vi conf/spark-env.sh
#######################################################
export JAVA_HOME=/usr/local/module/java
export SPARK_MASTER_HOST=localhost
export SPARK_MASTER_WEBUI_PORT=18080
export SPARK_WORKER_WEBUI_PORT=18081
########################################################
# run spark
sh sbin/start-all.sh
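
Optionally submit the bundled SparkPi example to verify the standalone cluster (a sketch; the examples jar name assumes the stock spark-2.4.8-bin-hadoop2.7 package):

spark-submit --class org.apache.spark.examples.SparkPi \
  --master spark://localhost:7077 \
  examples/jars/spark-examples_2.11-2.4.8.jar 10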

9. Deploy Linkis

tar -zxvf wedatasphere-linkis-1.0.2-combined-package-dist.tar.gz
cd wedatasphere-linkis-1.0.2-combined-package-dist
# edit the configuration file
vi config/linkis-env.sh
########################## config/linkis-env.sh begin ####################################
#!/bin/bash
#
# Copyright 2019 WeBank
#
# Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# description:  Starts and stops Server
#
# @name:        linkis-env
#
# Modified for Linkis 1.0.0

# SSH_PORT=22

### deploy user
deployUser=root

##Linkis_SERVER_VERSION
LINKIS_SERVER_VERSION=v1

### Specifies the user workspace, which is used to store the user's script files and log files.
### Generally local directory
WORKSPACE_USER_ROOT_PATH=file:///tmp/linkis/ ##file:// required
### User's root hdfs path
HDFS_USER_ROOT_PATH=hdfs:///tmp/linkis ##hdfs:// required

### Path to store started engines and engine logs, must be local
ENGINECONN_ROOT_PATH=/usr/local/module/linkis
ENTRANCE_CONFIG_LOG_PATH=hdfs:///tmp/linkis/

### Path to store job ResultSet:file or hdfs path
RESULT_SET_ROOT_PATH=hdfs:///tmp/linkis ##hdfs:// required

### Provide the DB information of Hive metadata database.
### Attention! If there are special characters like "&", they need to be enclosed in quotation marks.
HIVE_META_URL="jdbc:mysql://192.168.172.134:3306/hive?createDatabaseIfNotExist=true"
HIVE_META_USER="hive"
HIVE_META_PASSWORD="hive"

##YARN REST URL  spark engine required
YARN_RESTFUL_URL=http://127.0.0.1:8088

###HADOOP CONF DIR
HADOOP_CONF_DIR=/usr/local/module/hadoop/etc/hadoop
###HIVE CONF DIR
HIVE_CONF_DIR=/usr/local/module/hive/conf
###SPARK CONF DIR
SPARK_CONF_DIR=/usr/local/module/spark/conf

## Engine version conf
#SPARK_VERSION
SPARK_VERSION=2.4.8
##HIVE_VERSION
HIVE_VERSION=1.2.2
#PYTHON_VERSION=python2

################### The install Configuration of all Micro-Services #####################
#
#    NOTICE:
#       1. If you just wanna try, the following micro-service configuration can be set without any settings.
#            These services will be installed by default on this machine.
#       2. In order to get the most complete enterprise-level features, we strongly recommend that you install
#            Linkis in a distributed manner and set the following microservice parameters
####  EUREKA install information
###  You can access it in your browser at the address below: http://${EUREKA_INSTALL_IP}:${EUREKA_PORT}
#EUREKA_INSTALL_IP=127.0.0.1         # Microservices Service Registration Discovery Center
EUREKA_PORT=20303
export EUREKA_PREFER_IP=false

###  Gateway install information
#GATEWAY_INSTALL_IP=127.0.0.1
GATEWAY_PORT=9001

### ApplicationManager
#MANAGER_INSTALL_IP=127.0.0.1
MANAGER_PORT=9101

### EngineManager
#ENGINECONNMANAGER_INSTALL_IP=127.0.0.1
ENGINECONNMANAGER_PORT=9102

### EnginePluginServer
#ENGINECONN_PLUGIN_SERVER_INSTALL_IP=127.0.0.1
ENGINECONN_PLUGIN_SERVER_PORT=9103

### LinkisEntrance
#ENTRANCE_INSTALL_IP=127.0.0.1
ENTRANCE_PORT=9104

###  publicservice
#PUBLICSERVICE_INSTALL_IP=127.0.0.1
PUBLICSERVICE_PORT=9105

### cs
#CS_INSTALL_IP=127.0.0.1
CS_PORT=9108

########################################################################################

## LDAP is for enterprise authorization, if you just want to have a try, ignore it.
#LDAP_URL=ldap://localhost:1389/
#LDAP_BASEDN=dc=webank,dc=com
#LDAP_USER_NAME_FORMAT=cn=%s@xxx.com,OU=xxx,DC=xxx,DC=com

## java application default jvm memory
export SERVER_HEAP_SIZE="128M"

## The decompression directory and the installation directory must be different
LINKIS_HOME=/usr/local/module/linkis

LINKIS_VERSION=1.0.2

# for install
LINKIS_PUBLIC_MODULE=lib/linkis-commons/public-module
################################ config/linkis-env.sh end ###############################
# configure db.sh
vi config/db.sh
##########################################################
MYSQL_HOST=192.168.172.134
MYSQL_PORT=3306
MYSQL_DB=linkis
MYSQL_USER=root
MYSQL_PASSWORD=root123
###########################################################
# install command-line prerequisites
yum install telnet -y
yum install dos2unix -y
# install (you may run into missing commands that need to be installed)
sh bin/install.sh
# run
cd ../linkis
sh sbin/linkis-start-all.sh
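
Optionally check that all microservices registered with Eureka (a minimal probe against Eureka's standard REST endpoint, assuming EUREKA_PORT=20303 from linkis-env.sh):

curl -s http://127.0.0.1:20303/eureka/apps | grep '<name>'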

10. Deploy linkis-web

mkdir linkis-web
cp wedatasphere-linkis-1.0.2-combined-package-dist/wedatasphere-linkis-web-1.0.2.zip linkis-web/
cd linkis-web
unzip wedatasphere-linkis-web-1.0.2.zip
# write nginx.conf
vi nginx.conf
############################################################
user root;
worker_processes  1;

events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;
    server {
        listen       8080; # access port
        server_name  localhost;
        #charset koi8-r;
        #access_log  /var/log/nginx/host.access.log  main;
        location / {
            root   /usr/share/nginx/html/; # directory where the front-end package was unpacked
            index  index.html index.htm;
        }
        location /api {
            proxy_pass http://172.19.18.189:9001; # ip:port of the linkis-gateway service
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header x_real_ipP $remote_addr;
            proxy_set_header remote_addr $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_http_version 1.1;
            proxy_connect_timeout 4s;
            proxy_read_timeout 600s;
            proxy_send_timeout 12s;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection upgrade;
        }
        #error_page  404              /404.html;
        # redirect server error pages to the static page /50x.html
        #error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   /usr/share/nginx/html;
        }
    }
}
############################################################
docker run --restart always --name linkis-web \
-p 18087:8080 \
--privileged=true \
-v /usr/local/module/linkis-web/linkis-website-master:/usr/share/nginx/html:ro \
-v /usr/local/module/linkis-web/nginx.conf:/etc/nginx/nginx.conf \
-d nginx:stable-alpine
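
Optionally confirm nginx is serving the console (assuming the 18087 host port mapped above):

curl -I http://127.0.0.1:18087/
# expect an HTTP 200 response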

11. Configure the default YARN queue for Linkis

# check the firewall status (CentOS 7)
firewall-cmd --state
# start the firewall
systemctl start firewalld
# stop the firewall
systemctl stop firewalld
# disable firewalld at boot
systemctl disable firewalld
# access
http://172.19.18.189:18080/#/login
# log in with root / root
# Parameter configuration - global settings:
#   YARN queue name [wds.linkis.rm.yarnqueue]: default
#   Maximum number of queue instances [wds.linkis.rm.yarnqueue.instance.max]: 2
#   Queue CPU usage limit [wds.linkis.rm.yarnqueue.cores.max]: 2
#   Queue memory usage limit [wds.linkis.rm.yarnqueue.memory.max]: 16g
#   Global memory limit per engine [wds.linkis.rm.client.memory.max]: 2g
#   Global core limit per engine [wds.linkis.rm.client.core.max]: 2
#   Global max concurrency per engine [wds.linkis.rm.instance]: 2
# Parameter configuration - IDE: set all core counts to 2 and concurrency to 2
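
Before saving, you can confirm the queue actually exists via the YARN REST API (a minimal probe, assuming the YARN_RESTFUL_URL configured in linkis-env.sh):

curl -s http://127.0.0.1:8088/ws/v1/cluster/scheduler | grep -o '"queueName":"[^"]*"'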

12. Deploy Qualitis

unzip wedatasphere-qualitis-0.8.0.zip
cd qualitis-0.8.0
vi conf/application-dev.yml
######################################################
# note: adjust the settings below to match your environment
spring:
  datasource:
    username: root
    password: root123
    url: jdbc:mysql://192.168.172.134:3306/qualitis?createDatabaseIfNotExist=true&useSSL=false
    driver-class-name: com.mysql.jdbc.Driver
    type: com.zaxxer.hikari.HikariDataSource
    hikari:
      minimum-idle: 20
      maximum-pool-size: 500
      idle-timeout: 60000
      max-lifetime: 180000
  jpa:
    hibernate:
      ddl-auto: update
    database-platform: org.hibernate.dialect.MySQL5InnoDBDialect
    show-sql: false
restTemplate:
  thread:
    maxTotal: 200 # max thread number
    maxPerRoute: 100 # max concurrent thread per route
  request:
    socketTimeout: 10000 # the max time waiting for response
    connectTimeout: 2000 # the max time waiting for shaking hand
    connectionRequestTimeout: 2000 # the max time waiting for getting connection from connection pool
task:
  persistent:
    type: jdbc
    username: root
    password: root123
    address: jdbc:mysql://192.168.172.134:3306/qualitis?createDatabaseIfNotExist=true&useSSL=false
    tableName: qualitis_application_task_result
  execute:
    limit_thread: 10
    rule_size: 10
timer:
  thread:
    size: 5
  check:
    period: 10000
  lock:
    zk:
      path: /qualitis/tmp/monitor
zk:
  address:
  base_sleep_time: 1000
  max_retries: 3
  session_time_out: 10000
  connection_time_out: 15000
  lock_wait_time: 3
auth:
  unFilterUrls:
    - /qualitis/api/v1/login/local
    - /qualitis/api/v1/logout
    - /qualitis/api/v1/redirect
  uploadUrls:
    - /qualitis/api/v1/projector/rule/batch/upload/*
    - /qualitis/api/v1/projector/project/batch/upload*
linkis:
  api:
    prefix: api/rest_j/v1
    submitJob: entrance/execute
    status: jobhistory/{id}/get
    runningLog: entrance/{id}/log
    finishLog: filesystem/openLog
  meta_data:
    db_path: datasource/dbs
    table_path: datasource/tables
    table_comment: datasource/getTableBaseInfo
    column_path: datasource/columns
    column_info: datasource/getTableFieldsInfo
  spark:
    application:
      name: IDE
  log:
    maskKey: task.persistent.username, task.persistent.password
front_end:
  home_page: http://192.168.172.134:18090/#/Home
  domain_name: http://192.168.172.134:18090
######################################################
vi conf/application.yml
######################################################
spring:
  profiles:
    active: dev
  jersey:
    type: servlet
  http:
    encoding:
      charset: UTF-8
      enabled: true
      force: true
  messages:
    encoding: UTF-8
    basename: i18n/messages
# logging
logging:
  config: classpath:log4j2-${spring.profiles.active}.xml
server:
  port: 18090
  connection-timeout: 6000000 # 600s
  # error page
  error:
    whitelabel:
      enabled: false
workflow:
  enable: true
ha:
  enable: false
system:
  config:
    save_database_pattern: save_database_pattern
#######################################################
# extract the initialization data
#cd /usr/local/module/qualitis-0.8.0/conf/database
#sz init.sql
# run init.sql against the qualitis database in mysql to create the initial data
# initialize the data
mysql -u {USERNAME} -p{PASSWORD} -h {IP} --default-character-set=utf8
source conf/database/init.sql
# make the script files executable
chmod -R +x bin/*
# start
sh bin/start.sh
# access
http://192.168.172.134:18090/#/Home
# stop
sh bin/stop.sh
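
After start.sh, a minimal availability probe (assuming the 18090 port from application.yml):

curl -I http://192.168.172.134:18090/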

13. Compile

# install gradle 4.9
# steps: download gradle 4.9, unpack it, and configure the environment variables
# download the source code
https://github.com/WeBankFinTech/Qualitis/archive/refs/tags/release-0.8.0.zip
# edit Qualitis-release-0.8.0\Qualitis-release-0.8.0\gradle\wrapper\gradle-wrapper.properties
#######################################
# changed 6.6 to 4.9
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-4.9-bin.zip
########################################
# edit Qualitis-release-0.8.0\Qualitis-release-0.8.0\gradle\dependencies.gradle
# you can change it straight to 0.7.0; the maven repository only goes up to 0.7.0
#   "dss":"0.7.0",
# if you have built dss 0.9 yourself, you can use 0.9.0 instead
#     "dss":"0.9.0",
# edit Qualitis-release-0.8.0\Qualitis-release-0.8.0\build.gradle
######################################
# comment out this block
processResources {
//    filter ReplaceTokens, tokens: [
//            "version": project.property("version")
//    ]
}
#########################################
# then you can build: run gradle clean distZip in a terminal
# or click the <hammer> button in IDEA to build and debug; note: set the IDEA encoding to UTF-8
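
If the build succeeds, the distribution zip should land under build/distributions (a quick check; the exact file name may vary):

gradle -v
ls build/distributions/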
