Disable the firewall and SELinux

1. Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
2. Disable SELinux
sed -i 's/enforcing/disabled/g' /etc/selinux/config  # permanent, takes effect after reboot
setenforce 0  # temporary, current boot only

Set the hostnames (run each command on its corresponding host)

hostnamectl set-hostname hmaster01
hostnamectl set-hostname hmaster02
hostnamectl set-hostname hnode01
hostnamectl set-hostname hnode02
hostnamectl set-hostname hnode03

Append the cluster hosts to /etc/hosts on every node:

cat >>/etc/hosts <<EOF
10.252.50.192 hmaster01
10.252.50.222 hmaster02
10.252.50.183 hnode01
10.252.50.172 hnode02
10.252.50.76 hnode03
EOF

Tune kernel parameters

cat >> /etc/sysctl.conf <<EOF
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
EOF

sysctl -p

cat <<EOF >>/etc/security/limits.conf
* soft nofile 10240000
* hard nofile 10240000
* soft nproc 65535
* hard nproc 65535
EOF
Edit /etc/security/limits.d/20-nproc.conf and change
*          soft    nproc     4096
to
*          soft    nproc     65535
Then log out of the SSH session and reconnect so the new limits apply.

Set up cluster time synchronization
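
No commands are given for this step; a minimal sketch using chrony on CentOS 7, assuming hmaster01 serves as the internal time source for the 10.252.50.0/24 subnet:

yum install -y chrony                      # on every node
# on hmaster01, in /etc/chrony.conf: let the cluster subnet sync from this host
#   allow 10.252.50.0/24
# on all other nodes, in /etc/chrony.conf: use hmaster01 as the only server
#   server hmaster01 iburst
systemctl enable chronyd && systemctl restart chronyd
chronyc sources                            # verify the time source is reachable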

Create the regular user hadoop
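
No commands are shown here either; a typical sequence, run as root on every node:

useradd hadoop
passwd hadoop        # set the password interactively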

Create a uniform directory layout; run on every node:

mkdir -p /export/data 
mkdir -p /export/server
mkdir -p /export/software
chown -R hadoop:hadoop /export

Everything below is done as the hadoop user.

Set up passwordless SSH

Generate a key pair (accept the defaults) and copy it to every node; the sshfence mechanism configured below also relies on this key:

ssh-keygen -t rsa
ssh-copy-id hmaster01
ssh-copy-id hmaster02
ssh-copy-id hnode01
ssh-copy-id hnode02
ssh-copy-id hnode03

Edit the Hadoop configuration files

cd /export/server/hadoop-3.3.1/etc/hadoop
vim hadoop-env.sh — append the following:
export HDFS_NAMENODE_USER=hadoop
export HDFS_DATANODE_USER=hadoop
export HDFS_SECONDARYNAMENODE_USER=hadoop
export YARN_RESOURCEMANAGER_USER=hadoop
export YARN_NODEMANAGER_USER=hadoop
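
Hadoop 3 also requires JAVA_HOME to be set explicitly in hadoop-env.sh; the path below is hypothetical, point it at your actual JDK:

export JAVA_HOME=/export/server/jdk1.8.0_241   # hypothetical JDK path, adjust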

vim core-site.xml — set the following:

   <property>
      <name>fs.defaultFS</name>
      <value>hdfs://mycluster</value>
   </property>
   <property>
      <name>hadoop.tmp.dir</name>
      <value>/data/hadoop/tmp</value>
   </property>
   <property>
      <name>hadoop.http.staticuser.user</name>
      <value>hadoop</value>
   </property>
   <!-- ZooKeeper quorum used by ZKFC for automatic failover -->
   <property>
     <name>ha.zookeeper.quorum</name>
     <value>hnode01:2181,hnode02:2181,hnode03:2181</value>
   </property>


vim hdfs-site.xml

<!-- Replication factor (default 3) -->
<property>
   <name>dfs.replication</name>
   <value>3</value>
</property>
<!-- Logical name of the HA nameservice -->
<property>
   <name>dfs.nameservices</name>
   <value>mycluster</value>
</property>
<!-- NameNodes that make up the nameservice -->
<property>
   <name>dfs.ha.namenodes.mycluster</name>
   <value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 -->
<property>
   <name>dfs.namenode.rpc-address.mycluster.nn1</name>
   <value>hmaster01:8020</value>
</property>
<!-- RPC address of nn2 -->
<property>
   <name>dfs.namenode.rpc-address.mycluster.nn2</name>
   <value>hmaster02:8020</value>
</property>
<!-- HTTP address of nn1 -->
<property>
   <name>dfs.namenode.http-address.mycluster.nn1</name>
   <value>hmaster01:50070</value>
</property>
<!-- HTTP address of nn2 -->
<property>
   <name>dfs.namenode.http-address.mycluster.nn2</name>
   <value>hmaster02:50070</value>
</property>
<!-- Where the NameNodes share edit logs via the JournalNodes -->
<property>
   <name>dfs.namenode.shared.edits.dir</name>
   <value>qjournal://hmaster01:8485;hmaster02:8485;hnode01:8485/mycluster</value>
</property>
<!-- Fencing: ensure only one NameNode serves clients at a time -->
<property>
   <name>dfs.ha.fencing.methods</name>
   <value>sshfence</value>
</property>
<!-- sshfence requires passwordless SSH -->
<property>
   <name>dfs.ha.fencing.ssh.private-key-files</name>
   <value>/home/hadoop/.ssh/id_rsa</value>
</property>
<!-- JournalNode storage directory -->
<property>
   <name>dfs.journalnode.edits.dir</name>
   <value>/data/hadoop/ha/jn</value>
</property>
<!-- Disable permission checking -->
<property>
   <name>dfs.permissions.enabled</name>
   <value>false</value>
</property>
<!-- Proxy provider clients use to locate the active NameNode -->
<property>
   <name>dfs.client.failover.proxy.provider.mycluster</name>
   <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Enable automatic failover -->
<property>
   <name>dfs.ha.automatic-failover.enabled</name>
   <value>true</value>
</property>
<property>
   <name>dfs.namenode.name.dir</name>
   <value>/data/hadoop/hdfs/namenode</value>
</property>
<property>
   <name>dfs.datanode.data.dir</name>
   <value>/data/hadoop/hdfs/datanode</value>
</property>

vim yarn-site.xml

<!-- Enable ResourceManager HA -->
    <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
    </property>
<!-- Logical id for the RM HA cluster -->
    <property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>rmCluster</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>512</value>
    </property>
    <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>
        <value>4096</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-pmem-ratio</name>
        <value>4</value>
    </property>
    <!-- Ids and hostnames of the two ResourceManagers -->
    <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname.rm1</name>
        <value>hmaster01</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname.rm2</name>
        <value>hmaster02</value>
    </property>
    <!-- ZooKeeper ensemble address -->
    <property>
        <name>yarn.resourcemanager.zk-address</name>
        <value>hnode01:2181,hnode02:2181,hnode03:2181</value>
    </property>
    <!-- Enable RM state recovery -->
    <property>
        <name>yarn.resourcemanager.recovery.enabled</name>
        <value>true</value>
    </property>
    <!-- Store RM state in the ZooKeeper ensemble -->
    <property>
        <name>yarn.resourcemanager.store.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
    </property>
    
  
vim mapred-site.xml

  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- JobHistory server host and port -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>hmaster01:10020</value>
  </property>
  <!-- JobHistory web UI host and port (same host as the history server) -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hmaster01:19888</value>
  </property>
  <!-- Show at most 20000 finished jobs in the history server web UI -->
  <property>
    <name>mapreduce.jobhistory.joblist.cache.size</name>
    <value>20000</value>
  </property>
  <!-- Job history log locations -->
  <property>
    <name>mapreduce.jobhistory.done-dir</name>
    <value>${yarn.app.mapreduce.am.staging-dir}/history/done</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.intermediate-done-dir</name>
    <value>${yarn.app.mapreduce.am.staging-dir}/history/done_intermediate</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.staging-dir</name>
    <value>/tmp/hadoop-yarn/staging</value>
  </property>
  
vim workers   (Hadoop 3.x reads etc/hadoop/workers; the old slaves file is no longer used)

hnode01
hnode02
hnode03

Configure the Hadoop environment variables; run the following as root.

On hmaster01:
vim /etc/profile
export HADOOP_HOME=/export/server/hadoop-3.3.1
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
Sync the updated profile to the other machines:
scp /etc/profile hmaster02:/etc/profile
scp /etc/profile hnode01:/etc/profile
scp /etc/profile hnode02:/etc/profile
scp /etc/profile hnode03:/etc/profile
Reload the environment on every node:
source /etc/profile

Start the JournalNodes on the three machines configured in hdfs-site.xml; in this install they are hmaster01, hmaster02 and hnode01:

hdfs --daemon start journalnode   # Hadoop 3 replacement for the deprecated hadoop-daemon.sh

Format the NameNode, on hmaster01:

hdfs namenode -format

Start the NameNode on hmaster01:

hdfs --daemon start namenode

On hmaster02, bootstrap the standby with hmaster01's metadata:

hdfs namenode -bootstrapStandby

Start the NameNode on hmaster02:

hdfs --daemon start namenode

On hmaster01, start all DataNodes:

hdfs --workers --daemon start datanode   # runs on every host in etc/hadoop/workers; replaces hadoop-daemons.sh

Start YARN, on hmaster01:

start-yarn.sh

Although the previous step started YARN, hmaster02 does not get a ResourceManager process, so start it there separately:
yarn --daemon start resourcemanager   # replaces the deprecated yarn-daemon.sh
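
To verify the ResourceManager HA state (rm1/rm2 are the ids configured in yarn-site.xml):

yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2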

Start ZKFC

Format the ZKFC znode in ZooKeeper, on hmaster01 (the ZooKeeper ensemble must already be running):
hdfs zkfc -formatZK
Then run the following on both hmaster01 and hmaster02:
hdfs --daemon start zkfc
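
Once ZKFC is running, one NameNode should become active automatically; verify with:

hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2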

Hive setup

cp hive-env.sh.template hive-env.sh — append the following:
export HADOOP_HOME=/data/hadoop_base
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export HIVE_HOME=/data/hive
export HIVE_CONF_DIR=$HIVE_HOME/conf
export HIVE_AUX_JARS_PATH=$HIVE_HOME/lib

cp hive-log4j2.properties.template hive-log4j2.properties — set:
property.hive.log.dir = /data/hive/logs

cp hive-default.xml.template hive-site.xml — change the following values:
<!-- Metastore database JDBC URL -->
<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://node21:3306/hive?createDatabaseIfNotExist=true</value>
</property>
<!-- JDBC driver class -->
<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>com.mysql.jdbc.Driver</value>
</property>
<!-- Database user -->
<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>hive</value>
</property>
<!-- Database password -->
<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>hive</value>
</property>
<!-- Print column headers in the CLI -->
<property>
  <name>hive.cli.print.header</name>
  <value>true</value>
</property>
<!-- Show the current database in the CLI prompt -->
<property>
  <name>hive.cli.print.current.db</name>
  <value>true</value>
</property>

Also replace every occurrence of ${system:java.io.tmpdir}/${system:user.name} in hive-site.xml with /data/hive/tmp.
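
The metastore schema must be initialized once before first use; assuming the MySQL connection settings above are in place:

$HIVE_HOME/bin/schematool -dbType mysql -initSchema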

Scala

Extract scala-2.11.12.tgz
Rename it: mv scala-2.11.12 scala
scp 发送到其他机器
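
The notes don't show the Scala environment variables; a typical /etc/profile addition, assuming the /data/scala path used above:

export SCALA_HOME=/data/scala
export PATH=$PATH:$SCALA_HOME/bin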

Spark

cp spark-env.sh.template spark-env.sh

export SCALA_HOME=/data/scala
export HADOOP_HOME=/data/hadoop_base
export STANDALONE_SPARK_MASTER_HOST=hmaster01
export SPARK_MASTER_IP=$STANDALONE_SPARK_MASTER_HOST
export SPARK_LAUNCH_WITH_SCALA=0
export SPARK_LIBRARY_PATH=${SPARK_HOME}/lib
export SCALA_LIBRARY_PATH=${SPARK_HOME}/lib
export SPARK_MASTER_WEBUI_PORT=18080
export SPARK_WORKER_WEBUI_PORT=18081
export SPARK_LOG_DIR=/data/spark/logs

cp slaves.template slaves — list the worker nodes:
hnode01
hnode02
hnode03

mkdir -p /data/spark/logs

scp the spark directory to the other machines, then start the cluster from hmaster01:

cd /data/spark/sbin
./start-all.sh

Flink

Download flink-1.12.5-bin-scala_2.11.tgz and flink-shaded-hadoop-2-uber-2.7.5-10.0.jar
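
The shaded Hadoop jar is what gives this Flink setup access to HDFS; it is commonly placed in Flink's lib directory (the /data/flink install path is an assumption):

cp flink-shaded-hadoop-2-uber-2.7.5-10.0.jar /data/flink/lib/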

flink-conf.yaml
high-availability: zookeeper
high-availability.zookeeper.quorum: hnode01:2181,hnode02:2181,hnode03:2181
high-availability.storageDir: hdfs:///flink/ha/
high-availability.zookeeper.path.root: /flink
high-availability.cluster-id: /flinkCluster
state.backend: filesystem
state.checkpoints.dir: hdfs:///flink/checkpoints
state.savepoints.dir: hdfs:///flink/savepoints
jobmanager.memory.process.size: 1600m
taskmanager.memory.process.size: 1728m
taskmanager.numberOfTaskSlots: 3

masters
hmaster01:8081
hmaster02:8081

workers
hnode01
hnode02
hnode03
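
With ZooKeeper and HDFS already running, the standalone HA cluster is started from one master (install path assumed as above):

cd /data/flink
./bin/start-cluster.sh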

zoo.cfg
# The number of milliseconds of each tick
tickTime=2000

# The number of ticks that the initial  synchronization phase can take
initLimit=10

# The number of ticks that can pass between  sending a request and getting an acknowledgement
syncLimit=5

# The directory where the snapshot is stored (required; /data/zookeeper is an assumed path, adjust to your layout)
dataDir=/data/zookeeper

# The port at which the clients will connect
clientPort=2181

# ZooKeeper quorum peers
server.1=hnode01:2888:3888
server.2=hnode02:2888:3888
server.3=hnode03:2888:3888

scp to the other machines
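
Each ZooKeeper node also needs a myid file under dataDir whose content matches its server.N id; then the ensemble can be started (assuming ZooKeeper's bin is on the PATH):

echo 1 > /data/zookeeper/myid   # use 2 on hnode02 and 3 on hnode03
zkServer.sh start
zkServer.sh status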

Elasticsearch

tar -zxvf elasticsearch-7.6.1-linux-x86_64.tar.gz
mv elasticsearch-7.6.1 /data/elasticsearch
cd bin
./elasticsearch-certutil ca
./elasticsearch-certutil cert --ca ../elastic-stack-ca.p12
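
The transport SSL settings below look for the node certificate under config/cert; the filenames here assume certutil's default output names and locations, adjust if it wrote them elsewhere:

mkdir ../config/cert
mv ../elastic-certificates.p12 ../config/cert/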

config/elasticsearch.yml (node.name must be unique on each node):

cluster.name: my-es
node.name: esnode-1
node.master: true
node.data: true
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/logs
bootstrap.memory_lock: true
network.host: 192.168.1.208
network.tcp.no_delay: true
network.tcp.keep_alive: true
network.tcp.reuse_address: true
network.tcp.send_buffer_size: 256mb
network.tcp.receive_buffer_size: 256mb
transport.tcp.port: 9300
transport.tcp.compress: true
http.max_content_length: 200mb
http.cors.enabled: true
http.cors.allow-origin: "*"
http.port: 9200
discovery.seed_hosts: ["192.168.1.208:9300","192.168.1.173:9300","192.168.1.50:9300"]
cluster.initial_master_nodes: ["esnode-1","esnode-2","esnode-3"]  # must match node.name values; esnode-2/3 assumed for the other nodes
cluster.fault_detection.leader_check.interval: 15s
discovery.cluster_formation_warning_timeout: 30s
cluster.join.timeout: 30s
cluster.publish.timeout: 90s
cluster.routing.allocation.cluster_concurrent_rebalance: 16
cluster.routing.allocation.node_concurrent_recoveries: 16
cluster.routing.allocation.node_initial_primaries_recoveries: 16
xpack.security.enabled: true
xpack.license.self_generated.type: basic
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: cert/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: cert/elastic-certificates.p12

Point Elasticsearch at the JDK, e.g. in the profile of the user running it (ES 7.6 also ships a bundled JDK):
export JAVA_HOME=/data/jdk-11.0.12

Raise the mmap count; append to /etc/sysctl.conf and reload:
vm.max_map_count=262144
sysctl -p

vim /etc/security/limits.conf
* hard memlock unlimited
* soft memlock unlimited
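
Because xpack.security.enabled is true, the built-in user passwords must be set once the cluster is up (run on one node, from bin):

./elasticsearch-setup-passwords interactive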

ClickHouse

wget https://repo.yandex.ru/clickhouse/rpm/stable/x86_64/clickhouse-common-static-21.9.5.16-2.x86_64.rpm
wget https://repo.yandex.ru/clickhouse/rpm/stable/x86_64/clickhouse-server-21.9.5.16-2.noarch.rpm
wget https://repo.yandex.ru/clickhouse/rpm/stable/x86_64/clickhouse-common-static-dbg-21.9.5.16-2.x86_64.rpm
wget https://repo.yandex.ru/clickhouse/rpm/stable/x86_64/clickhouse-client-21.9.5.16-2.noarch.rpm

rpm -ivh clickhouse-*

Generate a random password and its double-SHA1 hex digest (the form ClickHouse stores):
PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
0i2HdZgK   # example output: the plaintext password; the hex digest follows on the next line
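
The digest then goes into /etc/clickhouse-server/users.xml; a minimal sketch for the default user (element names are real, the placement shown is illustrative):

<yandex>
  <users>
    <default>
      <password_double_sha1_hex>paste the hex digest here</password_double_sha1_hex>
    </default>
  </users>
</yandex>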

Create the working directories under the ClickHouse data path (e.g. /var/lib/clickhouse) and make the clickhouse user their owner:
mkdir data format_schemas tmp user_files
chown -R clickhouse:clickhouse ./*
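
Then start the server and connect with the generated password:

systemctl start clickhouse-server
clickhouse-client --password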