#!/bin/bash
# 2024-12

# Resolve the script's root directory (the parent of the directory holding
# this script) and freeze it for the rest of the run.
# Fixes: quoted expansions, $() instead of backticks, '&&' so a failed cd
# cannot silently make INITDIR point at the wrong directory, and dropped the
# redundant 'date -d today' (plain 'date' already means "now").
readonly INITDIR=$(cd "$(dirname "$0")" && dirname "$PWD")
STAMP=$(date +"%Y%m%d%H%M%S")
# Pull in deployment settings (expected to define NODES, WEBHOST, ...).
source "$INITDIR/conf/init.conf"
# NODES is a whitespace-separated host list; word-splitting here is intentional.
arr=( $NODES )

# --- Install Flink ---------------------------------------------------------
# Remove any previous installation so the tarball extracts onto a clean slate.
if [ ! -d "/opt/flink-1.8.2/" ]; then
  echo "检测无/opt/flink-1.8.2/目录"
else
  \rm -rf /opt/flink-1.8.2/
  echo "检测已存在/opt/flink-1.8.2/目录，并已删除"
fi
tar -xzf "$INITDIR/file/flink/flink-1.8.2.tar.gz" -C /opt/

# Point the Flink config at the configured web host (template placeholder: zwlbs106).
sed -i "s/zwlbs106/$WEBHOST/g" /opt/flink-1.8.2/conf/flink-conf.yaml

# Export Flink environment variables idempotently.
# BUG FIX: the delete pattern was "FLIMK_HOME" (typo), so stale lines were
# never removed and every re-run appended duplicate exports to /etc/profile.
# "FLINK_HOME" matches both the export and the PATH line below.
sed -i "/FLINK_HOME/d" /etc/profile
echo 'export FLINK_HOME=/opt/flink-1.8.2' >> /etc/profile
echo 'export PATH=$FLINK_HOME/bin:$PATH' >> /etc/profile
source /etc/profile
echo "flink安装完成"

# Preserve any existing /cluster/ tree by renaming it with the run timestamp,
# then create a fresh working directory for this deployment.
if [ -d "/cluster/" ]; then
  mv /cluster/ "/cluster_$STAMP"
  echo "检测已存在/cluster/目录，并已重命名备份"
else
  echo "检测无/cluster/目录"
fi
mkdir -p /cluster/default/

# --- Install Flink job packages --------------------------------------------
# The "universal" bundle is extracted once, renamed to become the positional
# job, then cloned for the other three jobs. Each job then gets its own
# production profile name stamped into its start script, plus the
# streaming-load jar in its lib directory.
tar -xzf $INITDIR/file/flink/universal-0.1.0.tar.gz -C /cluster/default/
mv /cluster/default/universal-0.1.0/ /cluster/default/positional-0.1.0/
for job in alarm adas address; do
  \cp -rf /cluster/default/positional-0.1.0/ "/cluster/default/${job}-0.1.0/"
done

# profile=prod-<job> selects the job's Spring-style profile at startup.
for job in adas alarm positional address; do
  sed -i "s/^profile=.*/profile=prod-${job}/" "/cluster/default/${job}-0.1.0/bin/start.sh"
done

for job in adas alarm positional address; do
  \cp -rf $INITDIR/file/flink/streamingload*.jar "/cluster/default/${job}-0.1.0/lib/"
done

echo "flink相关程序包安装完成"

# --- Install Spark ---------------------------------------------------------
# Wipe any previous /opt/spark2/ before extracting the bundled distribution.
if [ -d "/opt/spark2/" ]; then
  \rm -rf /opt/spark2/
  echo "检测已存在/opt/spark2/目录，并已删除"
else
  echo "检测无/opt/spark2/目录"
fi
tar -xf $INITDIR/file/bigdata/spark2.tar.gz -C /opt/
\cp -rf $INITDIR/file/bigdata/chfile/spark-defaults.conf /opt/spark2/conf/
# Replace the template master host (ZWlbs3) with this machine's hostname.
sed -i "s/ZWlbs3/$(hostname)/" /opt/spark2/conf/spark-defaults.conf
# Drop the bundled phoenix-server jar if present (conflicts with the jars
# copied in from the gather package later on).
if [ -e /opt/spark2/jars/phoenix-server.jar ]; then
  \rm -rf /opt/spark2/jars/phoenix-server.jar
fi
echo "spark安装完成"

# --- Install the big-data (gather) package ---------------------------------
tar -xf $INITDIR/file/bigdata/gather-*.tar.gz -C /cluster/
# FIX: resolve the extracted gather-* directory ONCE instead of re-running
# "ls | grep" before every single command below (fragile ls-parsing, and it
# could change mid-run).
GATHER_DIR=$(ls /cluster/ | grep 'gather')
\cp -f $INITDIR/file/bigdata/gather.json "/cluster/$GATHER_DIR/conf/"
\cp -f $INITDIR/file/bigdata/gather.properties "/cluster/$GATHER_DIR/conf/"
echo "大数据包已复制到指定路径"

# Strip DOS carriage returns from the config and launch scripts.
# BUG FIX: the CR was previously embedded in this script as a literal control
# character spanning two physical lines — editors and file transfers silently
# mangle that; use sed's \r escape instead.
find "/cluster/$GATHER_DIR/conf/" "/cluster/$GATHER_DIR/bin" -type f -exec sed -i 's/\r//' {} +
\cp -f "/cluster/$GATHER_DIR/lib/"* /opt/spark2/jars/

# Refresh root's crontab entries: delete any previous gather jobs first so
# repeated runs of this installer don't accumulate duplicates.
sed -i "/\/cluster\/gather/d" /var/spool/cron/root
echo "00  3  *  *  0   sh /cluster/$GATHER_DIR/bin/batchTable.sh" >> /var/spool/cron/root
echo "00  2  *  *  *   sh /cluster/$GATHER_DIR/bin/start.sh" >> /var/spool/cron/root
echo "大数据已加入定时任务"

# Reload the environment, bring up the Spark master and a local worker
# (pausing after each to let the daemon settle), then run the initial
# table-creation batch job.
source /etc/profile
master_url="spark://$(hostname):7077"
/bin/sh /opt/spark2/sbin/start-master.sh && sleep 10
/bin/sh /opt/spark2/sbin/start-slave.sh "$master_url" && sleep 10
/bin/sh "/cluster/$(ls /cluster/ | grep 'gather')/bin/batchTable.sh"
