---
# Shared Hadoop configuration: env script plus core-site/hdfs-site XML,
# mounted into both the namenode Deployment and the datanode StatefulSet.
apiVersion: v1
kind: ConfigMap
metadata:
  name: hadoop
  namespace: big-data
  labels:
    app: hadoop
data:
  hadoop-env.sh: |
    export HDFS_DATANODE_USER=root
    export HDFS_NAMENODE_USER=root
    export HDFS_SECONDARYNAMENODE_USER=root
    export JAVA_HOME=/usr/local/openjdk-8
    export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
    export HADOOP_OPTS="-Djava.library.path=${HADOOP_HOME}/lib/native"
  core-site.xml: |
    <?xml version="1.0" encoding="UTF-8"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <configuration>
      <property>
        <!-- Points clients and datanodes at the namenode RPC Service below. -->
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop-master:9000</value>
      </property>
      <property>
        <!-- Bind RPC to all interfaces so the pod is reachable via the Service. -->
        <name>dfs.namenode.rpc-bind-host</name>
        <value>0.0.0.0</value>
      </property>
    </configuration>
  hdfs-site.xml: |
    <?xml version="1.0" encoding="UTF-8"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <configuration>
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///opt/hadoop/hdfs/name</value>
      </property>
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///opt/hadoop/hdfs/data</value>
      </property>
      <property>
        <!-- Pod IPs have no reverse DNS; skip hostname verification on
             datanode registration. -->
        <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
        <value>false</value>
      </property>
      <property>
        <name>dfs.replication</name>
        <value>1</value>
      </property>
    </configuration>
---
# namenode svc
apiVersion: v1
kind: Service
metadata:
  name: hadoop-master
  namespace: big-data
spec:
  selector:
    app: hadoop-namenode
  type: NodePort
  ports:
    - name: rpc
      port: 9000
      targetPort: 9000
    - name: http
      port: 9870
      targetPort: 9870
      # Must fall within the cluster's service-node-port-range
      # (default 30000-32767); the original value 9870 would be
      # rejected by the API server.
      nodePort: 30870
# namenode pod
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hadoop-namenode
  namespace: big-data
spec:
  strategy:
    # Single-writer HDFS metadata: never run two namenodes against the
    # same volume, so replace the old pod before starting the new one.
    type: Recreate
  selector:
    matchLabels:
      app: hadoop-namenode
  template:
    metadata:
      labels:
        app: hadoop-namenode
    spec:
      volumes:
        - name: hadoop-env
          configMap:
            name: hadoop
            items:
              - key: hadoop-env.sh
                path: hadoop-env.sh
        - name: core-site
          configMap:
            name: hadoop
            items:
              - key: core-site.xml
                path: core-site.xml
        - name: hdfs-site
          configMap:
            name: hadoop
            items:
              - key: hdfs-site.xml
                path: hdfs-site.xml
        - name: hadoop-data
          persistentVolumeClaim:
            claimName: data-hadoop-namenode
      containers:
        - name: hadoop
          image: registry:5000/hadoop
          imagePullPolicy: Always
          ports:
            - containerPort: 22
            - containerPort: 9000
            - containerPort: 9870
          volumeMounts:
            # subPath mounts overlay single config files without hiding
            # the rest of /opt/hadoop/etc/hadoop.
            - name: hadoop-env
              mountPath: /opt/hadoop/etc/hadoop/hadoop-env.sh
              subPath: hadoop-env.sh
            - name: core-site
              mountPath: /opt/hadoop/etc/hadoop/core-site.xml
              subPath: core-site.xml
            - name: hdfs-site
              mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
              subPath: hdfs-site.xml
            - name: hadoop-data
              mountPath: /opt/hadoop/hdfs/
              subPath: hdfs
            - name: hadoop-data
              mountPath: /opt/hadoop/logs/
              subPath: logs
          env:
            - name: HADOOP_NODE_TYPE
              value: namenode
---
# Backing storage for namenode metadata and logs.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-hadoop-namenode
  namespace: big-data
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 256Gi
  storageClassName: "managed-nfs-storage"
# datanode pod
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: hadoop-datanode
  namespace: big-data
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hadoop-datanode
  serviceName: hadoop-datanode
  template:
    metadata:
      labels:
        app: hadoop-datanode
    spec:
      volumes:
        - name: hadoop-env
          configMap:
            name: hadoop
            items:
              - key: hadoop-env.sh
                path: hadoop-env.sh
        - name: core-site
          configMap:
            name: hadoop
            items:
              - key: core-site.xml
                path: core-site.xml
        - name: hdfs-site
          configMap:
            name: hadoop
            items:
              - key: hdfs-site.xml
                path: hdfs-site.xml
      containers:
        - name: hadoop
          image: registry:5000/hadoop
          imagePullPolicy: Always
          ports:
            - containerPort: 22
            - containerPort: 9000
            - containerPort: 9870
          volumeMounts:
            - name: hadoop-env
              mountPath: /opt/hadoop/etc/hadoop/hadoop-env.sh
              subPath: hadoop-env.sh
            - name: core-site
              mountPath: /opt/hadoop/etc/hadoop/core-site.xml
              subPath: core-site.xml
            - name: hdfs-site
              mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
              subPath: hdfs-site.xml
            # "data" is provided per-replica by volumeClaimTemplates below.
            - name: data
              mountPath: /opt/hadoop/hdfs/
              subPath: hdfs
            - name: data
              mountPath: /opt/hadoop/logs/
              subPath: logs
          env:
            - name: HADOOP_NODE_TYPE
              value: datanode
  volumeClaimTemplates:
    # NOTE(review): "namespace" is not allowed in volumeClaimTemplates
    # metadata (claims inherit the StatefulSet's namespace), so the
    # original "namespace: big-data" entry was dropped.
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteMany
        resources:
          requests:
            storage: 256Gi
        storageClassName: "managed-nfs-storage"