Install Hadoop Cluster with Docker


name@host:/# wget https://raw.github.com/zettio/weave/master/weave
name@host:/# sudo mv weave /usr/local/bin/
name@host:/# sudo chmod +x /usr/local/bin/weave
name@host:/# weave launch
name@host:/# eval $(weave env)
name@host:/# docker run --name=hnode1 --hostname=hnode1.weave.local -it ubuntu /bin/bash

// in a second terminal (the first one is attached to hnode1's shell):
name@host:/# eval $(weave env)
name@host:/# docker run --name=hnode2 --hostname=hnode2.weave.local -it ubuntu /bin/bash
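
// a quick sanity check, assuming weave's DNS came up with "weave launch" and ping is present in the image: the containers should resolve each other by hostname
root@hnode1:~# ping -c 1 hnode2.weave.local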

root@hnode1:~# passwd
Enter new UNIX password: root
root@hnode1:~# apt-get update
root@hnode1:~# apt-get install openssh-server vim rsync
root@hnode1:~# sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config
root@hnode1:~# /etc/init.d/ssh start
root@hnode1:~# ssh-keygen

root@hnode2:~# passwd
Enter new UNIX password: root
root@hnode2:~# apt-get update
root@hnode2:~# apt-get install openssh-server vim rsync
root@hnode2:~# sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config
root@hnode2:~# /etc/init.d/ssh start
root@hnode2:~# ssh-keygen

root@hnode1:/opt# cat /root/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
root@hnode1:/opt# chmod 700 ~/.ssh
root@hnode1:/opt# chmod 600 ~/.ssh/authorized_keys
root@hnode1:~#scp /root/.ssh/id_rsa.pub root@hnode2:/tmp

root@hnode2:~#scp /root/.ssh/id_rsa.pub root@hnode1:/tmp
root@hnode1:~#cat /tmp/id_rsa.pub >> ~/.ssh/authorized_keys

root@hnode2:/opt# cat /root/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
root@hnode2:~#cat /tmp/id_rsa.pub >> ~/.ssh/authorized_keys
root@hnode2:~#chmod 700 ~/.ssh
root@hnode2:~#chmod 600 ~/.ssh/authorized_keys
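
// before moving on, verify that passwordless root ssh works in both directions; each command should print the peer's hostname without asking for a password:
root@hnode1:~# ssh root@hnode2.weave.local hostname
root@hnode2:~# ssh root@hnode1.weave.local hostname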

root@hnode1:/# wget http://apache.openmirror.de/hadoop/common/hadoop-2.6.2/hadoop-2.6.2.tar.gz
root@hnode1:/#tar -xvf hadoop-2.6.2.tar.gz
root@hnode1:/#mv hadoop-2.6.2 /opt/

root@hnode1:/# vim /opt/hadoop-2.6.2/etc/hadoop/core-site.xml

<property>
  <name>fs.defaultFS</name>
  <value>hdfs://hnode1.weave.local/</value>
</property>

root@hnode1:/# vim /opt/hadoop-2.6.2/etc/hadoop/hdfs-site.xml

<property>
  <name>dfs.namenode.name.dir</name>
  <value>/disk1/hdfs/name,/remote/hdfs/name</value>
</property>

root@hnode1:/# vim /opt/hadoop-2.6.2/etc/hadoop/yarn-site.xml

<property>
  <name>yarn.resourcemanager.hostname</name>
  <value>hnode1.weave.local</value>
</property>
<property>
  <name>yarn.nodemanager.local-dirs</name>
  <value>/disk1/nm-local-dir,/disk2/nm-local-dir</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.nodemanager.resource.memory-mb</name>
  <value>16384</value>
</property>
<property>
  <name>yarn.nodemanager.resource.cpu-vcores</name>
  <value>16</value>
</property>

root@hnode1:/# mkdir -p /disk1/hdfs/name /remote/hdfs/name /disk1/nm-local-dir /disk2/nm-local-dir

root@hnode2:/# wget http://apache.openmirror.de/hadoop/common/hadoop-2.6.2/hadoop-2.6.2.tar.gz
root@hnode2:/#tar -xvf hadoop-2.6.2.tar.gz
root@hnode2:/#mv hadoop-2.6.2 /opt/

root@hnode2:/# vim /opt/hadoop-2.6.2/etc/hadoop/core-site.xml

<property>
  <name>fs.defaultFS</name>
  <value>hdfs://hnode1.weave.local/</value>
</property>

root@hnode2:/# vim /opt/hadoop-2.6.2/etc/hadoop/hdfs-site.xml

<property>
  <name>dfs.datanode.data.dir</name>
  <value>/disk1/hdfs/data,/disk2/hdfs/data</value>
</property>

root@hnode2:/# mkdir -p /disk1/hdfs/data /disk2/hdfs/data

name@host:/#HN1_IP=$(docker inspect -f '{{ .NetworkSettings.IPAddress }}' hnode1)

name@host:/#HN2_IP=$(docker inspect -f '{{ .NetworkSettings.IPAddress }}' hnode2)

name@host:/#scp ~/Downloads/jdk-8u65-linux-x64.tar.gz root@$HN1_IP:/opt

name@host:/#scp ~/Downloads/jdk-8u65-linux-x64.tar.gz root@$HN2_IP:/opt

root@hnode1:/opt# tar -xvf jdk-8u65-linux-x64.tar.gz
root@hnode1:~# echo "JAVA_HOME=/opt/jdk1.8.0_65" >> /etc/environment
root@hnode1:~# echo 'PATH=$JAVA_HOME/bin:$PATH' >> /etc/environment
root@hnode1:~# source /etc/environment
root@hnode1:~# export JAVA_HOME=/opt/jdk1.8.0_65

root@hnode2:/opt# tar -xvf jdk-8u65-linux-x64.tar.gz
root@hnode2:~# echo "JAVA_HOME=/opt/jdk1.8.0_65" >> /etc/environment
root@hnode2:~# echo 'PATH=$JAVA_HOME/bin:$PATH' >> /etc/environment
root@hnode2:~# source /etc/environment
root@hnode2:~# export JAVA_HOME=/opt/jdk1.8.0_65
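
// the Hadoop scripts read JAVA_HOME from hadoop-env.sh; if they fail to pick it up from the environment, set it there explicitly on both nodes (path taken from the JDK layout above):
root@hnode1:~# echo "export JAVA_HOME=/opt/jdk1.8.0_65" >> /opt/hadoop-2.6.2/etc/hadoop/hadoop-env.sh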

root@hnode1:/# /opt/hadoop-2.6.2/bin/hdfs namenode -format mycluster

root@hnode1:~# /opt/hadoop-2.6.2/sbin/start-dfs.sh

// on hnode2 only the datanode needs starting, since the namenode already runs on hnode1:
root@hnode2:~# /opt/hadoop-2.6.2/sbin/hadoop-daemon.sh start datanode
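
// a minimal check that the daemons are up and the datanodes registered with the namenode:
root@hnode1:~# /opt/jdk1.8.0_65/bin/jps
root@hnode1:~# /opt/hadoop-2.6.2/bin/hdfs dfsadmin -report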

Install Hortonworks Hadoop with Ambari on Docker


name@host:/#docker run --name=data1 --hostname=data1 -it ubuntu /bin/bash

name@host:/#docker run --name=data2 --hostname=data2 -it ubuntu /bin/bash

name@host:/#docker run --name=name --hostname=name --link data1:data1 --link data2:data2 -it ubuntu /bin/bash

name@host:/#docker run --name=ambari --hostname=ambari --link data1:data1 --link data2:data2 --link name:name -it ubuntu /bin/bash

name@host:/#sudo bash -c "curl https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework > /usr/local/bin/pipework"

name@host:/#sudo chmod u+x /usr/local/bin/pipework

name@host:/#sudo apt-get install iproute2

name@host:/#sudo /usr/local/bin/pipework br1 data1 192.168.0.50/24

name@host:/#sudo /usr/local/bin/pipework br1 data2 192.168.0.51/24

name@host:/#sudo /usr/local/bin/pipework br1 name 192.168.0.53/24

name@host:/#sudo /usr/local/bin/pipework br1 ambari 192.168.0.54/24
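
// assuming the br1 addresses above were applied, the containers should now reach each other over the new interface (data2 is 192.168.0.51 here):
root@data1:~# ping -c 1 192.168.0.51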

root@data1:~# passwd
Enter new UNIX password: root

root@data1:~# apt-get update
root@data1:~# apt-get install ssh

root@data1:~# sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config

root@data1:~# /etc/init.d/ssh start
root@data1:~# ssh-keygen

root@data2:~# passwd
Enter new UNIX password: root

root@data2:~# apt-get update
root@data2:~# apt-get install ssh

root@data2:~# sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config

root@data2:~# /etc/init.d/ssh start
root@data2:~# ssh-keygen

root@name:~# passwd
Enter new UNIX password: root

root@name:~# apt-get update
root@name:~# apt-get install ssh

root@name:~# sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config

root@name:~# /etc/init.d/ssh start
root@name:~# ssh-keygen

root@ambari:~# passwd
Enter new UNIX password: root

root@ambari:~# apt-get update
root@ambari:~# apt-get install ssh

root@ambari:~# sed -i 's/PermitRootLogin without-password/PermitRootLogin yes/' /etc/ssh/sshd_config

root@ambari:~# /etc/init.d/ssh start
root@ambari:~# ssh-keygen

root@name:~# scp /root/.ssh/id_rsa.pub root@data1:/tmp
root@name:~# scp /root/.ssh/id_rsa.pub root@data2:/tmp

root@data1:~#cat /tmp/id_rsa.pub >> ~/.ssh/authorized_keys
root@data1:~#chmod 700 ~/.ssh
root@data1:~#chmod 600 ~/.ssh/authorized_keys

root@data2:~#cat /tmp/id_rsa.pub >> ~/.ssh/authorized_keys
root@data2:~#chmod 700 ~/.ssh
root@data2:~#chmod 600 ~/.ssh/authorized_keys

root@ambari:~# scp /root/.ssh/id_rsa.pub root@name:/tmp
root@ambari:~# scp /root/.ssh/id_rsa.pub root@data1:/tmp/ambari_id_rsa.pub
root@ambari:~# scp /root/.ssh/id_rsa.pub root@data2:/tmp/ambari_id_rsa.pub

root@data1:~#cat /tmp/ambari_id_rsa.pub >> ~/.ssh/authorized_keys
root@data1:~#/etc/init.d/ssh restart
root@data2:~#cat /tmp/ambari_id_rsa.pub >> ~/.ssh/authorized_keys
root@data2:~#/etc/init.d/ssh restart

root@name:~#cat /tmp/id_rsa.pub >> ~/.ssh/authorized_keys
root@name:~#/etc/init.d/ssh restart
root@name:~#chmod 700 ~/.ssh
root@name:~#chmod 600 ~/.ssh/authorized_keys
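
// Ambari registers hosts over passwordless root ssh from the ambari container, so verify it now rather than inside the wizard:
root@ambari:~# ssh root@name hostname
root@ambari:~# ssh root@data1 hostname
root@ambari:~# ssh root@data2 hostname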

root@ambari:/#apt-get update
root@ambari:/#apt-get install postgresql postgresql-contrib
root@ambari:/#/etc/init.d/postgresql start

root@data1:/#wget -nv http://public-repo-1.hortonworks.com/ambari/ubuntu14/2.x/updates/2.1.2/ambari.list -O /etc/apt/sources.list.d/ambari.list

root@data1:/#apt-key adv --recv-keys --keyserver keyserver.ubuntu.com B9733A7A07513CAD

root@data1:/#apt-get update

root@data2:/#wget -nv http://public-repo-1.hortonworks.com/ambari/ubuntu14/2.x/updates/2.1.2/ambari.list -O /etc/apt/sources.list.d/ambari.list

root@data2:/#apt-key adv --recv-keys --keyserver keyserver.ubuntu.com B9733A7A07513CAD

root@data2:/#apt-get update

root@name:/#wget -nv http://public-repo-1.hortonworks.com/ambari/ubuntu14/2.x/updates/2.1.2/ambari.list -O /etc/apt/sources.list.d/ambari.list

root@name:/#apt-key adv --recv-keys --keyserver keyserver.ubuntu.com B9733A7A07513CAD

root@name:/#apt-get update

root@ambari:/#wget -nv http://public-repo-1.hortonworks.com/ambari/ubuntu14/2.x/updates/2.1.2/ambari.list -O /etc/apt/sources.list.d/ambari.list

root@ambari:/#apt-key adv --recv-keys --keyserver keyserver.ubuntu.com B9733A7A07513CAD

root@ambari:/#apt-get update

root@ambari:/#apt-get install ambari-server
root@ambari:/#apt-get install ambari-agent

root@ambari:/#ambari-server setup
// select default answer for all questions during setup

root@ambari:/#ambari-server start
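
// ambari-server status confirms the server process is actually running before opening the UI:
root@ambari:/# ambari-server status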

name@host:/#docker inspect -f '{{ .NetworkSettings.IPAddress }}' ambari

172.17.0.17

// open http://172.17.0.17:8080 in a browser and log in with admin/admin as username/password
// click the Launch Install Wizard button
// enter mycluster as the cluster name, then click Next
// select HDP 2.3, then click Next
// in Target Hosts enter
// name
// data1
// data2
// and

root@ambari:/# cat ~/.ssh/id_rsa
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAk...47QxOaBp4tIy+9Ezg==
-----END RSA PRIVATE KEY-----

// copy the private key into the SSH Private Key text box
// then click Register and Confirm
// every host should register successfully; click Next
// on the Choose Services page, select HDFS, YARN + MapReduce2, ZooKeeper, and Ambari Metrics, then click Next
// on the Assign Masters page, place the NameNode on name and the SNameNode (secondary namenode) on data1, then click Next
// on the Assign Slaves and Clients page, check DataNode and NodeManager on data1 and data2, then click Next
// on the Customize Services page click Next, and after the Review page, click Deploy

MariaDB Galera Cluster with Docker


mkdir -p /data/mariadb1
mkdir -p /data/mariadb2

docker run -d -v /data/mariadb2:/var/lib/mysql -p 3306 -p 8080 -e XTRABACKUP_PASSWORD=abc -e MYSQL_ROOT_PASSWORD=secret --name=seed irmann/galera-mariadb-10.0-xtrabackup seed

docker exec -it seed bash

root@2954e5d5249c:/# export TERM=dumb

root@2954e5d5249c:/# mysql -u root -psecret -e 'CREATE DATABASE playground;'

root@2954e5d5249c:/#mysql -u root -psecret -e 'CREATE TABLE playground.equipment ( id INT NOT NULL AUTO_INCREMENT, type VARCHAR(50), quant INT, color VARCHAR(25), PRIMARY KEY(id));'

root@2954e5d5249c:/#mysql -u root -psecret -e 'INSERT INTO playground.equipment (type, quant, color) VALUES ("slide", 2, "blue")'

root@2954e5d5249c:/#mysql -u root -psecret -e 'SELECT * FROM playground.equipment;'

SEED_IP=$(docker inspect -f '{{ .NetworkSettings.IPAddress }}' seed)

echo $SEED_IP
172.17.0.58

docker run -d --name=node -v /data/mariadb1:/var/lib/mysql -p 3306 -p 8080 -e XTRABACKUP_PASSWORD=abc irmann/galera-mariadb-10.0-xtrabackup node 172.17.0.58

docker exec -it node bash

root@ccf73de2218b:/# export TERM=dumb

root@ccf73de2218b:/#mysql -u root -psecret -e 'SELECT * FROM playground.equipment;'
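
the query above should return the row inserted on the seed; to confirm the node really joined, Galera's status counter should report two cluster members:

root@ccf73de2218b:/# mysql -u root -psecret -e "SHOW STATUS LIKE 'wsrep_cluster_size';"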

Cassandra Cluster with Multi-Host Docker

on server 192.168.112.101, run a Cassandra seed node; in its cassandra.yaml, set listen_address, broadcast_address, and rpc_address to 192.168.112.101:

docker run --net=host -d --name cass1 -p 9042:9042 -p 7001:7001 -p 7000:7000 -p 7199:7199 -p 9160:9160 _YOUR_DOCKER_IMAGE_

on server 192.168.112.102, run a second Cassandra node; in its cassandra.yaml, set listen_address, broadcast_address, and rpc_address to 192.168.112.102, and point the seeds parameter at the seed node:

- class_name: org.apache.cassandra.locator.SimpleSeedProvider
  parameters:
      - seeds: "192.168.112.101"

docker run --net=host -d --name cass2 _YOUR_DOCKER_IMAGE_

192.168.112.101 and 192.168.112.102 must be able to reach each other on ports 7000, 7001, 7199, and 9042.
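
once both containers are up, nodetool (assuming it is on the image's PATH) should list the two nodes as UN (Up/Normal):

docker exec -it cass1 nodetool status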

Errors While Building a Cassandra Docker Image on OpenWrt

conf/cassandra-env.sh: line 91: /opt/jre/bin/java: not found

bash-4.3# which java
/opt/jre/bin/bundled/java

this JRE keeps its java binary under bin/bundled, so point the Cassandra launcher at it from the Dockerfile:

vi Dockerfile

RUN sed -i 's/\"$JAVA_HOME\"\/bin\/java/\"$JAVA_HOME\"\/bin\/bundled\/java/g' /opt/cassandra/bin/cassandra

/opt/cassandra/bin/cassandra

./cassandra: line 165: getopt: not found

busybox on OpenWrt lacks the getopt invocation this script uses, so comment out the option parsing in the launcher and hardcode the values it would have set:

vi /opt/cassandra/bin/cassandra

#args=`getopt vfhp:bD:H:E: "$@"`
#eval set -- "$args"
classname="org.apache.cassandra.service.CassandraDaemon"
#while true; do
...

foreground="yes"
properties="-XX:ErrorFile=/etc/cassandra -XX:HeapDumpPath=/etc/cassandra"
launch_service "$foreground" "$properties" "$classname"

/opt/cassandra/bin/cassandra

# There is insufficient memory for the Java Runtime Environment to continue.
# Native memory allocation (malloc) failed to allocate 8380219392 bytes for committing reserved memory.

to fix this, modify cassandra-env.sh: add

MAX_HEAP_SIZE=1000M

before the line

JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}"
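
note that cassandra-env.sh normally insists that MAX_HEAP_SIZE and HEAP_NEWSIZE are set (or unset) as a pair; adding the line after that check, as above, sidesteps the complaint, but setting both is the safer variant (200M is just an example value):

MAX_HEAP_SIZE=1000M
HEAP_NEWSIZE=200M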

Error: Exception thrown by the agent : java.net.MalformedURLException: Local host name unknown: java.net.UnknownHostException: cassandra-host: cassandra-host

import java.net.Inet4Address;
import java.net.UnknownHostException;

public class Test {
    public static void main(String[] args) throws UnknownHostException {
        // reproduce the hostname lookup that the agent performs at startup
        System.setProperty("java.net.preferIPv4Stack", "true");
        System.out.println(Inet4Address.getLocalHost());
    }
}

bash-4.3# java Test
Exception in thread "main" java.net.UnknownHostException: 30b91803e12d: 30b91803e12d
at java.net.InetAddress.getLocalHost(InetAddress.java:1473)
at Test.main(Test.java:8)
Caused by: java.net.UnknownHostException: 30b91803e12d
at java.net.Inet4AddressImpl.lookupAllHostAddr(Native Method)
at java.net.InetAddress$1.lookupAllHostAddr(InetAddress.java:901)
at java.net.InetAddress.getAddressesFromNameService(InetAddress.java:1293)
at java.net.InetAddress.getLocalHost(InetAddress.java:1469)
... 1 more

the following NSS libraries were missing from jre/lib and had to be copied in:

libnss_dns-2.19.so
libnss_files-2.19.so
libnss_dns.so.2
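
if the exception still appears with the NSS libraries in place, a common container-side workaround is mapping the container's hostname to loopback:

echo "127.0.0.1 $(hostname)" >> /etc/hosts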