Scala + Spring Boot (Actuator RESTful API)


build.sbt

lazy val springVersion = "1.5.3.RELEASE"
lazy val thymeleafVersion = "2.1.5.RELEASE"

lazy val root = (project in file(".")).
  settings(
    inThisBuild(List(
      organization := "org.bk",
      scalaVersion := "2.11.0",
      version := "1.0"
    )),
    name := "immosis-scheduler-immobilienscout24",
    libraryDependencies += "org.springframework.boot" % "spring-boot-starter-web" % springVersion,
    libraryDependencies += "org.springframework.boot" % "spring-boot-starter-actuator" % springVersion
  )

mainClass in (Compile, run) := Some("HelloWorldApplication")

HelloWorldApplication.scala

import org.springframework.boot.SpringApplication
import org.springframework.boot.autoconfigure.SpringBootApplication

object HelloWorldApplication extends App {
  SpringApplication.run(classOf[HelloWorldApplication], args: _*)
}

@SpringBootApplication
class HelloWorldApplication

HelloWorldController.scala

import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{GetMapping, RequestParam, ResponseBody}

@Controller
class HelloWorldController {

  @GetMapping(Array("/hello-world"))
  @ResponseBody
  def sayHello(@RequestParam(name = "name", required = false, defaultValue = "Stranger") name: String): Greeting = {
    new Greeting("Hello, " + name)
  }
}

Greeting.scala
class Greeting(content: String) {
  def getContent(): String = content
}
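
Since the handler returns a response body, the same endpoint could equally be written with @RestController, which implies @ResponseBody on every handler method. A minimal sketch (the class name HelloWorldRestController and the /hello-world-v2 path are illustrative, not part of the original project):

import org.springframework.web.bind.annotation.{GetMapping, RequestParam, RestController}

@RestController
class HelloWorldRestController {

  @GetMapping(Array("/hello-world-v2"))
  def sayHello(@RequestParam(name = "name", required = false, defaultValue = "Stranger") name: String): Greeting =
    new Greeting("Hello, " + name)
}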

sbt run

curl localhost:8080/hello-world
{"content":"Hello, Stranger"}


VSCode Debug Mocha

.vscode/launch.json:

{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Run Mocha",
      "request": "launch",
      "type": "node",
      "program": "${workspaceFolder}/node_modules/mocha/bin/_mocha",
      "stopOnEntry": false,
      "args": ["--opts", "${workspaceFolder}/mocha.opts"],
      "cwd": "${workspaceFolder}",
      "runtimeExecutable": null,
      "env": {}
    }
  ]
}

mocha.opts :

--timeout 60000
--exit
--recursive test
--debug-brk

Landing Page Loader For React

loader.css:

.loader {
border: 16px solid #f3f3f3; /* Light grey */
border-top: 16px solid #3498db; /* Blue */
border-radius: 50%;
width: 120px;
height: 120px;
animation: spin 2s linear infinite;

display: inline-block;
position: fixed;
top: 0;
bottom: 0;
left: 0;
right: 0;
margin: auto;

}

@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}

index.html:

<div id="root">
  <div class="loader"></div>
</div>

The spinner markup lives inside the root element, so it shows as soon as the page loads and disappears once ReactDOM.render replaces the contents of #root with the app.

index.js:

import React from 'react';
import ReactDOM from 'react-dom';
import App from './App';
// registerServiceWorker ships with create-react-app projects; imported here so the call below resolves.
import registerServiceWorker from './registerServiceWorker';

ReactDOM.render(<App />, document.getElementById('root'));
registerServiceWorker();

Scala Parser for YAML File


import org.yaml.snakeyaml.Yaml

object Main {
  def main(args: Array[String]): Unit = {
    val text = scala.io.Source.fromInputStream(
      getClass.getResourceAsStream("codes.yaml")).mkString
    val yaml = new Yaml
    val obj = yaml.load(text)
    // SnakeYAML returns java.util collections; the top level of codes.yaml is a map of maps.
    val codes = obj.asInstanceOf[java.util.Map[String, java.util.Map[String, Any]]]
    val values = codes.get("duration").get("values")
      .asInstanceOf[java.util.List[java.util.Map[String, Any]]]
    values.toArray.foreach(c => println(c.asInstanceOf[java.util.Map[String, Any]].get("code")))
  }
}

codes.yaml:

duration:
  values:
    -
      code: 1
      from: 0
      to: 1
    -
      code: 2
      from: 1
      to: 3

build.sbt

libraryDependencies += "org.yaml" % "snakeyaml" % "1.8"
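
The toArray/cast traversal above can also be written with scala.collection.JavaConverters, which wraps the Java collections returned by SnakeYAML in Scala-friendly views. A sketch under the same assumptions (codes.yaml on the classpath, snakeyaml on the build path; the object name MainWithConverters is illustrative):

import org.yaml.snakeyaml.Yaml
import scala.collection.JavaConverters._

object MainWithConverters {
  def main(args: Array[String]): Unit = {
    val text = scala.io.Source.fromInputStream(
      getClass.getResourceAsStream("codes.yaml")).mkString
    val root = new Yaml().load(text)
      .asInstanceOf[java.util.Map[String, java.util.Map[String, Any]]]
    val values = root.get("duration").get("values")
      .asInstanceOf[java.util.List[java.util.Map[String, Any]]]
    // asScala exposes the Java list as a Scala collection, so ordinary combinators work on it.
    values.asScala.foreach(entry => println(entry.get("code")))
  }
}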

Install oozie 4.3.0


wget http://www-us.apache.org/dist/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz

tar -xvf hadoop-2.7.3.tar.gz

cd hadoop-2.7.3/
vim etc/hadoop/core-site.xml

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://localhost:9000</value>
  </property>
</configuration>

vim etc/hadoop/hdfs-site.xml

<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///home/your_user_name/data/hadoop/</value>
  </property>
</configuration>

sudo apt-get install openssh-server
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys

vim etc/hadoop/hadoop-env.sh
export JAVA_HOME=/opt/jdk

bin/hdfs namenode -format
sbin/start-dfs.sh

wget http://www-eu.apache.org/dist/oozie/4.3.0/oozie-4.3.0.tar.gz

tar -xvf oozie-4.3.0.tar.gz

cd oozie-4.3.0

./bin/mkdistro.sh -P hadoop-2 -DskipTests

cp -R distro/target/oozie-4.3.0-distro/oozie-4.3.0/ ../oozie

cd ../oozie

mkdir libext
cd libext
wget http://extjs.com/deploy/ext-2.2.zip
cd ..

cp $HADOOP_HOME/share/hadoop/**/*.jar libext/
cp $HADOOP_HOME/share/hadoop/common/*.jar libext/
cp $HADOOP_HOME/share/hadoop/common/lib/*.jar libext/
cp $HADOOP_HOME/share/hadoop/hdfs/lib/*.jar libext/
cp $HADOOP_HOME/share/hadoop/hdfs/*.jar libext/
cp $HADOOP_HOME/share/hadoop/mapreduce/*.jar libext/
cp $HADOOP_HOME/share/hadoop/mapreduce/lib/*.jar libext/
cp $HADOOP_HOME/share/hadoop/yarn/lib/*.jar libext/
cp $HADOOP_HOME/share/hadoop/yarn/*.jar libext/

./bin/oozie-setup.sh prepare-war

vim $HADOOP_HOME/etc/hadoop/core-site.xml

<property>
  <name>hadoop.proxyuser.your_user_name.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.your_user_name.groups</name>
  <value>*</value>
</property>

$HADOOP_HOME/sbin/stop-dfs.sh
$HADOOP_HOME/sbin/start-dfs.sh

./bin/oozie-setup.sh sharelib create -fs hdfs://localhost:9000

./bin/ooziedb.sh create -sqlfile oozie.sql -run

vim conf/oozie-site.xml

<property>
  <name>oozie.service.WorkflowAppService.system.libpath</name>
  <value>/user/your_user_name/share/lib/</value>
</property>
<property>
  <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
  <value>*=$HADOOP_HOME/etc/hadoop/</value>
</property>

./bin/oozied.sh start

tar -xvf oozie-examples.tar.gz

vim examples/apps/map-reduce/job.properties
nameNode=hdfs://localhost:9000
jobTracker=localhost:10020
oozie.use.system.libpath=true

hadoop fs -put examples hdfs://localhost:9000/user/your_username/examples

$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh --config $HADOOP_CONF_DIR start historyserver

jps

./bin/oozie job -oozie http://localhost:11000/oozie -config examples/apps/map-reduce/job.properties -run

./bin/oozie job -oozie http://localhost:11000/oozie -info 0000000-170408113318091-oozie-fars-W
./bin/oozie job -oozie http://localhost:11000/oozie -log 0000000-170408113318091-oozie-fars-W

WordCount MapReduce with Scalding

git clone https://github.com/scalding-io/ProgrammingWithScalding

cd ProgrammingWithScalding/chapter2/

mvn clean install

hadoop fs -mkdir -p /data/input

hadoop fs -mkdir -p /data/output

echo "This is a happy day. A day to remember" > /tmp/input.txt

hadoop fs -put /tmp/input.txt /data/input

hadoop jar /root/repo/ProgrammingWithScalding/chapter2/target/chapter2-0-jar-with-dependencies.jar com.twitter.scalding.Tool WordCountJob --local --input /data/input/input.txt --output /data/output/output.txt

cat /data/output/output.txt
a 2
day 1
day. 1
happy 1
is 1
remember 1
this 1
to 1
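
For reference, the class the jar runs is a Scalding fields-API word count; a roughly equivalent sketch (illustrative, not the exact code from the ProgrammingWithScalding repository):

import com.twitter.scalding._

class WordCountJob(args: Args) extends Job(args) {
  // Read lines, split into lowercase words, count occurrences per word, write the counts as TSV.
  TextLine(args("input"))
    .flatMap('line -> 'word) { line: String => line.toLowerCase.split("\\s+") }
    .groupBy('word) { _.size }
    .write(Tsv(args("output")))
}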

Wordcount Example With Apache Spark

mkdir wordcount-spark
cd wordcount-spark

mkdir -p src/main/scala

cat <<EOF > build.sbt
name := "wordcount"
version := "1.0"
scalaVersion := "2.11.8"
libraryDependencies += "org.apache.spark" %% "spark-core" % "1.6.0"
EOF

touch src/main/scala/SparkWordCount.scala

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf

object SparkWordCount {
  def main(args: Array[String]) {
    // create Spark context with Spark configuration
    val sc = new SparkContext(new SparkConf().setAppName("Spark Count"))

    // get threshold
    val threshold = args(1).toInt

    // read in text file and split each document into words
    val tokenized = sc.textFile(args(0)).flatMap(_.split(" "))

    // count the occurrence of each word
    val wordCounts = tokenized.map((_, 1)).reduceByKey(_ + _)

    // filter out words with fewer than threshold occurrences
    val filtered = wordCounts.filter(_._2 >= threshold)

    // count characters
    val charCounts = filtered.flatMap(_._1.toCharArray).map((_, 1)).reduceByKey(_ + _)

    System.out.println(charCounts.collect().mkString(", "))
  }
}

sbt package

cat <<EOF > /tmp/wordcount.txt
Hello world, Hello
EOF

cp target/scala-2.11/wordcount_2.11-1.0.jar /tmp/

cd $SPARK_HOME
./bin/spark-submit --master "local[*]" --class SparkWordCount /tmp/wordcount_2.11-1.0.jar /tmp/wordcount.txt 1
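
The same pipeline can also be tried interactively in spark-shell, where sc is already created, e.g. (a sketch, assuming the same /tmp/wordcount.txt input):

sc.textFile("/tmp/wordcount.txt").flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).collect().foreach(println)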