diff --git a/README.md b/README.md index c809d6a..d36c703 100644 --- a/README.md +++ b/README.md @@ -6,13 +6,17 @@ Slides and lab materials for my Docker Workshop (in Traditional Chinese). ## Background -After given 2 Docker speeches at the end of 2014 (see [A Gentle Introduction to Docker, a “Write Once, Run Anywhere” Containerization Technology](http://jcconf.tw/docker-most-write-once-run-anywhere.html) at JCConf Taiwan 2014 and [Twelve Factor App in Docker](http://containersummit.ithome.com.tw/) at Container Summit 2014), we've seen a growing demand for a systematic, practical, and step-by-step Docker workshop. Therefore, I design this 6-hour workshop materials to fulfill the need. +After given 2 Docker speeches at the end of 2014 (see [A Gentle Introduction to Docker, a “Write Once, Run Anywhere” Containerization Technology](http://jcconf.tw/2014/docker-most-write-once-run-anywhere.html) at JCConf Taiwan 2014 and [Twelve Factor App in Docker](http://containersummit.ithome.com.tw/) at Container Summit 2014), we've seen a growing demand for a systematic, practical, and step-by-step Docker workshop. Therefore, I design this 6-hour workshop materials to fulfill the need. ## Course Introduction Read the document ☛ [Docker 建置實戰講堂・課程簡介](intro.md). +If your organization needs customized training programs and tailored courses, please contact me at william.pjyeh@gmail.com + + + ## Preparation Read the documents: @@ -22,6 +26,68 @@ Read the documents: - [行前準備 Part 2:預載範例程式碼](config.md) ☚ 建議等開課前一週再進行。 +## Companion Slides + +View slides online: http://bit.ly/docker-slides + +FYI, the `gh-pages` branch stores the slide files, mostly in markdown format. 
+ + + +## Course Feedback + +- 「從無到有,對於 Docker 有了基本的認識,也大概知道學習的方向。」 + +- 「了解現行 Docker 發展技術及實際操作。」 + +- 「建立有系統的 Docker 基礎背景。」 + +- 「對 Docker 架構有更進一步瞭解。」 + +- 「快速進入 Docker 世界,避免會碰到的地雷。」 + +- 「學會 GitHub 與 Docker Hub 連動。」 + +- 「了解 Docker 是否適用於 production server 上。」 + +- 「希望有進階的實務範例銜接課程。」 + +- 「快開進階課吧!」 + + +## History + +**v5.0** / 第五梯次 (2015-06-27) + + - 增加更多實例,示範 container linking 的實務手法。 + - 提前以範例介紹初學者常見的兩大地雷:volume 及 network。 + + +**v4.0** / 第四梯次 (2015-05-09) + + - 簡化 Vagrant 虛擬機數量。 + - 增加「極簡化 Docker」實例,解釋 rootfs、dependency 與 isolation 性質。 + + +**v3.0** / 第三梯次 (2015-04-11) + + - 簡化 Vagrant 環境設定程序。 + + +**v2.0** / 第二梯次 (2015-03-07) + + - 更新至 Docker 1.5.0。 + - 增加更多程式語言例子。 + - 增加 Docker Compose 例子。 + - 增加更適合的實例,解釋 dependency 與 isolation 性質。 + - 增加更適合的實例,逐步剖析 container linking 觀念。 + - 增加更適合的實例,逐步剖析 Dockerized app 要素。 + + +**v1.0** / 第一梯次 (2015-02-07) + + - 初版 + ## License diff --git a/Vagrantfile b/Vagrantfile index 2f2806c..fffb7f8 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -1,8 +1,13 @@ -Vagrant.require_version ">= 1.7.2" +Vagrant.require_version ">= 1.7.4" +# change default synced_folder for convenience +SYNCED_FOLDER = "/home/vagrant/docker-workshop" # expose ports from guest to host for convenience -FORWARDED_PORT_RANGE = 10080..10100 +FORWARDED_PORT_RANGE = (10080..10100).to_a.push(10443).to_a.push(8080) + +# external provision script files +PROVISION_SCRIPTS = [ "provision/setup-docker-tools.sh", "provision/setup-env.sh", "provision/setup-hosts.sh" ] Vagrant.configure(2) do |config| @@ -10,70 +15,34 @@ Vagrant.configure(2) do |config| config.vm.define "main", primary: true do |node| node.vm.box = "williamyeh/ubuntu-trusty64-docker" - node.vm.box_version = ">= 1.5.0" + node.vm.box_version = ">= 1.8.1" node.vm.network "private_network", ip: "10.0.0.10" - node.vm.provision "hosts" do |hosts| - hosts.add_host '10.0.0.200', ['registry.com', 'registry'] - end for i in FORWARDED_PORT_RANGE node.vm.network "forwarded_port", guest: i, host: i end - 
node.vm.synced_folder ".", "/home/vagrant/docker-workshop" - - node.vm.provision "shell", path: "provision/setup-docker-tools.sh" - node.vm.provision "shell", path: "provision/setup-env.sh" + node.vm.synced_folder ".", SYNCED_FOLDER - node.vm.provider "virtualbox" do |vb| - vb.customize ["modifyvm", :id, "--memory", "2048"] + for f in PROVISION_SCRIPTS + node.vm.provision "shell", path: f end - end - - - config.vm.define "alice" do |node| - - node.vm.box = "williamyeh/ubuntu-trusty64-docker" - node.vm.box_version = ">= 1.5.0" - - node.vm.network "private_network", ip: "10.0.0.11" - node.vm.provision "hosts" do |hosts| - hosts.add_host '10.0.0.200', ['registry.com', 'registry'] + node.vm.provider "virtualbox" do |vb| + vb.customize ["modifyvm", :id, "--memory", "1024"] + #vb.customize ["modifyvm", :id, "--memory", "2048"] end - node.vm.synced_folder ".", "/home/vagrant/docker-workshop" - - node.vm.provision "shell", path: "provision/setup-docker-tools.sh" - node.vm.provision "shell", path: "provision/setup-env.sh" - end - config.vm.define "bob" do |node| - - node.vm.box = "williamyeh/ubuntu-trusty64-docker" - node.vm.box_version = ">= 1.5.0" - - node.vm.network "private_network", ip: "10.0.0.12" - node.vm.provision "hosts" do |hosts| - hosts.add_host '10.0.0.200', ['registry.com', 'registry'] - end - - node.vm.synced_folder ".", "/home/vagrant/docker-workshop" - - node.vm.provision "shell", path: "provision/setup-docker-tools.sh" - node.vm.provision "shell", path: "provision/setup-env.sh" - - end - config.vm.define "centos" do |node| - node.vm.box = "chef/centos-5.11" + node.vm.box = "bento/centos-5.11" node.vm.network "private_network", ip: "10.0.0.30" - node.vm.synced_folder ".", "/home/vagrant/docker-workshop" + node.vm.synced_folder ".", SYNCED_FOLDER # [NOTE] unmark this while benchmarking VM startup time #node.vm.box_check_update = false @@ -87,20 +56,16 @@ Vagrant.configure(2) do |config| config.vm.define "registry" do |node| - node.vm.box = 
"williamyeh/insecure-registry" - node.vm.box_version = ">= 1.5.0" + node.vm.box = "williamyeh/docker-workshop-registry" + node.vm.box_version = ">= 5.0.0" node.vm.network "private_network", ip: "10.0.0.200" - node.vm.provision "hosts" do |hosts| - hosts.add_host '10.0.0.200', ['registry.com', 'registry'] - end - node.vm.synced_folder ".", "/home/vagrant/docker-workshop" + node.vm.synced_folder ".", SYNCED_FOLDER - node.vm.provision "shell", path: "provision/setup-docker-tools.sh" - node.vm.provision "shell", path: "provision/setup-env.sh" - node.vm.provision "shell", - inline: "PRIVATE_DOCKER_REGISTRY=registry.com docker-mirror /home/vagrant/docker-workshop/provision/IMAGE-LIST" + for f in PROVISION_SCRIPTS + node.vm.provision "shell", path: f + end end diff --git a/build-chat/.dockerignore b/build-chat/.dockerignore index dd44972..966e067 100644 --- a/build-chat/.dockerignore +++ b/build-chat/.dockerignore @@ -1 +1,2 @@ *.md +docker-compose.yml diff --git a/build-chat/docker-compose.yml b/build-chat/docker-compose.yml new file mode 100644 index 0000000..c6199f3 --- /dev/null +++ b/build-chat/docker-compose.yml @@ -0,0 +1,4 @@ +app: + build: . + ports: + - "10080:3000" diff --git a/build-fork/.dockerignore b/build-fork/.dockerignore new file mode 100644 index 0000000..8617481 --- /dev/null +++ b/build-fork/.dockerignore @@ -0,0 +1 @@ +output.txt diff --git a/build-fork/Dockerfile b/build-fork/Dockerfile new file mode 100644 index 0000000..a629d20 --- /dev/null +++ b/build-fork/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +COPY getpid /usr/local/bin/ +COPY fork /usr/local/bin/ + +WORKDIR /data +VOLUME ["/data"] + +CMD [ "getpid" ] diff --git a/build-fork/docker-compose.yml b/build-fork/docker-compose.yml new file mode 100644 index 0000000..7762dc2 --- /dev/null +++ b/build-fork/docker-compose.yml @@ -0,0 +1,16 @@ +# normal case: +# child exit first, parent exit last +normal: + build: . 
+ command: fork 5 0 + volumes: + - .:/data + +# abnormal case: +# parent exit first, child exit last (orphan) +abnormal: + build: . + command: fork 0 5 + volumes: + - .:/data + diff --git a/build-fork/fork b/build-fork/fork new file mode 100755 index 0000000..0799818 --- /dev/null +++ b/build-fork/fork @@ -0,0 +1,97 @@ +#!/usr/bin/env perl +use strict; +use warnings; +use English; # for $PID +use File::Basename; + + +my $fh; +my $PARENT_TAG = "[Parent] "; +my $CHILD_TAG = " [Child] "; + +# sleep period in seconds +my ($parent_sleep, $child_sleep, $output_file) = @ARGV; + +sub main; +sub child; +sub my_tee; +sub process_cmdline; +sub usage; + +main(); + +sub main { + process_cmdline(); + + my_tee $fh => ($PARENT_TAG, "PID = ", $PID, "\n"); + + my $pid = fork(); + if ($pid == 0) { # We are the child + child(); + } + elsif (defined($pid)) { # We are the parent of child + my_tee $fh => ($PARENT_TAG, "forked a child with PID = $pid\n"); + if ($parent_sleep > 0) { + my_tee $fh => ($PARENT_TAG, "sleeping...\n"); + sleep($parent_sleep); + my_tee $fh => ($PARENT_TAG, "awaken!\n"); + } + my_tee $fh => ($PARENT_TAG, "exit...\n"); + exit 0; + } + else { # The fork failed + my_tee $fh => ($PARENT_TAG, "fork failed.\n"); + exit 2; + } +} + + +sub child { + my_tee $fh => ($CHILD_TAG, "PID = ", $PID, "\n"); + + if ($child_sleep > 0) { + my_tee $fh => ($CHILD_TAG, "sleeping...\n"); + sleep($child_sleep); + my_tee $fh => ($CHILD_TAG, "awaken!\n"); + } + + my_tee $fh => ($CHILD_TAG, "exit...\n"); + exit 0; +} + + +# simple "tee" function for Perl: +# print the output on the screen and to a file +# @see http://www.perlmonks.org/?node_id=962541 +sub my_tee { + my $handle = shift; + print $handle @_; + print @_; +} + + +sub process_cmdline { + usage() if (scalar @ARGV < 2); + + if (not defined $output_file) { + $output_file = 'output.txt'; + } + + open $fh, '>>', $output_file + or die "$!"; + + my $now = localtime(); + my_tee $fh => ("\n----> ", $now, " <----\n"); +} + + +sub usage { + 
my $prog_name = basename($0); + + print < [output file] + +USAGE + + exit 1; +} diff --git a/build-fork/getpid b/build-fork/getpid new file mode 100755 index 0000000..3e3bdd5 --- /dev/null +++ b/build-fork/getpid @@ -0,0 +1,6 @@ +#!/usr/bin/env perl +use strict; +use warnings; +use English; # for $PID + +print $PID, "\n"; diff --git a/build-io/Dockerfile b/build-io/Dockerfile new file mode 100644 index 0000000..51c3f96 --- /dev/null +++ b/build-io/Dockerfile @@ -0,0 +1,5 @@ +FROM busybox + +COPY io.sh /usr/local/bin/ + +CMD ["io.sh"] diff --git a/build-io/README.md b/build-io/README.md new file mode 100644 index 0000000..24cbd4d --- /dev/null +++ b/build-io/README.md @@ -0,0 +1,50 @@ +File I/O example +=== + +This directory demonstrates several Docker topics: + +- The usefulness of **volume** mechanism in Docker. +- The `docker logs` command. + + +## Steps + +1. Run without Docker + + ```bash + $ ./io.sh + $ ./io.sh + $ ./io.sh + + $ cat /tmp/output + ``` + + +2. Run with Docker, without volume mechanism + + ```bash + $ docker build -t io . + + $ docker run io + $ docker run io + $ docker run io + + $ cat /tmp/output + + $ docker ps -a + $ docker logs + ``` + + +3. Run with Docker, with volume mechanism + + ```bash + $ docker run -v $(pwd):/tmp io + $ docker run -v $(pwd):/tmp io + $ docker run -v $(pwd):/tmp io + + $ cat ./output + + $ docker ps -a + $ docker logs + ``` diff --git a/build-io/io.sh b/build-io/io.sh new file mode 100755 index 0000000..7f2266a --- /dev/null +++ b/build-io/io.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# +# Append date info to specified output file (default: "/tmp/output"), +# and display its content. +# + + +OUTPUT=${1:-/tmp/output} + +# create output file, if not exist... +if [ ! 
-f "$OUTPUT" ]; then + touch $OUTPUT +fi + + +# append date info +date >> $OUTPUT + + +# display the content of output file +cat $OUTPUT diff --git a/build-mcrypt/Dockerfile.nodejs b/build-mcrypt/Dockerfile.nodejs new file mode 100644 index 0000000..c2fae8e --- /dev/null +++ b/build-mcrypt/Dockerfile.nodejs @@ -0,0 +1,21 @@ +# a naive image for Node.js + mcrypt + +FROM node:0.10.36-slim + +RUN mkdir -p /opt/node_modules +WORKDIR /opt + +# copy to image/container +COPY libmcrypt4_2.5.8-3.3_amd64.deb libmcrypt4.deb +COPY libmcrypt-dev_2.5.8-3.3_amd64.deb libmcrypt-dev.deb + +# install from deb +RUN dpkg -i libmcrypt4.deb libmcrypt-dev.deb + +# copy pre-installed Node.js modules +COPY node_modules /opt/node_modules/ +#COPY package.json /opt/ +#RUN npm install + +# run! +ENTRYPOINT ["node"] diff --git a/build-mcrypt/Dockerfile.official-5.6.6 b/build-mcrypt/Dockerfile.official-5.6.6 new file mode 100644 index 0000000..51b66d4 --- /dev/null +++ b/build-mcrypt/Dockerfile.official-5.6.6 @@ -0,0 +1,58 @@ +FROM debian:jessie + +# persistent / runtime deps +RUN apt-get update && apt-get install -y ca-certificates curl libxml2 --no-install-recommends && rm -r /var/lib/apt/lists/* + +# phpize deps +RUN apt-get update && apt-get install -y autoconf gcc make pkg-config --no-install-recommends && rm -r /var/lib/apt/lists/* + +ENV PHP_INI_DIR /usr/local/etc/php +RUN mkdir -p $PHP_INI_DIR/conf.d + +#### +#### + +RUN gpg --keyserver pool.sks-keyservers.net --recv-keys 6E4F6AB321FDC07F2C332E3AC2BF0BC433CFC8B3 0BD78B5F97500D450838F95DFE857D9A90D90EC1 + +ENV PHP_VERSION 5.6.6 + +# --enable-mysqlnd is included below because it's harder to compile after the fact the extensions are (since it's a plugin for several extensions, not an extension in itself) +RUN buildDeps=" \ + $PHP_EXTRA_BUILD_DEPS \ + bzip2 \ + file \ + libcurl4-openssl-dev \ + libreadline6-dev \ + libssl-dev \ + libxml2-dev \ + "; \ + set -x \ + && apt-get update && apt-get install -y $buildDeps --no-install-recommends && 
rm -rf /var/lib/apt/lists/* \ + && curl -SL "http://php.net/get/php-$PHP_VERSION.tar.bz2/from/this/mirror" -o php.tar.bz2 \ + && curl -SL "http://php.net/get/php-$PHP_VERSION.tar.bz2.asc/from/this/mirror" -o php.tar.bz2.asc \ + && gpg --verify php.tar.bz2.asc \ + && mkdir -p /usr/src/php \ + && tar -xf php.tar.bz2 -C /usr/src/php --strip-components=1 \ + && rm php.tar.bz2* \ + && cd /usr/src/php \ + && ./configure \ + --with-config-file-path="$PHP_INI_DIR" \ + --with-config-file-scan-dir="$PHP_INI_DIR/conf.d" \ + $PHP_EXTRA_CONFIGURE_ARGS \ + --disable-cgi \ + --enable-mysqlnd \ + --with-curl \ + --with-openssl \ + --with-readline \ + --with-zlib \ + && make -j"$(nproc)" \ + && make install \ + && { find /usr/local/bin /usr/local/sbin -type f -executable -exec strip --strip-all '{}' + || true; } \ + && apt-get purge -y --auto-remove $buildDeps \ + && make clean + +COPY docker-php-ext-* /usr/local/bin/ + +#### +CMD ["php", "-a"] +#### diff --git a/build-mcrypt/Dockerfile.php b/build-mcrypt/Dockerfile.php new file mode 100644 index 0000000..614d34f --- /dev/null +++ b/build-mcrypt/Dockerfile.php @@ -0,0 +1,18 @@ +# a naive PHP + mcrypt image + +FROM php:5.6.6-cli + +# copy to image/container +COPY libmcrypt4_2.5.8-3.1_amd64.deb libmcrypt4.deb +COPY libmcrypt-dev_2.5.8-3.1_amd64.deb libmcrypt-dev.deb + +# install from deb +RUN dpkg -i libmcrypt4.deb libmcrypt-dev.deb + +# install PHP modules +# ... 
via a convenient wrapper for "make ; make install" stuff +RUN docker-php-ext-install mcrypt + + +# start PHP +ENTRYPOINT [ "php" ] \ No newline at end of file diff --git a/build-mcrypt/Dockerfile.php-new b/build-mcrypt/Dockerfile.php-new new file mode 100644 index 0000000..17c3c1e --- /dev/null +++ b/build-mcrypt/Dockerfile.php-new @@ -0,0 +1,18 @@ +# a naive PHP + mcrypt image + +FROM php:5.6.6-cli + +# copy to image/container +COPY libmcrypt4_2.5.8-3.3_amd64.deb libmcrypt4.deb +COPY libmcrypt-dev_2.5.8-3.3_amd64.deb libmcrypt-dev.deb + +# install from deb +RUN dpkg -i libmcrypt4.deb libmcrypt-dev.deb + +# install PHP modules +# ... via a convenient wrapper for "make ; make install" stuff +RUN docker-php-ext-install mcrypt + + +# start PHP +ENTRYPOINT [ "php" ] \ No newline at end of file diff --git a/build-mcrypt/README.md b/build-mcrypt/README.md new file mode 100644 index 0000000..186bc45 --- /dev/null +++ b/build-mcrypt/README.md @@ -0,0 +1,21 @@ +Build a naive PHP/Node.js + mcrypt image for Ubuntu 14.04 LTS (Trusty) +=== + + +## Purpose + +Demostrate how to build a naive PHP/Node.js + mcrypt image from Dockerfile. + +This lab uses pre-downloaded DEB files to minimize time to completion. Alternatives have drawbacks for this lab: + + - Building from tarball source will require `build-essential`. + - Installing by `apt-get` will require downloading packages on-the-fly. + +It is just a naive demo, especially in early workshop stages. For a better Dockerfile to learn from, see [official PHP repo](https://registry.hub.docker.com/_/php/) and [official Node.js repo](https://registry.hub.docker.com/_/node/). 
+ + +## Package + +Package: [`libmcrypt4`](http://packages.ubuntu.com/trusty/libmcrypt4) and [`libmcrypt-dev`](http://packages.ubuntu.com/trusty/libmcrypt-dev) + +DEB files: see http://mirrors.kernel.org/ubuntu/pool/universe/libm/libmcrypt/ diff --git a/build-mcrypt/centos-nodejs-mcrypt/README.txt b/build-mcrypt/centos-nodejs-mcrypt/README.txt new file mode 100644 index 0000000..be86cdd --- /dev/null +++ b/build-mcrypt/centos-nodejs-mcrypt/README.txt @@ -0,0 +1 @@ +Demostrate how to build all dependencies for Node.js + mcrypt under CentOS. diff --git a/build-mcrypt/centos-nodejs-mcrypt/Vagrantfile b/build-mcrypt/centos-nodejs-mcrypt/Vagrantfile new file mode 100644 index 0000000..6d5d05f --- /dev/null +++ b/build-mcrypt/centos-nodejs-mcrypt/Vagrantfile @@ -0,0 +1,11 @@ +Vagrant.configure(2) do |config| + config.vm.box = "bento/centos-6.7" + + config.vm.provision "shell", inline: <<-SHELL + rpm -ivh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm + yum -y install libmcrypt-devel nodejs npm + npm install mcrypt + # vagrant ssh ; cd /vagrant + # node example.js + SHELL +end diff --git a/build-mcrypt/centos-nodejs-mcrypt/example.js b/build-mcrypt/centos-nodejs-mcrypt/example.js new file mode 100644 index 0000000..bf8374c --- /dev/null +++ b/build-mcrypt/centos-nodejs-mcrypt/example.js @@ -0,0 +1,53 @@ +/** + * simple demo for mcrypt in Node.js + * adapted from PHP example: http://php.net/manual/en/function.mcrypt-encrypt.php + */ + +var mcrypt = require('mcrypt'); + +var CIPHER_ALGO = 'rijndael-256'; +var cipher = new mcrypt.MCrypt(CIPHER_ALGO, 'cbc'); + + + +var key = new Buffer( + "bcb04b7e103a0cd8b54763051cef08bc55abe029fdebae5e1d417e2ffb2a00a3" + , 'hex'); + +var iv = cipher.generateIv(); +var iv_size = iv.length; + +console.log('Cipher:', CIPHER_ALGO); +console.log('Key size:', key.length); +console.log('Key:', key.toString('hex')); +console.log('IV: ', iv.toString('hex')); +console.log("---"); + + +// display plaintext +var plaintext = 
'This string was AES-256 / CBC / ZeroBytePadding encrypted.'; +console.log('Before encryption:', plaintext); + + +// --- ENCRYPTION --- + +cipher.open(key, iv); + +var ciphertext = cipher.encrypt(plaintext); +var combined = Buffer.concat([iv, ciphertext]); +var ciphertext_base64 = combined.toString('base64'); +console.log('After encryption: ', ciphertext_base64); + + +// --- DECRYPTION --- + +var ciphertext_binary = new Buffer(ciphertext_base64, 'base64'); + +var iv_dec = new Buffer(iv_size); +var ciphertext_dec = new Buffer(ciphertext_binary.length - iv_size); +ciphertext_binary.copy(iv_dec, 0, 0, iv_size); +ciphertext_binary.copy(ciphertext_dec, 0, iv_size); + +cipher.open(key, iv_dec); +var plaintext_dec = cipher.decrypt(ciphertext_dec); +console.log('After decryption: ', plaintext_dec.toString()); diff --git a/build-mcrypt/demo-nodejs b/build-mcrypt/demo-nodejs new file mode 100755 index 0000000..d1da529 --- /dev/null +++ b/build-mcrypt/demo-nodejs @@ -0,0 +1,13 @@ +#!/bin/bash + +NODEJS_IMAGE=$1 +NODEJS_EXAMPLE=${2:-example.js} +NAME=$(basename $BASH_SOURCE) + +if [ $# -lt 1 ]; then + echo "Demo for Dockerized Node.js." + echo "Usage: $NAME [nodejs source code]" + exit 1 +fi + +cat $NODEJS_EXAMPLE | docker run -i $NODEJS_IMAGE diff --git a/build-mcrypt/demo-php b/build-mcrypt/demo-php new file mode 100755 index 0000000..d54ece1 --- /dev/null +++ b/build-mcrypt/demo-php @@ -0,0 +1,13 @@ +#!/bin/bash + +PHP_IMAGE=$1 +PHP_EXAMPLE=${2:-example.php} +NAME=$(basename $BASH_SOURCE) + +if [ $# -lt 1 ]; then + echo "Demo for Dockerized PHP." 
+ echo "Usage: $NAME [php source code]" + exit 1 +fi + +cat $PHP_EXAMPLE | docker run -i $PHP_IMAGE diff --git a/build-mcrypt/example.js b/build-mcrypt/example.js new file mode 100644 index 0000000..bf8374c --- /dev/null +++ b/build-mcrypt/example.js @@ -0,0 +1,53 @@ +/** + * simple demo for mcrypt in Node.js + * adapted from PHP example: http://php.net/manual/en/function.mcrypt-encrypt.php + */ + +var mcrypt = require('mcrypt'); + +var CIPHER_ALGO = 'rijndael-256'; +var cipher = new mcrypt.MCrypt(CIPHER_ALGO, 'cbc'); + + + +var key = new Buffer( + "bcb04b7e103a0cd8b54763051cef08bc55abe029fdebae5e1d417e2ffb2a00a3" + , 'hex'); + +var iv = cipher.generateIv(); +var iv_size = iv.length; + +console.log('Cipher:', CIPHER_ALGO); +console.log('Key size:', key.length); +console.log('Key:', key.toString('hex')); +console.log('IV: ', iv.toString('hex')); +console.log("---"); + + +// display plaintext +var plaintext = 'This string was AES-256 / CBC / ZeroBytePadding encrypted.'; +console.log('Before encryption:', plaintext); + + +// --- ENCRYPTION --- + +cipher.open(key, iv); + +var ciphertext = cipher.encrypt(plaintext); +var combined = Buffer.concat([iv, ciphertext]); +var ciphertext_base64 = combined.toString('base64'); +console.log('After encryption: ', ciphertext_base64); + + +// --- DECRYPTION --- + +var ciphertext_binary = new Buffer(ciphertext_base64, 'base64'); + +var iv_dec = new Buffer(iv_size); +var ciphertext_dec = new Buffer(ciphertext_binary.length - iv_size); +ciphertext_binary.copy(iv_dec, 0, 0, iv_size); +ciphertext_binary.copy(ciphertext_dec, 0, iv_size); + +cipher.open(key, iv_dec); +var plaintext_dec = cipher.decrypt(ciphertext_dec); +console.log('After decryption: ', plaintext_dec.toString()); diff --git a/build-mcrypt/example.php b/build-mcrypt/example.php new file mode 100644 index 0000000..08c9805 --- /dev/null +++ b/build-mcrypt/example.php @@ -0,0 +1,74 @@ + diff --git a/build-mcrypt/libmcrypt-dev_2.5.8-3.1_amd64.deb 
b/build-mcrypt/libmcrypt-dev_2.5.8-3.1_amd64.deb new file mode 100644 index 0000000..bbaa2b4 Binary files /dev/null and b/build-mcrypt/libmcrypt-dev_2.5.8-3.1_amd64.deb differ diff --git a/build-mcrypt/libmcrypt-dev_2.5.8-3.3_amd64.deb b/build-mcrypt/libmcrypt-dev_2.5.8-3.3_amd64.deb new file mode 100644 index 0000000..6c0c8af Binary files /dev/null and b/build-mcrypt/libmcrypt-dev_2.5.8-3.3_amd64.deb differ diff --git a/build-mcrypt/libmcrypt4_2.5.8-3.1_amd64.deb b/build-mcrypt/libmcrypt4_2.5.8-3.1_amd64.deb new file mode 100644 index 0000000..71e18cb Binary files /dev/null and b/build-mcrypt/libmcrypt4_2.5.8-3.1_amd64.deb differ diff --git a/build-mcrypt/libmcrypt4_2.5.8-3.3_amd64.deb b/build-mcrypt/libmcrypt4_2.5.8-3.3_amd64.deb new file mode 100644 index 0000000..56fdfc9 Binary files /dev/null and b/build-mcrypt/libmcrypt4_2.5.8-3.3_amd64.deb differ diff --git a/build-mcrypt/node_modules/mcrypt/.npmignore b/build-mcrypt/node_modules/mcrypt/.npmignore new file mode 100644 index 0000000..db6460a --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/.npmignore @@ -0,0 +1,3 @@ +/build/ +*.swp +.git \ No newline at end of file diff --git a/build-mcrypt/node_modules/mcrypt/LICENSE b/build-mcrypt/node_modules/mcrypt/LICENSE new file mode 100755 index 0000000..ed31e82 --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Tuğrul Topuz + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/build-mcrypt/node_modules/mcrypt/README.md b/build-mcrypt/node_modules/mcrypt/README.md new file mode 100644 index 0000000..c942f05 --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/README.md @@ -0,0 +1,278 @@ +# node-mcrypt + +MCrypt bindings for Node.js + +## Dependencies + +### Debian / Ubuntu + +``` +apt-get install libmcrypt4 libmcrypt-dev +``` + +### OSX + +``` +brew install mcrypt +``` + +## Install + +``` +npm install mcrypt +``` + +## Introduction + +Alright! There is already OpenSSL extension bundled with Node.js but there are something wrong with some traditional encryption algorithms on OpenSSL. + +I tried to decrypt ciphertext of AES and DES algorithms using OpenSSL but i get the garbage outputs. There are some reasons with OpenSSL like null padding. + +Also i saw same issues on the stackoverflow.com. Some people encountered with same problems. + +This extension provide the cipher and decipher operations via `libmcrypt` and compatible with Java Crypto and PHP MCrypt consequently. + +You should start with import the package like + +```javascript +var mcrypt = require('mcrypt'); +``` + +There are 3 exposed common functions in the package. These functions are `getAlgorithmNames()`, `getModeNames()` and `MCrypt()` constructor function. Also there are some functions under the prototype of `MCrypt()` constructor function. + +### getAlgorithmNames([path]) : Array + +`getAlgorithmNames()` returns an array that contains available algorithm names. 
`path` parameter to specify special algorithm directory. `path` parameter is not required. + +```javascript +var mcrypt = require('mcrypt'); + +var algos = mcrypt.getAlgorithmNames(); +console.log(algos); +``` + +Expected result like that + +``` +[ 'cast-128', 'gost', 'rijndael-128', 'twofish', 'arcfour', 'cast-256', 'loki97', 'rijndael-192', 'saferplus', 'wake', 'blowfish-compat', 'des', 'rijndael-256', 'serpent', 'xtea', 'blowfish', 'enigma', 'rc2', 'tripledes' ] +``` + +### getModeNames([path]) : Array + +`getModeNames()` returns an array that contains available mode names. `path` parameter to specify special mode directory. `path` parameter is not required. + +```javascript +var mcrypt = require('mcrypt'); + +var algos = mcrypt.getModeNames(); +console.log(algos); +``` + +Expected result like that +``` +[ 'cbc', 'cfb', 'ctr', 'ecb', 'ncfb', 'nofb', 'ofb', 'stream' ] +``` + +### MCrypt(algorithm, mode [, algorithmDir] [, modeDir]) : Object + +`MCrypt(algorithm, mode)` is a constructor function to create object for cipher and decipher operations. +`algorithm` is a required parameter and one of the values of array returned by `getAlgorithmNames()`. +`mode` is required parameter and one of the values of array returned by `getModeNames()`. +`algorithmDir` and `modeDir` are optional parameters to specify algorithm and mode directories. + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var desEcb = new MCrypt('des', 'ecb'); +``` + +There are some prototype functions to make cipher decipher operations and to identify algorithm properties. + +#### open(key [, iv]) + +We are need to `open()` with a key for `decrypt()` and `encrypt()` operations also we should set an iv if required by algorithm in other case `iv` is optional parameter. 
+`key` and `iv` should be string or Buffer + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var desEcb = new MCrypt('des', 'ecb'); +desEcb.open('madepass'); // we are set the key +``` + +#### encrypt(plaintext) : Buffer + +`encrypt()` returns a Buffer object that contains ciphertext of `plaintext` parameter. `plaintext` parameter should be `string` or `Buffer` + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var desEcb = new MCrypt('des', 'ecb'); +desEcb.open('madepass'); // we are set the key + +var ciphertext = desEcb.encrypt('this is top secret message!'); +console.log(ciphertext.toString('base64')); +``` + +Expected result like that + +``` +fkJnIgtiH8nsGDryyuIsmyf5vABMGStlpACfKCTifvA= +``` + +#### decrypt(ciphertext) : Buffer + +`decrypt()` returns a Buffer object that contains plaintext of `ciphertext` parameter. `ciphertext` parameter should be `Buffer` + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var desEcb = new MCrypt('des', 'ecb'); +desEcb.open('madepass'); // we are set the key + +var plaintext = desEcb.decrypt(new Buffer('fkJnIgtiH8nsGDryyuIsmyf5vABMGStlpACfKCTifvA=', 'base64')); +console.log(plaintext.toString()); +``` + +Expected result like that + +``` +this is top secret message! +``` + +#### generateIv() : Buffer + +`generateIv()` function generates IV randomly. 
+ +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +var iv = blowfishCfb.generateIv(); + +blowfishCfb.open('somekey', iv); + +var ciphertext = blowfishCfb.encrypt('sometext'); + +console.log(Buffer.concat([iv, ciphertext]).toString('base64')); +``` + +#### validateKeySize(Boolean) +`validateKeySize()` is a function to disable or enable key size validation on `open()` + +```javascript +var mc = new MCrypt('blowfish', 'ecb'); +mc.validateKeySize(false); // disable key size checking +mc.open('typeconfig.sys^_-'); +``` + +#### validateIvSize(Boolean) +`validateIvSize()` is a function to disable or enable iv size validation on `open()` + +```javascript +var mc = new MCrypt('rijndael-256', 'cbc'); +mc.validateIvSize(false); // disable iv size checking +mc.open('$verysec$retkey$', 'foobar'); +``` + +#### selfTest() : Boolean + +`selfTest()` is an utility function to make test algorithm internally and returns boolean value of status + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.selfTest()); +``` + +#### isBlockAlgorithmMode() : Boolean + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.isBlockAlgorithmMode()); +``` + +#### isBlockAlgorithm() : Boolean + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.isBlockAlgorithm()); +``` + +#### isBlockMode() : Boolean + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.isBlockMode()); +``` + +#### getBlockSize() : Number + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.getBlockSize()); +``` + +#### getKeySize() : Number + +```javascript +var MCrypt = 
require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.getKeySize()); +``` + +#### getSupportedKeySizes() : Array + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.getSupportedKeySizes()); +``` + +#### getIvSize() : Number + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.getIvSize()); +``` + +#### hasIv() : Boolean + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.hasIv()); +``` + +#### getAlgorithmName() : String + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.getAlgorithmName()); +``` + +#### getModeName() : String + +```javascript +var MCrypt = require('mcrypt').MCrypt; + +var blowfishCfb = new MCrypt('blowfish', 'cfb'); +console.log(blowfishCfb.getModeName()); +``` + diff --git a/build-mcrypt/node_modules/mcrypt/binding.gyp b/build-mcrypt/node_modules/mcrypt/binding.gyp new file mode 100644 index 0000000..11c9e01 --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/binding.gyp @@ -0,0 +1,20 @@ +{ + "targets": [ + { + "target_name": "mcrypt", + "sources": [ + "src/mcrypt.cc" + ], + "include_dirs": [ + "/usr/include/", + "/opt/local/include/", + "/usr/local/Cellar/mcrypt/" + ], + "link_settings": { + "libraries": [ + "-lmcrypt" + ] + } + } + ] +} diff --git a/build-mcrypt/node_modules/mcrypt/build/Makefile b/build-mcrypt/node_modules/mcrypt/build/Makefile new file mode 100644 index 0000000..9a00afc --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/build/Makefile @@ -0,0 +1,332 @@ +# We borrow heavily from the kernel build setup, though we are simpler since +# we don't have Kconfig tweaking settings on us. 
+ +# The implicit make rules have it looking for RCS files, among other things. +# We instead explicitly write all the rules we care about. +# It's even quicker (saves ~200ms) to pass -r on the command line. +MAKEFLAGS=-r + +# The source directory tree. +srcdir := .. +abs_srcdir := $(abspath $(srcdir)) + +# The name of the builddir. +builddir_name ?= . + +# The V=1 flag on command line makes us verbosely print command lines. +ifdef V + quiet= +else + quiet=quiet_ +endif + +# Specify BUILDTYPE=Release on the command line for a release build. +BUILDTYPE ?= Release + +# Directory all our build output goes into. +# Note that this must be two directories beneath src/ for unit tests to pass, +# as they reach into the src/ directory for data with relative paths. +builddir ?= $(builddir_name)/$(BUILDTYPE) +abs_builddir := $(abspath $(builddir)) +depsdir := $(builddir)/.deps + +# Object output directory. +obj := $(builddir)/obj +abs_obj := $(abspath $(obj)) + +# We build up a list of every single one of the targets so we can slurp in the +# generated dependency rule Makefiles in one pass. +all_deps := + + + +CC.target ?= $(CC) +CFLAGS.target ?= $(CFLAGS) +CXX.target ?= $(CXX) +CXXFLAGS.target ?= $(CXXFLAGS) +LINK.target ?= $(LINK) +LDFLAGS.target ?= $(LDFLAGS) +AR.target ?= $(AR) + +# C++ apps need to be linked with g++. +# +# Note: flock is used to seralize linking. Linking is a memory-intensive +# process so running parallel links can often lead to thrashing. To disable +# the serialization, override LINK via an envrionment variable as follows: +# +# export LINK=g++ +# +# This will allow make to invoke N linker processes as specified in -jN. +LINK ?= flock $(builddir)/linker.lock $(CXX.target) + +# TODO(evan): move all cross-compilation logic to gyp-time so we don't need +# to replicate this environment fallback in make as well. 
+CC.host ?= gcc +CFLAGS.host ?= +CXX.host ?= g++ +CXXFLAGS.host ?= +LINK.host ?= $(CXX.host) +LDFLAGS.host ?= +AR.host ?= ar + +# Define a dir function that can handle spaces. +# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions +# "leading spaces cannot appear in the text of the first argument as written. +# These characters can be put into the argument value by variable substitution." +empty := +space := $(empty) $(empty) + +# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces +replace_spaces = $(subst $(space),?,$1) +unreplace_spaces = $(subst ?,$(space),$1) +dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1))) + +# Flags to make gcc output dependency info. Note that you need to be +# careful here to use the flags that ccache and distcc can understand. +# We write to a dep file on the side first and then rename at the end +# so we can't end up with a broken dep file. +depfile = $(depsdir)/$(call replace_spaces,$@).d +DEPFLAGS = -MMD -MF $(depfile).raw + +# We have to fixup the deps output in a few ways. +# (1) the file output should mention the proper .o file. +# ccache or distcc lose the path to the target, so we convert a rule of +# the form: +# foobar.o: DEP1 DEP2 +# into +# path/to/foobar.o: DEP1 DEP2 +# (2) we want missing files not to cause us to fail to build. +# We want to rewrite +# foobar.o: DEP1 DEP2 \ +# DEP3 +# to +# DEP1: +# DEP2: +# DEP3: +# so if the files are missing, they're just considered phony rules. +# We have to do some pretty insane escaping to get those backslashes +# and dollar signs past make, the shell, and sed at the same time. +# Doesn't work with spaces, but that's fine: .d files have spaces in +# their names replaced with other characters. +define fixup_dep +# The depfile may not exist if the input file didn't have any #includes. +touch $(depfile).raw +# Fixup path as in (1). 
+sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile) +# Add extra rules as in (2). +# We remove slashes and replace spaces with new lines; +# remove blank lines; +# delete the first line and append a colon to the remaining lines. +sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\ + grep -v '^$$' |\ + sed -e 1d -e 's|$$|:|' \ + >> $(depfile) +rm $(depfile).raw +endef + +# Command definitions: +# - cmd_foo is the actual command to run; +# - quiet_cmd_foo is the brief-output summary of the command. + +quiet_cmd_cc = CC($(TOOLSET)) $@ +cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $< + +quiet_cmd_cxx = CXX($(TOOLSET)) $@ +cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< + +quiet_cmd_touch = TOUCH $@ +cmd_touch = touch $@ + +quiet_cmd_copy = COPY $@ +# send stderr to /dev/null to ignore messages when linking directories. +cmd_copy = rm -rf "$@" && cp -af "$<" "$@" + +quiet_cmd_alink = AR($(TOOLSET)) $@ +cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^) + +quiet_cmd_alink_thin = AR($(TOOLSET)) $@ +cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^) + +# Due to circular dependencies between libraries :(, we wrap the +# special "figure out circular dependencies" flags around the entire +# input list during linking. +quiet_cmd_link = LINK($(TOOLSET)) $@ +cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS) + +# We support two kinds of shared objects (.so): +# 1) shared_library, which is just bundling together many dependent libraries +# into a link line. +# 2) loadable_module, which is generating a module intended for dlopen(). +# +# They differ only slightly: +# In the former case, we want to package all dependent code into the .so. +# In the latter case, we want to package just the API exposed by the +# outermost module. 
+# This means shared_library uses --whole-archive, while loadable_module doesn't. +# (Note that --whole-archive is incompatible with the --start-group used in +# normal linking.) + +# Other shared-object link notes: +# - Set SONAME to the library filename so our binaries don't reference +# the local, absolute paths used on the link command-line. +quiet_cmd_solink = SOLINK($(TOOLSET)) $@ +cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS) + +quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ +cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS) + + +# Define an escape_quotes function to escape single quotes. +# This allows us to handle quotes properly as long as we always use +# use single quotes and escape_quotes. +escape_quotes = $(subst ','\'',$(1)) +# This comment is here just to include a ' to unconfuse syntax highlighting. +# Define an escape_vars function to escape '$' variable syntax. +# This allows us to read/write command lines with shell variables (e.g. +# $LD_LIBRARY_PATH), without triggering make substitution. +escape_vars = $(subst $$,$$$$,$(1)) +# Helper that expands to a shell command to echo a string exactly as it is in +# make. This uses printf instead of echo because printf's behaviour with respect +# to escape sequences is more portable than echo's across different shells +# (e.g., dash, bash). +exact_echo = printf '%s\n' '$(call escape_quotes,$(1))' + +# Helper to compare the command we're about to run against the command +# we logged the last time we ran the command. Produces an empty +# string (false) when the commands match. +# Tricky point: Make has no string-equality test function. 
+# The kernel uses the following, but it seems like it would have false +# positives, where one string reordered its arguments. +# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \ +# $(filter-out $(cmd_$@), $(cmd_$(1)))) +# We instead substitute each for the empty string into the other, and +# say they're equal if both substitutions produce the empty string. +# .d files contain ? instead of spaces, take that into account. +command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\ + $(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1)))) + +# Helper that is non-empty when a prerequisite changes. +# Normally make does this implicitly, but we force rules to always run +# so we can check their command lines. +# $? -- new prerequisites +# $| -- order-only dependencies +prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?)) + +# Helper that executes all postbuilds until one fails. +define do_postbuilds + @E=0;\ + for p in $(POSTBUILDS); do\ + eval $$p;\ + E=$$?;\ + if [ $$E -ne 0 ]; then\ + break;\ + fi;\ + done;\ + if [ $$E -ne 0 ]; then\ + rm -rf "$@";\ + exit $$E;\ + fi +endef + +# do_cmd: run a command via the above cmd_foo names, if necessary. +# Should always run for a given target to handle command-line changes. +# Second argument, if non-zero, makes it do asm/C/C++ dependency munging. +# Third argument, if non-zero, makes it do POSTBUILDS processing. +# Note: We intentionally do NOT call dirx for depfile, since it contains ? for +# spaces already and dirx strips the ? characters. 
+define do_cmd +$(if $(or $(command_changed),$(prereq_changed)), + @$(call exact_echo, $($(quiet)cmd_$(1))) + @mkdir -p "$(call dirx,$@)" "$(dir $(depfile))" + $(if $(findstring flock,$(word 1,$(cmd_$1))), + @$(cmd_$(1)) + @echo " $(quiet_cmd_$(1)): Finished", + @$(cmd_$(1)) + ) + @$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile) + @$(if $(2),$(fixup_dep)) + $(if $(and $(3), $(POSTBUILDS)), + $(call do_postbuilds) + ) +) +endef + +# Declare the "all" target first so it is the default, +# even though we don't have the deps yet. +.PHONY: all +all: + +# make looks for ways to re-generate included makefiles, but in our case, we +# don't have a direct way. Explicitly telling make that it has nothing to do +# for them makes it go faster. +%.d: ; + +# Use FORCE_DO_CMD to force a target to run. Should be coupled with +# do_cmd. +.PHONY: FORCE_DO_CMD +FORCE_DO_CMD: + +TOOLSET := target +# Suffix rules, putting all outputs into $(obj). +$(obj).$(TOOLSET)/%.o: $(srcdir)/%.c FORCE_DO_CMD + @$(call do_cmd,cc,1) +$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD + @$(call do_cmd,cxx,1) +$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cpp FORCE_DO_CMD + @$(call do_cmd,cxx,1) +$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cxx FORCE_DO_CMD + @$(call do_cmd,cxx,1) +$(obj).$(TOOLSET)/%.o: $(srcdir)/%.S FORCE_DO_CMD + @$(call do_cmd,cc,1) +$(obj).$(TOOLSET)/%.o: $(srcdir)/%.s FORCE_DO_CMD + @$(call do_cmd,cc,1) + +# Try building from generated source, too. 
+$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.c FORCE_DO_CMD + @$(call do_cmd,cc,1) +$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD + @$(call do_cmd,cxx,1) +$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cpp FORCE_DO_CMD + @$(call do_cmd,cxx,1) +$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cxx FORCE_DO_CMD + @$(call do_cmd,cxx,1) +$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.S FORCE_DO_CMD + @$(call do_cmd,cc,1) +$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.s FORCE_DO_CMD + @$(call do_cmd,cc,1) + +$(obj).$(TOOLSET)/%.o: $(obj)/%.c FORCE_DO_CMD + @$(call do_cmd,cc,1) +$(obj).$(TOOLSET)/%.o: $(obj)/%.cc FORCE_DO_CMD + @$(call do_cmd,cxx,1) +$(obj).$(TOOLSET)/%.o: $(obj)/%.cpp FORCE_DO_CMD + @$(call do_cmd,cxx,1) +$(obj).$(TOOLSET)/%.o: $(obj)/%.cxx FORCE_DO_CMD + @$(call do_cmd,cxx,1) +$(obj).$(TOOLSET)/%.o: $(obj)/%.S FORCE_DO_CMD + @$(call do_cmd,cc,1) +$(obj).$(TOOLSET)/%.o: $(obj)/%.s FORCE_DO_CMD + @$(call do_cmd,cc,1) + + +ifeq ($(strip $(foreach prefix,$(NO_LOAD),\ + $(findstring $(join ^,$(prefix)),\ + $(join ^,mcrypt.target.mk)))),) + include mcrypt.target.mk +endif + +quiet_cmd_regen_makefile = ACTION Regenerating $@ +cmd_regen_makefile = cd $(srcdir); /home/vagrant/.nvm/v0.10.36/lib/node_modules/npm/node_modules/node-gyp/gyp/gyp_main.py -fmake --ignore-environment "--toplevel-dir=." -I/home/vagrant/docker-workshop/build-php/node_modules/mcrypt/build/config.gypi -I/home/vagrant/.nvm/v0.10.36/lib/node_modules/npm/node_modules/node-gyp/addon.gypi -I/home/vagrant/.node-gyp/0.10.36/common.gypi "--depth=." "-Goutput_dir=." 
"--generator-output=build" "-Dlibrary=shared_library" "-Dvisibility=default" "-Dnode_root_dir=/home/vagrant/.node-gyp/0.10.36" "-Dmodule_root_dir=/home/vagrant/docker-workshop/build-php/node_modules/mcrypt" binding.gyp +Makefile: $(srcdir)/../../../../.node-gyp/0.10.36/common.gypi $(srcdir)/build/config.gypi $(srcdir)/binding.gyp $(srcdir)/../../../../.nvm/v0.10.36/lib/node_modules/npm/node_modules/node-gyp/addon.gypi + $(call do_cmd,regen_makefile) + +# "all" is a concatenation of the "all" targets from all the included +# sub-makefiles. This is just here to clarify. +all: + +# Add in dependency-tracking rules. $(all_deps) is the list of every single +# target in our tree. Only consider the ones with .d (dependency) info: +d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d)) +ifneq ($(d_files),) + include $(d_files) +endif diff --git a/build-mcrypt/node_modules/mcrypt/build/Release/.deps/Release/mcrypt.node.d b/build-mcrypt/node_modules/mcrypt/build/Release/.deps/Release/mcrypt.node.d new file mode 100644 index 0000000..a845a6d --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/build/Release/.deps/Release/mcrypt.node.d @@ -0,0 +1 @@ +cmd_Release/mcrypt.node := rm -rf "Release/mcrypt.node" && cp -af "Release/obj.target/mcrypt.node" "Release/mcrypt.node" diff --git a/build-mcrypt/node_modules/mcrypt/build/Release/.deps/Release/obj.target/mcrypt.node.d b/build-mcrypt/node_modules/mcrypt/build/Release/.deps/Release/obj.target/mcrypt.node.d new file mode 100644 index 0000000..0d6f2f4 --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/build/Release/.deps/Release/obj.target/mcrypt.node.d @@ -0,0 +1 @@ +cmd_Release/obj.target/mcrypt.node := flock ./Release/linker.lock g++ -shared -pthread -rdynamic -m64 -Wl,-soname=mcrypt.node -o Release/obj.target/mcrypt.node -Wl,--start-group Release/obj.target/mcrypt/src/mcrypt.o -Wl,--end-group -lmcrypt diff --git a/build-mcrypt/node_modules/mcrypt/build/Release/.deps/Release/obj.target/mcrypt/src/mcrypt.o.d 
b/build-mcrypt/node_modules/mcrypt/build/Release/.deps/Release/obj.target/mcrypt/src/mcrypt.o.d new file mode 100644 index 0000000..75a4c51 --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/build/Release/.deps/Release/obj.target/mcrypt/src/mcrypt.o.d @@ -0,0 +1,24 @@ +cmd_Release/obj.target/mcrypt/src/mcrypt.o := g++ '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-DBUILDING_NODE_EXTENSION' -I/home/vagrant/.node-gyp/0.10.36/src -I/home/vagrant/.node-gyp/0.10.36/deps/uv/include -I/home/vagrant/.node-gyp/0.10.36/deps/v8/include -I/usr/include -I/opt/local/include -I/usr/local/Cellar/mcrypt -fPIC -Wall -Wextra -Wno-unused-parameter -pthread -m64 -O2 -fno-strict-aliasing -fno-tree-vrp -fno-tree-sink -fno-omit-frame-pointer -fno-rtti -fno-exceptions -MMD -MF ./Release/.deps/Release/obj.target/mcrypt/src/mcrypt.o.d.raw -c -o Release/obj.target/mcrypt/src/mcrypt.o ../src/mcrypt.cc +Release/obj.target/mcrypt/src/mcrypt.o: ../src/mcrypt.cc ../src/mcrypt.h \ + /home/vagrant/.node-gyp/0.10.36/src/node.h \ + /home/vagrant/.node-gyp/0.10.36/deps/uv/include/uv.h \ + /home/vagrant/.node-gyp/0.10.36/deps/uv/include/uv-private/uv-unix.h \ + /home/vagrant/.node-gyp/0.10.36/deps/uv/include/uv-private/ngx-queue.h \ + /home/vagrant/.node-gyp/0.10.36/deps/uv/include/uv-private/uv-linux.h \ + /home/vagrant/.node-gyp/0.10.36/deps/v8/include/v8.h \ + /home/vagrant/.node-gyp/0.10.36/deps/v8/include/v8stdint.h \ + /home/vagrant/.node-gyp/0.10.36/src/node_object_wrap.h \ + /home/vagrant/.node-gyp/0.10.36/src/node.h \ + /home/vagrant/.node-gyp/0.10.36/src/node_buffer.h +../src/mcrypt.cc: +../src/mcrypt.h: +/home/vagrant/.node-gyp/0.10.36/src/node.h: +/home/vagrant/.node-gyp/0.10.36/deps/uv/include/uv.h: +/home/vagrant/.node-gyp/0.10.36/deps/uv/include/uv-private/uv-unix.h: +/home/vagrant/.node-gyp/0.10.36/deps/uv/include/uv-private/ngx-queue.h: +/home/vagrant/.node-gyp/0.10.36/deps/uv/include/uv-private/uv-linux.h: +/home/vagrant/.node-gyp/0.10.36/deps/v8/include/v8.h: 
+/home/vagrant/.node-gyp/0.10.36/deps/v8/include/v8stdint.h: +/home/vagrant/.node-gyp/0.10.36/src/node_object_wrap.h: +/home/vagrant/.node-gyp/0.10.36/src/node.h: +/home/vagrant/.node-gyp/0.10.36/src/node_buffer.h: diff --git a/build-mcrypt/node_modules/mcrypt/build/Release/linker.lock b/build-mcrypt/node_modules/mcrypt/build/Release/linker.lock new file mode 100644 index 0000000..e69de29 diff --git a/build-mcrypt/node_modules/mcrypt/build/Release/mcrypt.node b/build-mcrypt/node_modules/mcrypt/build/Release/mcrypt.node new file mode 100755 index 0000000..d47e24e Binary files /dev/null and b/build-mcrypt/node_modules/mcrypt/build/Release/mcrypt.node differ diff --git a/build-mcrypt/node_modules/mcrypt/build/Release/obj.target/mcrypt.node b/build-mcrypt/node_modules/mcrypt/build/Release/obj.target/mcrypt.node new file mode 100755 index 0000000..d47e24e Binary files /dev/null and b/build-mcrypt/node_modules/mcrypt/build/Release/obj.target/mcrypt.node differ diff --git a/build-mcrypt/node_modules/mcrypt/build/Release/obj.target/mcrypt/src/mcrypt.o b/build-mcrypt/node_modules/mcrypt/build/Release/obj.target/mcrypt/src/mcrypt.o new file mode 100644 index 0000000..6717cf9 Binary files /dev/null and b/build-mcrypt/node_modules/mcrypt/build/Release/obj.target/mcrypt/src/mcrypt.o differ diff --git a/build-mcrypt/node_modules/mcrypt/build/binding.Makefile b/build-mcrypt/node_modules/mcrypt/build/binding.Makefile new file mode 100644 index 0000000..e9e12d1 --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/build/binding.Makefile @@ -0,0 +1,6 @@ +# This file is generated by gyp; do not edit. + +export builddir_name ?= ./build/. +.PHONY: all +all: + $(MAKE) mcrypt diff --git a/build-mcrypt/node_modules/mcrypt/build/config.gypi b/build-mcrypt/node_modules/mcrypt/build/config.gypi new file mode 100644 index 0000000..78b643c --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/build/config.gypi @@ -0,0 +1,122 @@ +# Do not edit. 
File was generated by node-gyp's "configure" step +{ + "target_defaults": { + "cflags": [], + "default_configuration": "Release", + "defines": [], + "include_dirs": [], + "libraries": [] + }, + "variables": { + "clang": 0, + "gcc_version": 44, + "host_arch": "x64", + "node_install_npm": "true", + "node_prefix": "/", + "node_shared_cares": "false", + "node_shared_http_parser": "false", + "node_shared_libuv": "false", + "node_shared_openssl": "false", + "node_shared_v8": "false", + "node_shared_zlib": "false", + "node_tag": "", + "node_unsafe_optimizations": 0, + "node_use_dtrace": "false", + "node_use_etw": "false", + "node_use_openssl": "true", + "node_use_perfctr": "false", + "node_use_systemtap": "false", + "openssl_no_asm": 0, + "python": "/data/opt/bin/python", + "target_arch": "x64", + "v8_enable_gdbjit": 0, + "v8_no_strict_aliasing": 1, + "v8_use_snapshot": "false", + "want_separate_host_toolset": 0, + "nodedir": "/home/vagrant/.node-gyp/0.10.36", + "copy_dev_lib": "true", + "standalone_static_library": 1, + "cache_lock_stale": "60000", + "sign_git_tag": "", + "user_agent": "npm/1.4.28 node/v0.10.36 linux x64", + "always_auth": "", + "bin_links": "true", + "key": "", + "description": "true", + "fetch_retries": "2", + "heading": "npm", + "user": "1000", + "force": "", + "cache_min": "10", + "init_license": "ISC", + "editor": "vi", + "rollback": "true", + "cache_max": "Infinity", + "userconfig": "/home/vagrant/.npmrc", + "engine_strict": "", + "init_author_name": "", + "init_author_url": "", + "tmp": "/tmp", + "depth": "Infinity", + "save_dev": "", + "usage": "", + "cafile": "", + "https_proxy": "", + "onload_script": "", + "rebuild_bundle": "true", + "save_bundle": "", + "shell": "/bin/bash", + "prefix": "/home/vagrant/.nvm/v0.10.36", + "registry": "https://registry.npmjs.org/", + "browser": "", + "cache_lock_wait": "10000", + "save_optional": "", + "searchopts": "", + "versions": "", + "cache": "/home/vagrant/.npm", + "ignore_scripts": "", + "searchsort": 
"name", + "version": "", + "local_address": "", + "viewer": "man", + "color": "true", + "fetch_retry_mintimeout": "10000", + "umask": "2", + "fetch_retry_maxtimeout": "60000", + "message": "%s", + "ca": "", + "cert": "", + "global": "", + "link": "", + "save": "", + "unicode": "true", + "long": "", + "production": "", + "unsafe_perm": "true", + "node_version": "0.10.36", + "tag": "latest", + "git_tag_version": "true", + "shrinkwrap": "true", + "fetch_retry_factor": "10", + "npat": "", + "proprietary_attribs": "true", + "save_exact": "", + "strict_ssl": "true", + "username": "", + "dev": "", + "globalconfig": "/home/vagrant/.nvm/v0.10.36/etc/npmrc", + "init_module": "/home/vagrant/.npm-init.js", + "parseable": "", + "globalignorefile": "/home/vagrant/.nvm/v0.10.36/etc/npmignore", + "cache_lock_retries": "10", + "save_prefix": "^", + "group": "1000", + "init_author_email": "", + "searchexclude": "", + "git": "git", + "optional": "true", + "email": "", + "json": "", + "spin": "true" + } +} diff --git a/build-mcrypt/node_modules/mcrypt/build/mcrypt.target.mk b/build-mcrypt/node_modules/mcrypt/build/mcrypt.target.mk new file mode 100644 index 0000000..0673171 --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/build/mcrypt.target.mk @@ -0,0 +1,138 @@ +# This file is generated by gyp; do not edit. + +TOOLSET := target +TARGET := mcrypt +DEFS_Debug := \ + '-D_LARGEFILE_SOURCE' \ + '-D_FILE_OFFSET_BITS=64' \ + '-DBUILDING_NODE_EXTENSION' \ + '-DDEBUG' \ + '-D_DEBUG' + +# Flags passed to all source files. +CFLAGS_Debug := \ + -fPIC \ + -Wall \ + -Wextra \ + -Wno-unused-parameter \ + -pthread \ + -m64 \ + -g \ + -O0 + +# Flags passed to only C files. +CFLAGS_C_Debug := + +# Flags passed to only C++ files. 
+CFLAGS_CC_Debug := \ + -fno-rtti \ + -fno-exceptions + +INCS_Debug := \ + -I/home/vagrant/.node-gyp/0.10.36/src \ + -I/home/vagrant/.node-gyp/0.10.36/deps/uv/include \ + -I/home/vagrant/.node-gyp/0.10.36/deps/v8/include \ + -I/usr/include \ + -I/opt/local/include \ + -I/usr/local/Cellar/mcrypt + +DEFS_Release := \ + '-D_LARGEFILE_SOURCE' \ + '-D_FILE_OFFSET_BITS=64' \ + '-DBUILDING_NODE_EXTENSION' + +# Flags passed to all source files. +CFLAGS_Release := \ + -fPIC \ + -Wall \ + -Wextra \ + -Wno-unused-parameter \ + -pthread \ + -m64 \ + -O2 \ + -fno-strict-aliasing \ + -fno-tree-vrp \ + -fno-tree-sink \ + -fno-omit-frame-pointer + +# Flags passed to only C files. +CFLAGS_C_Release := + +# Flags passed to only C++ files. +CFLAGS_CC_Release := \ + -fno-rtti \ + -fno-exceptions + +INCS_Release := \ + -I/home/vagrant/.node-gyp/0.10.36/src \ + -I/home/vagrant/.node-gyp/0.10.36/deps/uv/include \ + -I/home/vagrant/.node-gyp/0.10.36/deps/v8/include \ + -I/usr/include \ + -I/opt/local/include \ + -I/usr/local/Cellar/mcrypt + +OBJS := \ + $(obj).target/$(TARGET)/src/mcrypt.o + +# Add to the list of files we specially track dependencies for. +all_deps += $(OBJS) + +# CFLAGS et al overrides must be target-local. +# See "Target-specific Variable Values" in the GNU Make manual. +$(OBJS): TOOLSET := $(TOOLSET) +$(OBJS): GYP_CFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_C_$(BUILDTYPE)) +$(OBJS): GYP_CXXFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_CC_$(BUILDTYPE)) + +# Suffix rules, putting all outputs into $(obj). + +$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD + @$(call do_cmd,cxx,1) + +# Try building from generated source, too. + +$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD + @$(call do_cmd,cxx,1) + +$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cc FORCE_DO_CMD + @$(call do_cmd,cxx,1) + +# End of this set of suffix rules +### Rules for final target. 
+LDFLAGS_Debug := \ + -pthread \ + -rdynamic \ + -m64 + +LDFLAGS_Release := \ + -pthread \ + -rdynamic \ + -m64 + +LIBS := \ + -lmcrypt + +$(obj).target/mcrypt.node: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE)) +$(obj).target/mcrypt.node: LIBS := $(LIBS) +$(obj).target/mcrypt.node: TOOLSET := $(TOOLSET) +$(obj).target/mcrypt.node: $(OBJS) FORCE_DO_CMD + $(call do_cmd,solink_module) + +all_deps += $(obj).target/mcrypt.node +# Add target alias +.PHONY: mcrypt +mcrypt: $(builddir)/mcrypt.node + +# Copy this to the executable output path. +$(builddir)/mcrypt.node: TOOLSET := $(TOOLSET) +$(builddir)/mcrypt.node: $(obj).target/mcrypt.node FORCE_DO_CMD + $(call do_cmd,copy) + +all_deps += $(builddir)/mcrypt.node +# Short alias for building this executable. +.PHONY: mcrypt.node +mcrypt.node: $(obj).target/mcrypt.node $(builddir)/mcrypt.node + +# Add executable to "all" target. +.PHONY: all +all: $(builddir)/mcrypt.node + diff --git a/build-mcrypt/node_modules/mcrypt/package.json b/build-mcrypt/node_modules/mcrypt/package.json new file mode 100644 index 0000000..9b41174 --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/package.json @@ -0,0 +1,54 @@ +{ + "name": "mcrypt", + "version": "0.0.11", + "description": "MCrypt bindings", + "keywords": [ + "mcrypt", + "crypto" + ], + "homepage": "https://github.com/tugrul/node-mcrypt", + "author": { + "name": "Tuğrul Topuz", + "email": "tugrultopuz@gmail.com" + }, + "bugs": { + "url": "https://github.com/tugrul/node-mcrypt/issues" + }, + "license": "MIT", + "repository": { + "type": "git", + "url": "https://github.com/tugrul/node-mcrypt.git" + }, + "scripts": { + "install": "node-gyp rebuild --release", + "preuninstall": "rm -rf build/*" + }, + "engines": { + "node": ">=0.8.0" + }, + "devDependencies": { + "node-gyp": "*" + }, + "main": "./build/Release/mcrypt", + "gitHead": "d094183f37e712fb1547aa43b0ab89721a8bc095", + "_id": "mcrypt@0.0.11", + "_shasum": 
"7ebba09ce002343e59615079039655d92e2d9b24", + "_from": "mcrypt@*", + "_npmVersion": "1.4.14", + "_npmUser": { + "name": "tugrul", + "email": "tugrultopuz@gmail.com" + }, + "maintainers": [ + { + "name": "tugrul", + "email": "tugrultopuz@gmail.com" + } + ], + "dist": { + "shasum": "7ebba09ce002343e59615079039655d92e2d9b24", + "tarball": "http://registry.npmjs.org/mcrypt/-/mcrypt-0.0.11.tgz" + }, + "directories": {}, + "_resolved": "https://registry.npmjs.org/mcrypt/-/mcrypt-0.0.11.tgz" +} diff --git a/build-mcrypt/node_modules/mcrypt/src/mcrypt.cc b/build-mcrypt/node_modules/mcrypt/src/mcrypt.cc new file mode 100644 index 0000000..cff6b3b --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/src/mcrypt.cc @@ -0,0 +1,564 @@ + + +#include "mcrypt.h" + +using namespace v8; + +Persistent MCrypt::constructor; + +MCrypt::MCrypt(const Arguments& args): + checkKeySize(true), + checkIvSize(true), + algo(args[0]), + mode(args[1]), + algoDir(args[2]), + modeDir(args[3]) { + + mcrypt_ = mcrypt_module_open(*algo, *algoDir, *mode, *modeDir); +}; + +MCrypt::~MCrypt() { + mcrypt_module_close(mcrypt_); +}; + +template +node::Buffer* MCrypt::transform(const char* plainText, const size_t length, int* result) { + size_t targetLength = length; + + // determine allocation size if the cipher algorithm is block mode + // block mode algorithm needs to fit in modulus of block size + // and it needs to padding space if not fit into block size + if (mcrypt_enc_is_block_algorithm(mcrypt_) == 1) { + size_t blockSize = mcrypt_enc_get_block_size(mcrypt_); + targetLength = (((length - 1) / blockSize) + 1) * blockSize; + } + + char* targetData = new char[targetLength](); + std::copy(plainText, plainText + length, targetData); + + // create a dummy object to return on fail result + node::Buffer* cipherText = node::Buffer::New(1); + + // copy of the key and iv due to mcrypt_generic_init not accepts + // const char for key and iv. 
direct passing is not safe because + // iv and key could be modified by mcrypt_generic_init in this case + char keyBuf[key.length()]; + key.copy(keyBuf, key.length()); + + char ivBuf[iv.length()]; + iv.copy(ivBuf, iv.length()); + + if ((*result = mcrypt_generic_init(mcrypt_, keyBuf, key.length(), ivBuf)) < 0) { + delete[] targetData; + + return cipherText; + } + + if ((*result = modify(mcrypt_, targetData, targetLength)) != 0) { + delete[] targetData; + + return cipherText; + } + + if ((*result = mcrypt_generic_deinit(mcrypt_)) < 0) { + delete[] targetData; + + return cipherText; + } + + cipherText = node::Buffer::New(targetData, targetLength); + + delete[] targetData; + + return cipherText; +} + +std::vector MCrypt::getKeySizes() { + + int count = 0; + int* sizes = mcrypt_enc_get_supported_key_sizes(mcrypt_, &count); + + if (count <= 0) { + mcrypt_free(sizes); + + size_t size = mcrypt_enc_get_key_size(mcrypt_); + + if (size > 0) { + std::vector keySizes(1); + keySizes[0] = size; + return keySizes; + } + + std::vector keySizes(0); + return keySizes; + } + + std::vector keySizes(count); + + for (int i = 0; i < count; i++) { + keySizes[i] = sizes[i]; + } + + mcrypt_free(sizes); + + return keySizes; +} + +NODE_MCRYPT_METHOD(New) { + HandleScope scope; + + if (!args.IsConstructCall()) { + Local argv[] = {args[0], args[1], args[2], args[3]}; + return scope.Close(constructor->NewInstance(4, argv)); + } + + if (args.Length() < 2) { + return ThrowException(Exception::TypeError(String::New("Missing parameters. Algorithm and mode should be specified."))); + } + + MCrypt* mcrypt = new MCrypt(args); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + mcrypt->Wrap(args.This()); + + return args.This(); +} + +NODE_MCRYPT_METHOD(Open) { + HandleScope scope; + + if (args.Length() < 1) { + return ThrowException(Exception::TypeError(String::New("Missing parameter. 
Key should be specified."))); + } + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + if (args[0]->IsString()) { + String::Utf8Value value(args[0]); + + mcrypt->key = std::string(*value, value.length()); + + } else if (node::Buffer::HasInstance(args[0])) { + + mcrypt->key = std::string(node::Buffer::Data(args[0]), node::Buffer::Length(args[0])); + + } else { + return ThrowException(Exception::TypeError(String::New("Key has got incorrect type. Should be Buffer or String."))); + } + + if (mcrypt->checkKeySize) { + std::vector keySizes = mcrypt->getKeySizes(); + + if (keySizes.size() > 0) { + + bool invalid = true; + + std::stringstream serror; + + serror << "Invalid key size. Available key size are ["; + + for(size_t i = 0; i < keySizes.size(); i++) { + + if (i != 0) { + serror << ", "; + } + + serror << keySizes[i]; + + if (keySizes[i] == mcrypt->key.length()) { + invalid = false; + } + } + + serror << "]"; + + std::string error = serror.str(); + + if (invalid) { + return ThrowException(Exception::TypeError(String::New(error.c_str(), error.length()))); + } + } + } + + if (args[1]->IsUndefined()) { + return scope.Close(Undefined()); + } + + size_t ivLen; + + if (args[1]->IsString()) { + + String::Utf8Value value(args[1]); + + ivLen = value.length(); + mcrypt->iv = std::string(*value, ivLen); + + } else if (node::Buffer::HasInstance(args[1])) { + + ivLen = node::Buffer::Length(args[1]); + mcrypt->iv = std::string(node::Buffer::Data(args[1]), ivLen); + } else { + return ThrowException(Exception::TypeError(String::New("Iv has got incorrect type. Should be Buffer or String."))); + } + + if (mcrypt->checkIvSize) { + if ((size_t)mcrypt_enc_get_iv_size(mcrypt->mcrypt_) != ivLen) { + return ThrowException(Exception::TypeError(String::New("Invalid iv size. 
You can determine iv size using getIvSize()"))); + } + } + + return scope.Close(Undefined()); +} + +NODE_MCRYPT_METHOD(Encrypt) { + HandleScope scope; + + if (args.Length() < 1) { + return ThrowException(Exception::TypeError(String::New("Missing parameter. Plaintext should be specified."))); + } + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + int result = 0; + node::Buffer* cipherText = node::Buffer::New(1); + + if (args[0]->IsString()) { + + String::Utf8Value value(args[0]); + cipherText = mcrypt->transform(*value, value.length(), &result); + + } else if(node::Buffer::HasInstance(args[0])) { + + cipherText = mcrypt->transform(node::Buffer::Data(args[0]), node::Buffer::Length(args[0]), &result); + + } else { + return ThrowException(Exception::TypeError(String::New("Plaintext has got incorrect type. Should be Buffer or String."))); + } + + if (result != 0) { + const char* error = mcrypt_strerror(result); + return ThrowException(Exception::Error(String::New(error))); + } + + return scope.Close(cipherText->handle_); +} + +NODE_MCRYPT_METHOD(Decrypt) { + HandleScope scope; + + if (args.Length() < 1) { + return ThrowException(Exception::TypeError(String::New("Missing parameter. Plaintext should be specified."))); + } + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + int result = 0; + node::Buffer* plainText = node::Buffer::New(1); + + if (args[0]->IsString()) { + + String::Utf8Value value(args[0]); + plainText = mcrypt->transform(*value, value.length(), &result); + + } else if (node::Buffer::HasInstance(args[0])) { + + plainText = mcrypt->transform(node::Buffer::Data(args[0]), node::Buffer::Length(args[0]), &result); + + } else { + return ThrowException(Exception::TypeError(String::New("Ciphertext has got incorrect type. 
Should be Buffer or String."))); + } + + if (result != 0) { + const char* error = mcrypt_strerror(result); + return ThrowException(Exception::Error(String::New(error))); + } + + return scope.Close(plainText->handle_); +} + +NODE_MCRYPT_METHOD(ValidateKeySize) { + HandleScope scope; + + if(args.Length() == 0) { + return scope.Close(Undefined()); + } + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + Local state = args[0]->ToBoolean(); + mcrypt->checkKeySize = state->Value(); + + return scope.Close(Undefined()); +} + +NODE_MCRYPT_METHOD(ValidateIvSize) { + HandleScope scope; + + if(args.Length() == 0) { + return scope.Close(Undefined()); + } + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + Local state = args[0]->ToBoolean(); + mcrypt->checkIvSize = state->Value(); + + return scope.Close(Undefined()); +} + +NODE_MCRYPT_METHOD(SelfTest) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + if (mcrypt_enc_self_test(mcrypt->mcrypt_) == 0) { + return scope.Close(True()); + } + + return scope.Close(False()); +} + +NODE_MCRYPT_METHOD(IsBlockAlgorithmMode) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + if (mcrypt_enc_is_block_algorithm_mode(mcrypt->mcrypt_) == 1) { + return scope.Close(True()); + } + + return scope.Close(False()); +} + +NODE_MCRYPT_METHOD(IsBlockAlgorithm) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + if (mcrypt_enc_is_block_algorithm(mcrypt->mcrypt_) == 1) { + return scope.Close(True()); + } + + return scope.Close(False()); +} + +NODE_MCRYPT_METHOD(IsBlockMode) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + if (mcrypt_enc_is_block_mode(mcrypt->mcrypt_) == 1) { + return scope.Close(True()); + } + + return scope.Close(False()); +} + +NODE_MCRYPT_METHOD(GetBlockSize) { + 
HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + int blockSize = mcrypt_enc_get_block_size(mcrypt->mcrypt_); + + return scope.Close(Number::New(blockSize)); +} + +NODE_MCRYPT_METHOD(GetKeySize) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + int keySize = mcrypt_enc_get_key_size(mcrypt->mcrypt_); + + return scope.Close(Number::New(keySize)); +} + +NODE_MCRYPT_METHOD(GetSupportedKeySizes) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + std::vector keySizes = mcrypt->getKeySizes(); + + Handle array = Array::New(keySizes.size()); + + for (size_t i = 0; i < keySizes.size(); i++) { + array->Set(i, Number::New(keySizes[i])); + } + + return scope.Close(array); +} + +NODE_MCRYPT_METHOD(GetIvSize) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + int ivSize = mcrypt_enc_get_iv_size(mcrypt->mcrypt_); + + return scope.Close(Number::New(ivSize)); +} + +NODE_MCRYPT_METHOD(HasIv) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + if (mcrypt_enc_mode_has_iv(mcrypt->mcrypt_) == 1) { + return scope.Close(True()); + } + + return scope.Close(False()); +} + +NODE_MCRYPT_METHOD(GetAlgorithmName) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + char* name = mcrypt_enc_get_algorithms_name(mcrypt->mcrypt_); + Handle ret = String::New(name); + mcrypt_free(name); + + return scope.Close(ret); +} + +NODE_MCRYPT_METHOD(GetModeName) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + char* name = mcrypt_enc_get_modes_name(mcrypt->mcrypt_); + Handle ret = String::New(name); + mcrypt_free(name); + + return scope.Close(ret); 
+} + +NODE_MCRYPT_METHOD(GenerateIv) { + HandleScope scope; + + MCrypt* mcrypt = ObjectWrap::Unwrap(args.This()); + + MCRYPT_MODULE_ERROR_CHECK(mcrypt) + + int ivSize = mcrypt_enc_get_iv_size(mcrypt->mcrypt_); + + node::Buffer* buffer = node::Buffer::New(ivSize); + + char* iv = node::Buffer::Data(buffer); + + while(ivSize) { + iv[--ivSize] = 255.0 * std::rand() / RAND_MAX; + } + + return scope.Close(buffer->handle_); +} + +NODE_MCRYPT_METHOD(GetAlgorithmNames) { + HandleScope scope; + + String::Utf8Value path(args[0]); + + int size = 0; + char** algos = mcrypt_list_algorithms(*path, &size); + + Handle array = Array::New(size); + + if (array.IsEmpty()) { + return Handle(); + } + + for (int i = 0; i < size; i++) { + array->Set(i, String::New(algos[i])); + } + + mcrypt_free_p(algos, size); + + return scope.Close(array); +} + +NODE_MCRYPT_METHOD(GetModeNames) { + HandleScope scope; + + String::Utf8Value path(args[0]); + + int size = 0; + char** modes = mcrypt_list_modes(*path, &size); + + Handle array = Array::New(size); + + if (array.IsEmpty()) + return Handle(); + + for (int i = 0; i < size; i++) { + array->Set(i, String::New(modes[i])); + } + + mcrypt_free_p(modes, size); + + return scope.Close(array); +} + +void MCrypt::Init(Handle exports) { + Local tpl = FunctionTemplate::New(New); + tpl->SetClassName(String::NewSymbol("MCrypt")); + tpl->InstanceTemplate()->SetInternalFieldCount(1); + + Local prototype = tpl->PrototypeTemplate(); + + // prototype + prototype->Set(String::NewSymbol("encrypt"), FunctionTemplate::New(Encrypt)->GetFunction()); + prototype->Set(String::NewSymbol("decrypt"), FunctionTemplate::New(Decrypt)->GetFunction()); + prototype->Set(String::NewSymbol("open"), FunctionTemplate::New(Open)->GetFunction()); + prototype->Set(String::NewSymbol("validateKeySize"), FunctionTemplate::New(ValidateKeySize)->GetFunction()); + prototype->Set(String::NewSymbol("validateIvSize"), FunctionTemplate::New(ValidateIvSize)->GetFunction()); + 
prototype->Set(String::NewSymbol("selfTest"), FunctionTemplate::New(SelfTest)->GetFunction()); + prototype->Set(String::NewSymbol("isBlockAlgorithmMode"), FunctionTemplate::New(IsBlockAlgorithmMode)->GetFunction()); + prototype->Set(String::NewSymbol("isBlockAlgorithm"), FunctionTemplate::New(IsBlockAlgorithm)->GetFunction()); + prototype->Set(String::NewSymbol("isBlockMode"), FunctionTemplate::New(IsBlockMode)->GetFunction()); + prototype->Set(String::NewSymbol("getBlockSize"), FunctionTemplate::New(GetBlockSize)->GetFunction()); + prototype->Set(String::NewSymbol("getKeySize"), FunctionTemplate::New(GetKeySize)->GetFunction()); + prototype->Set(String::NewSymbol("getSupportedKeySizes"), FunctionTemplate::New(GetSupportedKeySizes)->GetFunction()); + prototype->Set(String::NewSymbol("getIvSize"), FunctionTemplate::New(GetIvSize)->GetFunction()); + prototype->Set(String::NewSymbol("hasIv"), FunctionTemplate::New(HasIv)->GetFunction()); + prototype->Set(String::NewSymbol("getAlgorithmName"), FunctionTemplate::New(GetAlgorithmName)->GetFunction()); + prototype->Set(String::NewSymbol("getModeName"), FunctionTemplate::New(GetModeName)->GetFunction()); + prototype->Set(String::NewSymbol("generateIv"), FunctionTemplate::New(GenerateIv)->GetFunction()); + + // exports + constructor = Persistent::New(tpl->GetFunction()); + exports->Set(String::NewSymbol("MCrypt"), constructor); + exports->Set(String::NewSymbol("getAlgorithmNames"), FunctionTemplate::New(GetAlgorithmNames)->GetFunction()); + exports->Set(String::NewSymbol("getModeNames"), FunctionTemplate::New(GetModeNames)->GetFunction()); +} + +NODE_MODULE(mcrypt, MCrypt::Init) diff --git a/build-mcrypt/node_modules/mcrypt/src/mcrypt.h b/build-mcrypt/node_modules/mcrypt/src/mcrypt.h new file mode 100644 index 0000000..1b63e67 --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/src/mcrypt.h @@ -0,0 +1,73 @@ + +#ifndef SRC_NODE_MCRYPT_H_ +#define SRC_NODE_MCRYPT_H_ + +#include +#include +#include + +#include +#include 
+#include + +#define MCRYPT_MODULE_ERROR_CHECK(mcrypt) if (mcrypt->mcrypt_ == MCRYPT_FAILED) { \ + return ThrowException(Exception::ReferenceError(String::New("MCrypt module could not open"))); \ + } + +#define NODE_MCRYPT_METHOD_PROTO(MethodName) static Handle MethodName(const Arguments& args) +#define NODE_MCRYPT_METHOD(MethodName) Handle MCrypt::MethodName(const Arguments& args) + +using namespace v8; + +class MCrypt : public node::ObjectWrap { + public: + static void Init(Handle exports); + + private: + MCrypt(const Arguments& args); + ~MCrypt(); + + template + node::Buffer* transform(const char* plainText, const size_t length, int* result); + + std::vector getKeySizes(); + + static Persistent constructor; + + NODE_MCRYPT_METHOD_PROTO(New); + NODE_MCRYPT_METHOD_PROTO(Encrypt); + NODE_MCRYPT_METHOD_PROTO(Decrypt); + NODE_MCRYPT_METHOD_PROTO(Open); + NODE_MCRYPT_METHOD_PROTO(ValidateKeySize); + NODE_MCRYPT_METHOD_PROTO(ValidateIvSize); + NODE_MCRYPT_METHOD_PROTO(SelfTest); + NODE_MCRYPT_METHOD_PROTO(IsBlockAlgorithmMode); + NODE_MCRYPT_METHOD_PROTO(IsBlockAlgorithm); + NODE_MCRYPT_METHOD_PROTO(IsBlockMode); + NODE_MCRYPT_METHOD_PROTO(GetBlockSize); + NODE_MCRYPT_METHOD_PROTO(GetKeySize); + NODE_MCRYPT_METHOD_PROTO(GetSupportedKeySizes); + NODE_MCRYPT_METHOD_PROTO(GetIvSize); + NODE_MCRYPT_METHOD_PROTO(HasIv); + NODE_MCRYPT_METHOD_PROTO(GetAlgorithmName); + NODE_MCRYPT_METHOD_PROTO(GetModeName); + NODE_MCRYPT_METHOD_PROTO(GenerateIv); + NODE_MCRYPT_METHOD_PROTO(Close); + + NODE_MCRYPT_METHOD_PROTO(GetAlgorithmNames); + NODE_MCRYPT_METHOD_PROTO(GetModeNames); + + MCRYPT mcrypt_; + std::string key; + std::string iv; + + bool checkKeySize; + bool checkIvSize; + + String::AsciiValue algo; + String::AsciiValue mode; + String::AsciiValue algoDir; + String::AsciiValue modeDir; +}; + +#endif // ~ SRC_NODE_MCRYPT_H_ diff --git a/build-mcrypt/node_modules/mcrypt/test.js b/build-mcrypt/node_modules/mcrypt/test.js new file mode 100644 index 0000000..fad21a8 --- /dev/null +++ 
b/build-mcrypt/node_modules/mcrypt/test.js @@ -0,0 +1,15 @@ + + +var MCrypt = require('./build/Debug/mcrypt.node').MCrypt; + +var rijndael = new MCrypt('rijndael-128', 'cbc'); +rijndael.open('aaaabbbbccccdddd'); + +// 0.0.7 output: +// 8cc419b7cb0f744dbada17858ea84d00 +// Correct! equals with php mcrypt module +// but 0.0.10 output: +// b2af83d35ab9b920bcdbb3ae1425f454 +console.log(rijndael.encrypt('123456').toString('hex')); + + diff --git a/build-mcrypt/node_modules/mcrypt/test/test.js b/build-mcrypt/node_modules/mcrypt/test/test.js new file mode 100755 index 0000000..75e53dd --- /dev/null +++ b/build-mcrypt/node_modules/mcrypt/test/test.js @@ -0,0 +1,197 @@ + +var mcrypt = require('../build/Release/mcrypt'); +var assert = require('assert'); + +describe('MCrypt', function() { + + describe('getAlgorithmNames', function() { + it('should be a function', function() { + assert(typeof mcrypt.getAlgorithmNames == 'function', 'there is getAlgorithmNames function'); + }); + + it('should return an array', function() { + assert(mcrypt.getAlgorithmNames() instanceof Array, 'there is an array'); + }); + + it('should some values in the array', function() { + assert(mcrypt.getAlgorithmNames().length > 0, 'there are some values'); + }); + + it('should be DES value in the array', function() { + assert(mcrypt.getAlgorithmNames().indexOf('des') > -1, 'there is the des value'); + }); + }); + + describe('getModeNames', function(){ + it('should be a function', function() { + assert(typeof mcrypt.getModeNames == 'function', 'there is getModeNames function'); + }); + + it('should return an array', function() { + assert(mcrypt.getModeNames() instanceof Array, 'there is an array'); + }); + + it('should some values in the array', function() { + assert(mcrypt.getModeNames().length > 0, 'there are some values'); + }); + + it('should be ECB value in the array', function() { + assert(mcrypt.getModeNames().indexOf('ecb') > -1, 'there is the ecb value'); + }); + }); + + + describe('constructor', 
function() { + it('should be a constructor function', function() { + assert(typeof mcrypt.MCrypt == 'function', 'there is MCrypt constructor function'); + }); + + it('should throw exception without parameters', function() { + assert.throws(function(){ + new mcrypt.MCrypt(); // without algo and mode + }, TypeError, 'there is a TypeError when parameters are missed'); + }); + + it('should throw exception with less parameter', function(){ + assert.throws(function(){ + new mcrypt.MCrypt('des'); // without mode + }); + }); + + it('should throw exception with wrong parameters', function() { + assert.throws(function(){ + new mcrypt.MCrypt('deso', 'ecb'); // invalid algo name + }, ReferenceError, 'there is a ReferenceError when parameters are wrong'); + + assert.throws(function(){ + new mcrypt.MCrypt('des', 'ebo'); // invalid mode name + }, ReferenceError, 'there is a ReferenceError when parameters are wrong'); + }); + + it('should not throw exception with correct parameters', function() { + assert.doesNotThrow(function() { + new mcrypt.MCrypt('des', 'ecb'); + }, 'there is no error when parameters are correct'); + }); + }); + + describe('MCrypt instance (BLOWFISH-ECB)', function() { + var mc = new mcrypt.MCrypt('blowfish', 'ecb'); + + it('should be an object', function() { + assert(typeof mc == 'object', 'there is an object'); + }); + + describe('open', function() { + var key = 'typeconfig.sys^_-'; + + it('should open without error', function() { + assert.doesNotThrow(function() { + mc.validateKeySize(false); + mc.open(key); + }, 'there is error when opened with key'); + }); + }); + }); + + describe('MCrypt instance (DES-ECB)', function() { + var mc = new mcrypt.MCrypt('des', 'ecb'); + + it('should be an object', function() { + assert(typeof mc == "object", 'there is an object'); + }); + + describe('open', function() { + var key = 'madepass'; + var plaintext = 'top secret information!!'; + var ciphertext = '7Bg68sriLJuKCYPl1NmSwGrBrE0E5I+T'; + + it('should open without 
error', function(){ + assert.doesNotThrow(function(){ + mc.open(key); + }, 'there is error when opened with key'); + }); + + describe('encrypt', function() { + it('plaintext and decrypted ciphertext should be same', function(){ + assert.equal(ciphertext, mc.encrypt(plaintext).toString('base64'), 'ciphertext are not same'); + }); + + }); + + describe('decrypt', function() { + it('ciphertext and encrypted plaintext should be same', function(){ + assert.equal(plaintext, mc.decrypt(new Buffer(ciphertext, 'base64')).toString(), 'plaintext are not same'); + }); + }); + + describe('selfTest', function(){ + it('should return true', function(){ + assert(mc.selfTest(), 'return value is not true'); + }); + }); + + describe('isBlockAlgorithmMode', function(){ + it('should return true', function(){ + assert(mc.isBlockAlgorithmMode(), 'return value is not true'); + }); + }); + + describe('isBlockAlgorithm', function(){ + it('should return true', function(){ + assert(mc.isBlockAlgorithm(), 'return value is not true'); + }); + }); + + describe('isBlockMode', function(){ + it('should return true', function(){ + assert(mc.isBlockMode(), 'return value is not true'); + }); + }); + + describe('getKeySize', function(){ + it('should equal to keysize of algorithm', function(){ + assert.equal(mc.getKeySize(), 8, 'return value is not 8'); + }); + }); + + describe('getSupportedKeySizes', function(){ + it('should return an array', function() { + assert(mc.getSupportedKeySizes() instanceof Array, 'return value is not array'); + }); + + it('array should not be empty', function() { + assert(mc.getSupportedKeySizes().length > 0, 'return value is empty'); + }); + }); + + describe('getIvSize', function(){ + it('should equal to iv size of algorithm', function(){ + assert.equal(mc.getIvSize(), 8, 'iv size not equals with 8'); + }); + }); + + describe('hasIv', function(){ + it('should return false', function(){ + assert.equal(mc.hasIv(), false, 'return value is not false'); + }); + }); + + 
describe('getAlgorithmName', function(){ + it('should return DES', function(){ + assert.equal(mc.getAlgorithmName(), 'DES', 'return value is not DES'); + }); + }); + + describe('getModeName', function() { + it('should return ECB', function(){ + assert.equal(mc.getModeName(), 'ECB', 'return value is not ECB'); + }); + }); + + }); + + + }); +}); + diff --git a/build-mcrypt/package.json b/build-mcrypt/package.json new file mode 100644 index 0000000..8207193 --- /dev/null +++ b/build-mcrypt/package.json @@ -0,0 +1,11 @@ +{ + "name": "nodejs-mcrypt-example", + "version": "0.0.1", + "description": "nodejs mcrypt example", + "scripts": { + "start": "node index.js" + }, + "dependencies": { + "mcrypt": "*" + } +} diff --git a/build-nginx/Dockerfile-daemon-off b/build-nginx/Dockerfile-daemon-off index 62cf827..f46632a 100644 --- a/build-nginx/Dockerfile-daemon-off +++ b/build-nginx/Dockerfile-daemon-off @@ -1,4 +1,4 @@ -FROM nginx:1.7.9 +FROM nginx:1.9.0 # start Nginx server, without daemonize itself (correct!) CMD [ "nginx", "-g", "daemon off;" ] diff --git a/build-nginx/Dockerfile-daemonize b/build-nginx/Dockerfile-daemonize index 682a631..a7725d8 100644 --- a/build-nginx/Dockerfile-daemonize +++ b/build-nginx/Dockerfile-daemonize @@ -1,4 +1,4 @@ -FROM nginx:1.7.9 +FROM nginx:1.9.0 # start Nginx server, and daemonize itself (incorrect!) 
CMD [ "nginx" ] diff --git a/build-nginx/Dockerfile.official-1.7.9 b/build-nginx/Dockerfile.official-1.7.9 deleted file mode 100644 index 7b9ca6d..0000000 --- a/build-nginx/Dockerfile.official-1.7.9 +++ /dev/null @@ -1,20 +0,0 @@ -FROM debian:wheezy - -MAINTAINER NGINX Docker Maintainers "docker-maint@nginx.com" - -RUN apt-key adv --keyserver pgp.mit.edu --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 -RUN echo "deb http://nginx.org/packages/mainline/debian/ wheezy nginx" >> /etc/apt/sources.list - -ENV NGINX_VERSION 1.7.9-1~wheezy - -RUN apt-get update && apt-get install -y nginx=${NGINX_VERSION} && rm -rf /var/lib/apt/lists/* - -# forward request and error logs to docker log collector -RUN ln -sf /dev/stdout /var/log/nginx/access.log -RUN ln -sf /dev/stderr /var/log/nginx/error.log - -VOLUME ["/var/cache/nginx"] - -EXPOSE 80 443 - -CMD ["nginx", "-g", "daemon off;"] diff --git a/build-nginx/Dockerfile.official-1.9.0 b/build-nginx/Dockerfile.official-1.9.0 new file mode 100644 index 0000000..85c0b81 --- /dev/null +++ b/build-nginx/Dockerfile.official-1.9.0 @@ -0,0 +1,22 @@ +FROM debian:jessie + +MAINTAINER NGINX Docker Maintainers "docker-maint@nginx.com" + +RUN apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 +RUN echo "deb http://nginx.org/packages/mainline/debian/ jessie nginx" >> /etc/apt/sources.list + +ENV NGINX_VERSION 1.9.0-1~jessie + +RUN apt-get update && \ + apt-get install -y ca-certificates nginx=${NGINX_VERSION} && \ + rm -rf /var/lib/apt/lists/* + +# forward request and error logs to docker log collector +RUN ln -sf /dev/stdout /var/log/nginx/access.log +RUN ln -sf /dev/stderr /var/log/nginx/error.log + +VOLUME ["/var/cache/nginx"] + +EXPOSE 80 443 + +CMD ["nginx", "-g", "daemon off;"] diff --git a/build-nginx/docker-compose.yml b/build-nginx/docker-compose.yml new file mode 100644 index 0000000..c3a7da9 --- /dev/null +++ b/build-nginx/docker-compose.yml @@ -0,0 +1,7 @@ +nginx: + image: 
nginx:1.9.0 + ports: + - "10081:80" + volumes: + - .:/usr/share/nginx/html:ro + diff --git a/build-redis-linking/Dockerfile b/build-redis-linking/Dockerfile new file mode 100644 index 0000000..3836026 --- /dev/null +++ b/build-redis-linking/Dockerfile @@ -0,0 +1,21 @@ +# a naive Redis image + +FROM ubuntu:14.04 + +# copy to image/container +COPY redis-server_2.8.19.deb redis-server.deb + +# install from deb +RUN dpkg -i redis-server.deb + + +# expose Redis port (used by "redis-server") +EXPOSE 6379 + +# install wrapper scripts +COPY client /usr/local/bin/ +COPY benchmark /usr/local/bin/ + + +# start Redis server +CMD [ "redis-server" ] diff --git a/build-redis-linking/Dockerfile.official-2.8.19 b/build-redis-linking/Dockerfile.official-2.8.19 new file mode 100644 index 0000000..6ef0f84 --- /dev/null +++ b/build-redis-linking/Dockerfile.official-2.8.19 @@ -0,0 +1,45 @@ +FROM debian:wheezy + +# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added +RUN groupadd -r redis && useradd -r -g redis redis + +RUN apt-get update \ + && apt-get install -y curl \ + && rm -rf /var/lib/apt/lists/* + +# grab gosu for easy step-down from root +RUN gpg --keyserver pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 +RUN curl -o /usr/local/bin/gosu -SL "https://github.com/tianon/gosu/releases/download/1.2/gosu-$(dpkg --print-architecture)" \ + && curl -o /usr/local/bin/gosu.asc -SL "https://github.com/tianon/gosu/releases/download/1.2/gosu-$(dpkg --print-architecture).asc" \ + && gpg --verify /usr/local/bin/gosu.asc \ + && rm /usr/local/bin/gosu.asc \ + && chmod +x /usr/local/bin/gosu + +ENV REDIS_VERSION 2.8.19 +ENV REDIS_DOWNLOAD_URL http://download.redis.io/releases/redis-2.8.19.tar.gz +ENV REDIS_DOWNLOAD_SHA1 3e362f4770ac2fdbdce58a5aa951c1967e0facc8 + +# for redis-sentinel see: http://redis.io/topics/sentinel +RUN buildDeps='gcc libc6-dev make'; \ + set -x \ 
+ && apt-get update && apt-get install -y $buildDeps --no-install-recommends \ + && rm -rf /var/lib/apt/lists/* \ + && mkdir -p /usr/src/redis \ + && curl -sSL "$REDIS_DOWNLOAD_URL" -o redis.tar.gz \ + && echo "$REDIS_DOWNLOAD_SHA1 *redis.tar.gz" | sha1sum -c - \ + && tar -xzf redis.tar.gz -C /usr/src/redis --strip-components=1 \ + && rm redis.tar.gz \ + && make -C /usr/src/redis \ + && make -C /usr/src/redis install \ + && rm -r /usr/src/redis \ + && apt-get purge -y --auto-remove $buildDeps + +RUN mkdir /data && chown redis:redis /data +VOLUME /data +WORKDIR /data + +COPY docker-entrypoint.sh /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] + +EXPOSE 6379 +CMD [ "redis-server" ] diff --git a/build-redis-linking/README.md b/build-redis-linking/README.md new file mode 100644 index 0000000..e62aa90 --- /dev/null +++ b/build-redis-linking/README.md @@ -0,0 +1,22 @@ +Build a naive Redis image (and some wrappers) for Ubuntu 14.04 LTS (Trusty) +=== + + +## Purpose + +Demostrate how to build a naive Redis image (and some wrappers) from Dockerfile. + +This lab uses a pre-downloaded DEB file to minimize time to completion. Alternatives have drawbacks for this lab: + + - Building from tarball source will require `build-essential`. + - Installing by `apt-get` will require downloading packages on-the-fly. + +It is just a naive demo, especially in early workshop stages. For a better Dockerfile to learn from, take [williamyeh/redis](https://registry.hub.docker.com/u/williamyeh/redis/) as a starting point. 
+ + +## Package + +PPA: [`ppa:rwky/redis`](https://launchpad.net/~rwky/+archive/ubuntu/redis) + +DEB file: [2:2.8.19-rwky1~trusty](https://launchpad.net/~rwky/+archive/ubuntu/redis/+files/redis-server_2.8.19-rwky1~trusty_amd64.deb) + diff --git a/build-redis-linking/benchmark b/build-redis-linking/benchmark new file mode 100755 index 0000000..ae88fb6 --- /dev/null +++ b/build-redis-linking/benchmark @@ -0,0 +1,10 @@ +#!/bin/bash +# +# A simple wrapper for Dockerized redis-benchmark. +# +# Expecting link alias to be 'redis'. +# + + +exec redis-benchmark -h redis -p 6379 "$@" +#exec redis-benchmark -h $REDIS_PORT_6379_TCP_ADDR -p $REDIS_PORT_6379_TCP_PORT "$@" diff --git a/build-redis-linking/client b/build-redis-linking/client new file mode 100755 index 0000000..fdf4600 --- /dev/null +++ b/build-redis-linking/client @@ -0,0 +1,10 @@ +#!/bin/bash +# +# A simple wrapper for Dockerized redis-cli. +# +# Expecting link alias to be 'redis'. +# + + +exec redis-cli -h redis -p 6379 "$@" +#exec redis-cli -h $REDIS_PORT_6379_TCP_ADDR -p $REDIS_PORT_6379_TCP_PORT "$@" diff --git a/build-redis-linking/docker-compose-mixed.yml b/build-redis-linking/docker-compose-mixed.yml new file mode 100644 index 0000000..7b8f9cf --- /dev/null +++ b/build-redis-linking/docker-compose-mixed.yml @@ -0,0 +1,17 @@ +benchmark1: + build: . + command: benchmark + links: + - redis1:redis + +benchmark2: + build: . + command: benchmark + links: + - redis2:redis + +redis1: + image: redis:2.8.19 + +redis2: + build: . diff --git a/build-redis-linking/docker-compose.yml b/build-redis-linking/docker-compose.yml new file mode 100644 index 0000000..4f81e46 --- /dev/null +++ b/build-redis-linking/docker-compose.yml @@ -0,0 +1,8 @@ +benchmark: + build: . 
+ command: benchmark + links: + - redis + +redis: + image: redis:2.8.19 diff --git a/build-redis-linking/redis-server_2.8.19.deb b/build-redis-linking/redis-server_2.8.19.deb new file mode 100644 index 0000000..fc25304 Binary files /dev/null and b/build-redis-linking/redis-server_2.8.19.deb differ diff --git a/build-redis-mini/.dockerignore b/build-redis-mini/.dockerignore new file mode 100644 index 0000000..85de9cf --- /dev/null +++ b/build-redis-mini/.dockerignore @@ -0,0 +1 @@ +src diff --git a/build-redis-mini/Dockerfile b/build-redis-mini/Dockerfile new file mode 100644 index 0000000..efd453a --- /dev/null +++ b/build-redis-mini/Dockerfile @@ -0,0 +1,12 @@ +# a minimal Dockerized `redis-server` + +FROM scratch + +ADD rootfs.tar.gz / +COPY redis.conf /etc/redis/redis.conf + +# Redis port. +EXPOSE 6379 + + +CMD ["redis-server"] diff --git a/build-redis-mini/redis.conf b/build-redis-mini/redis.conf new file mode 100644 index 0000000..2206f2d --- /dev/null +++ b/build-redis-mini/redis.conf @@ -0,0 +1,938 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. 
Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################ GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile /var/run/redis.pid + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# By default Redis listens for connections from all the network interfaces +# available on the server. It is possible to listen to just one or multiple +# interfaces using the "bind" configuration directive, followed by one or +# more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 + +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. 
+# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 60 seconds. +tcp-keepalive 0 + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. 
The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. 
+rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. 
+# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. 
+# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. 
+# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. 
The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. 
This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). 
+# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. 
+# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs a bit more CPU. 3 is very fast but not very accurate. +# +# maxmemory-samples 5 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. 
+ +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. 
+# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. 
+# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. 
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. 
Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. 
+# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. 
+slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. 
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. 
+# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. 
+# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. 
+client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes diff --git a/build-redis-mini/rootfs.tar.gz b/build-redis-mini/rootfs.tar.gz new file mode 100644 index 0000000..e91fddf Binary files /dev/null and b/build-redis-mini/rootfs.tar.gz differ diff --git a/build-redis-mini/src/build-rootfs.sh b/build-redis-mini/src/build-rootfs.sh new file mode 100755 index 0000000..1ee530a --- /dev/null +++ b/build-redis-mini/src/build-rootfs.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +REDIS_VERSION=3.0.0 + +echo "==> Install curl and helper tools..." +sudo apt-get install -y curl make gcc + + +echo "==> Compile..." +tar zxvf redis-$REDIS_VERSION.tar.gz +cd redis-$REDIS_VERSION +make + + +echo "==> Copy aux files..." +cp redis.conf .. + + +echo "==> Clear screen..." +cd .. +clear + + +echo "==> Investigate required .so files..." 
+ldd redis-$REDIS_VERSION/src/redis-server + + +echo "==> Extract .so files and pack them into rootfs.tar.gz..." +../extract-elf-so_static_linux-amd64 \ + -z \ + redis-$REDIS_VERSION/src/redis-server diff --git a/build-redis-mini/src/clean.sh b/build-redis-mini/src/clean.sh new file mode 100755 index 0000000..085410c --- /dev/null +++ b/build-redis-mini/src/clean.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +REDIS_VERSION=3.0.0 + +rm -rf redis-$REDIS_VERSION redis.conf rootfs.tar.gz diff --git a/build-redis-mini/src/redis-3.0.0.tar.gz b/build-redis-mini/src/redis-3.0.0.tar.gz new file mode 100644 index 0000000..3981808 Binary files /dev/null and b/build-redis-mini/src/redis-3.0.0.tar.gz differ diff --git a/build-walk/README.md b/build-walk/README.md new file mode 100644 index 0000000..f9938db --- /dev/null +++ b/build-walk/README.md @@ -0,0 +1,142 @@ +Several walk-tree examples +=== + +This directory demonstrates several Docker topics: + +- Minimal Docker images (all less than 3 MB). + +- Rootfs concept inside the Docker images/containers. + +- Dependencies on runtime components (e.g., `.so` files). + +- Isolation of resources. + + +## Main programs + +The program “**walk**” tries to traverse directory structures starting from specified path (`.` by default). For brevity, it excludes `/dev`, `/proc`, and `/sys` directories from the output. + + +Two compiled versions of the same functionality written in C are provided: + +1. Static version `walk-static`: a fully statically-linked ELF executable (i.e., without runtime dependencies on any `.so` files). + + +2. Dynamic version `walk-dynamic`: an ordinary ELF executable with runtime dependencies on some system-wide `.so` files: + + ```bash + $ ldd walk-dynamic + linux-vdso.so.1 => (0x00007fff899f4000) + libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f0e2a191000) + /lib64/ld-linux-x86-64.so.2 (0x00007f0e2a55f000) + ``` + +Refer to the `src` directory if you're curious about their C source code. 
+ + +![bg](walk-cases.png) + + +## Case 1: Fully statically-linked ELF file + +The program `walk-static` is a fully statically-linked ELF executable. + +★★ To build it into a minimal Docker image with `Dockerfile`: + +``` +$ docker build . +``` + +To see what's inside this image, starting from its root directory `/`: + +``` +$ docker run IMAGE-ID walk / +``` + + + +## Case 2: Forget to link an ELF file with its dependent .so files + +The program `walk-dynamic` is a dynamically-linked ELF executable with runtime dependencies on some system-wide `.so` files. + +★★ To build it into a minimal Docker image with `Dockerfile`: + +``` +$ docker build . +``` + +To see what's inside this image, starting from its root directory `/` (**will fail!**): + +``` +$ docker run IMAGE-ID walk / +``` + + +## Case 3: Link with dependent .so files extracted from Ubuntu 14.04 + +Extract required `.so` files from Ubuntu 14.04: + + ``` + 149120 Feb 25 2015 ld-linux-x86-64.so.2 + 1840928 Feb 25 2015 libc.so.6 + ``` + +Then, pack them (together with `walk-dynamic`) into the tarball `rootfs-from-ubuntu1404.tar.gz`. + + +★★ To build it into a minimal Docker image with `Dockerfile`: + +``` +$ docker build . +``` + +To see what's inside this image, starting from its root directory `/`: + +``` +$ docker run IMAGE-ID walk / +``` + + +## Case 4: Link with dependent .so files extracted from CentOS 5.11 + +Extract required `.so` files from CentOS 5.11: + + ``` + 142488 Sep 16 2014 ld-linux-x86-64.so.2 + 1720712 Sep 16 2014 libc.so.6 + ``` + +Then, pack them (together with `walk-dynamic`) into the tarball `rootfs-from-centos511.tar.gz`. + + +★★ To build it into a minimal Docker image with `Dockerfile`: + +``` +$ docker build . +``` + +To see what's inside this image, starting from its root directory `/`: + +``` +$ docker run IMAGE-ID walk / +``` + + + +## Thinking: possible collision? 
+ +System-wide `.so` files from Ubuntu 14.04: + + ``` + 149120 Feb 25 2015 ld-linux-x86-64.so.2 + 1840928 Feb 25 2015 libc.so.6 + ``` + +System-wide `.so` files from CentOS 5.11: + + ``` + 142488 Sep 16 2014 ld-linux-x86-64.so.2 + 1720712 Sep 16 2014 libc.so.6 + ``` + +Can these two suites co-exist at the same time? Dependency hell? diff --git a/build-walk/case1/Dockerfile b/build-walk/case1/Dockerfile new file mode 100644 index 0000000..f7b0ddb --- /dev/null +++ b/build-walk/case1/Dockerfile @@ -0,0 +1,5 @@ +# dockerize `walk-go` + +FROM scratch + +COPY walk-static /bin/walk diff --git a/build-walk/case1/walk-static b/build-walk/case1/walk-static new file mode 100755 index 0000000..71fbbb1 Binary files /dev/null and b/build-walk/case1/walk-static differ diff --git a/build-walk/case2/Dockerfile b/build-walk/case2/Dockerfile new file mode 100644 index 0000000..3ea8ffb --- /dev/null +++ b/build-walk/case2/Dockerfile @@ -0,0 +1,8 @@ +# dockerize `walk-c` +# +# CAUTION: doesn't work due to the lack of required .so files. +# + +FROM scratch + +COPY walk-dynamic /bin/walk diff --git a/build-walk/case2/walk-dynamic b/build-walk/case2/walk-dynamic new file mode 100755 index 0000000..139696c Binary files /dev/null and b/build-walk/case2/walk-dynamic differ diff --git a/build-walk/case3/Dockerfile b/build-walk/case3/Dockerfile new file mode 100644 index 0000000..056893f --- /dev/null +++ b/build-walk/case3/Dockerfile @@ -0,0 +1,5 @@ +# dockerize `walk-c`, with .so files extracted from Ubuntu 14.04 + +FROM scratch + +Add rootfs-from-ubuntu1404.tar.gz . 
diff --git a/build-walk/case3/rootfs-from-ubuntu1404.tar.gz b/build-walk/case3/rootfs-from-ubuntu1404.tar.gz new file mode 100644 index 0000000..4548cf7 Binary files /dev/null and b/build-walk/case3/rootfs-from-ubuntu1404.tar.gz differ diff --git a/build-walk/case3/walk-dynamic b/build-walk/case3/walk-dynamic new file mode 100755 index 0000000..139696c Binary files /dev/null and b/build-walk/case3/walk-dynamic differ diff --git a/build-walk/case4/Dockerfile b/build-walk/case4/Dockerfile new file mode 100644 index 0000000..03c2d60 --- /dev/null +++ b/build-walk/case4/Dockerfile @@ -0,0 +1,5 @@ +# dockerize `walk-c`, with .so files extracted from CentOS 5.11 + +FROM scratch + +Add rootfs-from-centos511.tar.gz . diff --git a/build-walk/case4/rootfs-from-centos511.tar.gz b/build-walk/case4/rootfs-from-centos511.tar.gz new file mode 100644 index 0000000..ea8b2b3 Binary files /dev/null and b/build-walk/case4/rootfs-from-centos511.tar.gz differ diff --git a/build-walk/case4/walk-dynamic b/build-walk/case4/walk-dynamic new file mode 100755 index 0000000..139696c Binary files /dev/null and b/build-walk/case4/walk-dynamic differ diff --git a/build-walk/case5/Dockerfile b/build-walk/case5/Dockerfile new file mode 100644 index 0000000..6c7f154 --- /dev/null +++ b/build-walk/case5/Dockerfile @@ -0,0 +1,5 @@ +# dockerize `walk-c`, with Ubuntu 14.04 as base image + +FROM ubuntu:14.04 + +COPY walk-dynamic /bin/walk diff --git a/build-walk/case5/walk-dynamic b/build-walk/case5/walk-dynamic new file mode 100755 index 0000000..139696c Binary files /dev/null and b/build-walk/case5/walk-dynamic differ diff --git a/build-walk/src/README.md b/build-walk/src/README.md new file mode 100644 index 0000000..33557bf --- /dev/null +++ b/build-walk/src/README.md @@ -0,0 +1,56 @@ +Build instructions of rootfs tarballs for walk executables +=== + + + +## Source code of walk + +Two versions of the same functionality are provided: + + - `c-version`: written in C + - `golang-version`: written in Go + 
+ + +## Generate rootfs tarballs for walk-* + + +### Prepare + +Use the `extract-elf-so` executable copied from the project: [`William-Yeh/extract-elf-so`](https://github.com/William-Yeh/extract-elf-so). + +Install: + +```bash +$ curl -sSL http://bit.ly/install-extract-elf-so \ + | sudo bash +``` + +### Extract required .so files from Ubuntu 14.04 + +Execute the following command under Ubuntu 14.04: + +```bash +$ extract-elf-so \ + -d /bin \ + -n rootfs-from-ubuntu1404 \ + -z \ + walk-c +``` + +An `rootfs-from-ubuntu1404.tar.gz` tarball will be generated, if successful. + + +### Extract required .so files from CentOS 5.11 + +Execute the following command under CentOS 5.11: + +```bash +$ extract-elf-so \ + -d /bin \ + -n rootfs-from-centos511 \ + -z \ + walk-c +``` + +An `rootfs-from-centos511.tar.gz` tarball will be generated, if successful. diff --git a/build-walk/src/c-version/Makefile b/build-walk/src/c-version/Makefile new file mode 100644 index 0000000..ebabe7e --- /dev/null +++ b/build-walk/src/c-version/Makefile @@ -0,0 +1,7 @@ +all: walk.c + gcc -std=c99 -o walk-dynamic walk.c + gcc -std=c99 -o walk-static --static walk.c + + +clean: + rm -f walk-* diff --git a/build-walk/src/c-version/walk.c b/build-walk/src/c-version/walk.c new file mode 100644 index 0000000..7e8c3f5 --- /dev/null +++ b/build-walk/src/c-version/walk.c @@ -0,0 +1,85 @@ +// Directory walker, excluding /dev /proc /sys. 
+// +// Usage: walk [starting path] +// +// @adapted from: http://linux.die.net/man/3/ftw +// + + +#define _XOPEN_SOURCE 500 +#include +#include +#include +#include +#include +#include + +#define BUF_SIZE 512 +#define TIME_FORMAT "%F %R" +#define DEFAULT_DIRECTORY "/" + +const char* EXCLUDE_PATH[] = { + "/dev", + "/proc", + "/sys" +}; + + +int should_skip_this_path(const char* path) +{ + int array_length = sizeof(EXCLUDE_PATH) / sizeof(EXCLUDE_PATH[0]); + for (int i = 0; i < array_length; ++i) { + if (strstr(path, EXCLUDE_PATH[i]) == path) { + return 1; + } + } + return 0; +} + +static int +display_info(const char* fpath, const struct stat* sb, + int tflag, struct FTW* ftwbuf) +{ + // last update time + char utime[BUF_SIZE]; + struct tm* timeinfo = localtime(&sb->st_mtime); + strftime(utime, BUF_SIZE - 1, TIME_FORMAT, timeinfo); + //printf("%s\n", utime); + + // file type + const char* file_type = + (tflag == FTW_D) ? "d" : (tflag == FTW_DNR) ? "dnr" : + (tflag == FTW_DP) ? "dp" : (tflag == FTW_F) ? "f" : + (tflag == FTW_NS) ? "ns" : (tflag == FTW_SL) ? "sl" : + (tflag == FTW_SLN) ? "sln" : "???"; + + if (should_skip_this_path(fpath)) { + return 0; + } + + + printf("%-3s %s %7jd %-40s\n", + file_type, + utime, + (intmax_t) sb->st_size, + fpath); + return 0; /* To tell nftw() to continue */ +} + +int +main(int argc, char* argv[]) +{ + int flags = 0; + + if (argc > 2 && strchr(argv[2], 'd') != NULL) + flags |= FTW_DEPTH; + if (argc > 2 && strchr(argv[2], 'p') != NULL) + flags |= FTW_PHYS; + + if (nftw((argc < 2) ? 
DEFAULT_DIRECTORY : argv[1], display_info, 20, flags) + == -1) { + perror("nftw"); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/build-walk/src/golang-version/Dockerfile-compile b/build-walk/src/golang-version/Dockerfile-compile new file mode 100644 index 0000000..26e01c9 --- /dev/null +++ b/build-walk/src/golang-version/Dockerfile-compile @@ -0,0 +1,27 @@ +# building linux-amd64 native binary via Dockerized Go compiler +# +# @see https://registry.hub.docker.com/_/golang/ +# + +# pull base image +FROM golang:1.4.2 +MAINTAINER William Yeh + +ENV EXE_NAME walk_linux-amd64 +ENV GOPATH /opt +WORKDIR /opt + + +# fetch imported Go lib... +RUN go get github.com/kr/fs text/tabwriter +COPY walk.go /opt/ + +# compile... +RUN go build -o $EXE_NAME + + + +# copy executable +RUN mkdir -p /dist +VOLUME [ "/dist" ] +CMD cp *_linux-amd64 /dist diff --git a/build-walk/src/golang-version/Vagrantfile b/build-walk/src/golang-version/Vagrantfile new file mode 100644 index 0000000..d763d9e --- /dev/null +++ b/build-walk/src/golang-version/Vagrantfile @@ -0,0 +1,8 @@ +Vagrant.configure(2) do |config| + config.vm.box = "williamyeh/ubuntu-trusty64-docker" + + config.vm.provision "shell", inline: <<-SHELL + cd /vagrant + ./build.sh + SHELL +end diff --git a/build-walk/src/golang-version/build.sh b/build-walk/src/golang-version/build.sh new file mode 100755 index 0000000..9daeb3d --- /dev/null +++ b/build-walk/src/golang-version/build.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# +# scirpt for compiling go source via Dockerized Go compiler. +# +# If you'd like to compile via native Go compiler: +# +# $ go install github.com/docopt/docopt-go +# $ go build +# + +set -e +set -x + + +IMAGE_NAME=app_build_temp +docker build -t $IMAGE_NAME -f Dockerfile-compile . 
+docker run --rm -v "$(pwd)/dist:/dist" $IMAGE_NAME + +docker rmi -f $IMAGE_NAME \ No newline at end of file diff --git a/build-walk/src/golang-version/walk.go b/build-walk/src/golang-version/walk.go new file mode 100644 index 0000000..4421fc1 --- /dev/null +++ b/build-walk/src/golang-version/walk.go @@ -0,0 +1,55 @@ +// Directory walker, excluding /dev /proc /sys. +// +// Usage: walk [starting path] +// +// adapted from: https://raw.githubusercontent.com/kr/fs/master/example_test.go +// + +package main + +import ( + "fmt" + "os" + "regexp" + "strconv" + + "github.com/kr/fs" + "text/tabwriter" +) + +var REGEX_EXCLUDE_PATH = regexp.MustCompile(`^/(dev|proc|sys)`) + +const DATE_LAYOUT string = "2006-01-02 15:04" + +var starting_path = "." + +func main() { + + if len(os.Args) > 1 { + starting_path = os.Args[1] + } + + w := new(tabwriter.Writer) + w.Init(os.Stdout, 2, 0, 1, ' ', tabwriter.AlignRight) + + walker := fs.Walk(starting_path) + for walker.Step() { + if err := walker.Err(); err != nil { + fmt.Fprintln(os.Stderr, err) + continue + } + path := walker.Path() + if REGEX_EXCLUDE_PATH.FindStringSubmatch(path) != nil { + continue + } + + info := walker.Stat() + file_type := "f" + if info.IsDir() { + file_type = "d" + } + + fmt.Fprintln(w, file_type+" \t"+(info.ModTime().Format(DATE_LAYOUT)+" \t"+strconv.FormatInt(info.Size(), 10)+"\t\t"+path)) + } + w.Flush() +} diff --git a/build-walk/walk-cases.png b/build-walk/walk-cases.png new file mode 100644 index 0000000..4b45821 Binary files /dev/null and b/build-walk/walk-cases.png differ diff --git a/build-walk/walk-dynamic b/build-walk/walk-dynamic new file mode 100755 index 0000000..139696c Binary files /dev/null and b/build-walk/walk-dynamic differ diff --git a/build-walk/walk-static b/build-walk/walk-static new file mode 100755 index 0000000..71fbbb1 Binary files /dev/null and b/build-walk/walk-static differ diff --git a/config.md b/config.md index 68e1b14..2daf758 100644 --- a/config.md +++ b/config.md 
@@ -1,13 +1,31 @@ 行前準備 Part 2:預載範例程式碼 === +[ <-- Prev: [行前準備 Part 1](prepare.md) ] + + 為了節省課程現場下載程式及相關資源的時間及頻寬,請學員先在**網路暢通的地方**,根據指示備妥必要的軟體及設定。 -如果網路順暢,整個過程可能會花上數十分鐘,請耐心等候。 +整個過程可能會花上數十分鐘,請耐心等候。
+整個過程可能會花上數十分鐘,請耐心等候。
+整個過程可能會花上數十分鐘,請耐心等候。
+(因為很重要,所以要說三次。)   -☛ 程式範例會在每一梯次開課前一週才定案,因此,建議**等開課前一週再進行下列步驟**。 +☛ 注意事項: + +- 程式範例會在每一梯次開課前一週才定案,因此,建議**等開課前一週再進行下列步驟**。 + +- 進行以下步驟之前,請先確定 VirtualBox 之【預設機器資料夾】所在的磁碟上,仍有足夠空間,以容納各虛擬機內容及預載之 Docker 映像檔。建議至少要預留 15 GB: + ![預留 VirtualBox 虛擬機所需空間](img/vbox-diskspace.png) + +- 進行以下步驟之前,建議您先關閉(甚至解除安裝)可能與 VirtualBox 相衝的其他虛擬機軟體。譬如說,以下命令可暫時關閉 Hyper-V(還需要你手動重新開機,才會生效): + + ``` + bcdedit /set hypervisorlaunchtype off + ``` +   @@ -18,14 +36,26 @@ 2. 開啟 https://github.com/William-Yeh/docker-workshop -3. 按右上角的 ![Fork Button](icon-github-fork.png) 按鈕。 +3. 按右上角的 ![Fork Button](img/icon-github-fork.png) 按鈕。 4. 幾秒鐘之內,你應該會被自動導引到自己帳號底下的 docker-workshop 專案。 -### 步驟二:下載 workshop 範例程式 +### 步驟二:開啟終端機,以進行後續步驟 -請先切換到你選定的工作目錄,譬如: +Linux 及 Mac 使用者:請使用 xterm、終端機 (Terminal)、iTerm 或任何你慣用的終端機軟體。 + +Windows 使用者: + +1. 請先以「不含任何中文字」的使用者帳號,登入 Windows(否則會在某些 Vagrant 相關步驟出錯)。 +2. 以『系統管理者』權限,打開【命令提示字元】軟體(否則**可能**會在某些 Vagrant 相關步驟出錯)。 + + + + +### 步驟三:下載 workshop 範例程式 + +請先切換到你選定的工作目錄(Windows 用戶,請避免選用「含中文字」),譬如: ```shell cd YOUR_OWN_WORKING_DIRECTORY @@ -38,79 +68,56 @@ ``` -### 步驟三:切換至此 workshop 目錄 + +### 步驟四:切換至此 workshop 目錄 ```shell cd docker-workshop ``` -#### ⇡ 以上所列的幾個步驟,如有不清楚的,請見示範錄影: - -[![Docker Workshop - How to Fork Project](http://img.youtube.com/vi/n2ogtWHZRzo/0.jpg)](http://youtu.be/n2ogtWHZRzo) - ---- -
- -#### ⇣ 以下所列的幾個步驟,如有不清楚的,請見示範錄影: +這個目錄,課堂上會反覆用到。建議你設桌面捷徑,以節省課堂現場切換目錄的時間。 -[![Docker Workshop - How to Setup Lab Environment](http://img.youtube.com/vi/0aaMQ8u9Dvg/0.jpg)](http://youtu.be/0aaMQ8u9Dvg) +#### ⇡ 以上所列的幾個步驟,如有不清楚的,請見示範錄影: - -### 步驟四:安裝必要的 Vagrant 擴充套件 - -➤ 錄影 0:00~01:16 片段。 - -```shell -vagrant plugin install vagrant-hosts -vagrant plugin install vagrant-vbox-snapshot -``` +[![Docker Workshop - How to Fork Project](http://img.youtube.com/vi/n2ogtWHZRzo/0.jpg)](http://youtu.be/n2ogtWHZRzo) ### 步驟五:初始化 Vagrant 虛擬機 耗時最久的,就是這個步驟,請耐心等候。 -➤ 錄影 01:23~57:25 片段。 - -- 如果你的電腦是 Mac 或 Linux: +- 如果你的電腦是 Mac 或 Linux,請輸入: ```shell ./setup-vagrant ``` -- 如果你的電腦是 Windows: +- 如果你的電腦是 Windows,請輸入: ```shell setup-vagrant ``` -### 步驟六:快照 (snapshot) +### 步驟六:確認已設定完畢 -➤ 錄影 57:38~58:42 片段。 - -先替這一批 snapshot 取個易於辨識追溯的名稱,建議以現在時間 *mm/dd-HH:MM* 為名。 - -假設現在時間是 `02/19-08:00` (Feb 19, 08:00),請輸入以下指令: +輸入以下指令,查看各虛擬機的狀態: ```shell -vagrant snapshot take main 02/19-08:00 -vagrant snapshot take alice 02/19-08:00 -vagrant snapshot take bob 02/19-08:00 -vagrant snapshot take centos 02/19-08:00 -vagrant snapshot take registry 02/19-08:00 +vagrant status ``` +如果看到以下畫面,三台虛擬機都呈現 "poweroff" 狀態,就表示已經順利設定完畢: -### 步驟七:確認 snapshot 已完成 +``` +Current machine states: -➤ 錄影 59:08~片尾。 +main poweroff (virtualbox) +centos poweroff (virtualbox) +registry poweroff (virtualbox) -```shell -vagrant snapshot list main -vagrant snapshot list alice -vagrant snapshot list bob -vagrant snapshot list centos -vagrant snapshot list registry +This environment represents multiple VMs. The VMs are all listed +above with their current state. For more information about a specific +VM, run `vagrant status NAME`. 
``` diff --git a/download-virtualbox.png b/img/download-virtualbox.png similarity index 100% rename from download-virtualbox.png rename to img/download-virtualbox.png diff --git a/icon-github-fork.png b/img/icon-github-fork.png similarity index 100% rename from icon-github-fork.png rename to img/icon-github-fork.png diff --git a/icon-github-watch.png b/img/icon-github-watch.png similarity index 100% rename from icon-github-watch.png rename to img/icon-github-watch.png diff --git a/img/vbox-diskspace.png b/img/vbox-diskspace.png new file mode 100644 index 0000000..152b26d Binary files /dev/null and b/img/vbox-diskspace.png differ diff --git a/vbox-win64-error.png b/img/vbox-win64-error.png similarity index 100% rename from vbox-win64-error.png rename to img/vbox-win64-error.png diff --git a/intro.md b/intro.md index 9ec7799..cfbd47f 100644 --- a/intro.md +++ b/intro.md @@ -7,9 +7,11 @@ Docker 建置實戰講堂 報名系統: - - [第三梯次](http://containersummit.ithome.com.tw/workshop/3/) : 2015-04-11 - - [第二梯次](http://containersummit.ithome.com.tw/workshop/2/) : 2015-03-07 - - [第一梯次](http://containersummit.ithome.com.tw/workshop/1/) : 2015-02-07 + - [第五梯次](http://containersummit.ithome.com.tw/workshop/#ticket) : 2015-06-27 (高雄) + - 第四梯次 : 2015-05-09 (台北) + - 第三梯次 : 2015-04-11 (台北) + - 第二梯次 : 2015-03-07 (台北) + - 第一梯次 : 2015-02-07 (台北) ## 課程目標 @@ -19,6 +21,21 @@ Docker 建置實戰講堂 ## 課程簡介 +- Docker 技術總覽 +- 實習環境介紹 +- 常用的工具指令:docker cli、docker compose +- 常用的 Dockerfile 指令 +- 從極簡化 Docker image 領會 Docker 三大特色 +- Docker 化典型伺服器軟體(以 Redis、Nginx 為例) +- 從程式源碼建置 Docker 化軟體(以 PHP、Node.js 為例) +- GitHub 與 Docker Hub 連動 +- Docker 網路模型 +- Docker 化軟體要素(暨常見地雷):port、volume、daemon off +- 綜合運用 + + +## 課程特色 + 1. **Docker 的 Why**:從 DevOps 角度,分析 Docker 的優異特性:dependency、isolation、lightweight、standard。 2. 
**Docker 的 How**:從雲端時代軟體架構角度(尤其是 [12-factor app](http://12factor.net/)),搭配重點案例,探討 Dockerfile 及「Docker 化軟體」設計要點。 @@ -30,7 +47,7 @@ Docker 建置實戰講堂 ## 課程時數 -6 小時。 +6 小時,涵蓋 90% 教材內容。 ## 適合對象 @@ -40,7 +57,7 @@ Docker 建置實戰講堂 ## 報名須知 -1. 本課程含大量實機操作內容,請自備筆記型電腦。 +1. 本課程含大量實機操作內容,請自備 **64 位元**的筆記型電腦(Windows、Mac、Linux 皆可,但需要是 **64 位元**的版本)。 2. 建議上課前學習 Git 版本控制指令: @@ -49,10 +66,8 @@ Docker 建置實戰講堂 3. 具備以下能力能讓你學得更快: - - 用過任何一種程式語言或框架(譬如:Bash、C、Java、Node.js、PHP、Python、Ruby...),撰寫可在 Linux 上執行的程式,並部署之。 + - 用過任何一種程式語言或框架(譬如:Bash、C、Go、Java、Node.js、PHP、Python、Ruby...),撰寫可在 Linux 上執行的程式,並部署之。 - 曾在 Linux 上安裝過任何一種開放源碼伺服器軟體(譬如:Apache HTTP Server、MongoDB、MySQL、Nginx、WordPress...)。 - 在任何一種雲端平台(Amazon EC2、DigitalOcean、Google Compute Engine、Linode、Microsoft Azure...),完成上述任務。 - - diff --git a/linking-full/README.md b/linking-full/README.md index 482b94d..4209adf 100644 --- a/linking-full/README.md +++ b/linking-full/README.md @@ -1,7 +1,7 @@ # Container Linking -This directory is to demostrate the "[container linking](https://docs.docker.com/userguide/dockerlinks/)" feature of Docker, using [wrk](https://github.com/William-Yeh/docker-wrk) (written in C and Lua), [Spray](https://github.com/William-Yeh/Docker-Spray-HttpServer) (written in Scala), [Fluentd](https://github.com/William-Yeh/docker-fluentd) (written in Ruby), and [Elasticsearch + Kibana](http://www.elasticsearch.org/overview/kibana/) (written in Java) as example. +This directory is to demonstrate the "[container linking](https://docs.docker.com/userguide/dockerlinks/)" feature of Docker, using [wrk](https://github.com/William-Yeh/docker-wrk) (written in C and Lua), [Spray](https://github.com/William-Yeh/Docker-Spray-HttpServer) (written in Scala), [Fluentd](https://github.com/William-Yeh/docker-fluentd) (written in Ruby), and [Elasticsearch + Kibana](http://www.elasticsearch.org/overview/kibana/) (written in Java) as example. 
![Overview of the demo](./flows.png "Overview of the demo") diff --git a/provision/IMAGE-LIST b/make-registry-box/IMAGE-LIST similarity index 69% rename from provision/IMAGE-LIST rename to make-registry-box/IMAGE-LIST index b11c900..09a8951 100644 --- a/provision/IMAGE-LIST +++ b/make-registry-box/IMAGE-LIST @@ -11,18 +11,21 @@ ubuntu:latest centos:5.11 busybox:latest +# Docker Hub: https://registry.hub.docker.com/_/php/ +php:5.6.6-cli +#php:5.6.6-fpm + # Docker Hub: https://registry.hub.docker.com/_/node/ node:0.10.36-slim -node:0.10.16-onbuild +node:0.10.36-onbuild #--------------------------------------# # app # redis:2.8.19 -nginx:1.6.2 -nginx:1.7.8 -nginx:1.7.9 +nginx:1.9.0 +haproxy:1.5.12 mysql:5.6.22 wordpress:4.1.0 @@ -35,9 +38,10 @@ jwilder/nginx-proxy williamyeh/redis:2.8.19 williamyeh/wrk -williamyeh/spray-httpserver -williamyeh/fluentd -digitalwonderland/elasticsearch +#williamyeh/spray-httpserver +#williamyeh/fluentd +#digitalwonderland/elasticsearch +#elasticsearch:1.4.4 -ipython/notebook -#dockerfile/ghost +#ipython/notebook +#ghost:0.5.9 diff --git a/make-registry-box/README.md b/make-registry-box/README.md new file mode 100644 index 0000000..732b568 --- /dev/null +++ b/make-registry-box/README.md @@ -0,0 +1,37 @@ +Building a Docker Registry box with pre-loaded images +=== + + +## Name + +Official box name in Atlas (was: Vagrant Cloud): [`williamyeh/docker-workshop-registry`](https://atlas.hashicorp.com/williamyeh/boxes/docker-workshop-registry). + + + +## Purpose + +To build a Vagrant box, which: + + - derives from [`williamyeh/insecure-registry`](https://vagrantcloud.com/williamyeh/insecure-registry) box (refer to [this](https://github.com/William-Yeh/docker-enabled-vagrant/tree/master/insecure-registry) for implementation details). + - pre-loads Docker images listed in the `IMAGE-LIST` file from Docker Hub. + + + +## Build it on your own... 
+ +### Prerequisite + +Install [`vagrant-hosts`](https://github.com/adrienthebo/vagrant-hosts) plugin: + +```bash +vagrant plugin install vagrant-hosts +``` + +### Build! + + +```bash +$ ./build.sh +``` + +If everything is OK, you'll obtain a box file `docker-workshop-registry.box`. Feel free to place it on your local disk or cloud storage (Dropbox, S3, etc). diff --git a/make-registry-box/Vagrantfile b/make-registry-box/Vagrantfile new file mode 100644 index 0000000..55f2c03 --- /dev/null +++ b/make-registry-box/Vagrantfile @@ -0,0 +1,34 @@ +Vagrant.require_version ">= 1.7.2" + + +$script = < 1.1:安裝步驟
- 1.2:Windows 作業系統注意事項 + 1.2:Windows 作業系統注意事項
+ 1.3:磁碟空間 2. 註冊 GitHub 帳號 3. 註冊 Docker Hub 帳號 4. 列印參考資料 +[ Next: --> [行前準備 Part 2](config.md) ] + + + ## 1. 安裝軟體 -本課程的「實機操作」部分,需要學員自備筆記型電腦。而這些筆電的作業系統,可能是 Windows,也可能是 Mac OS X,或各種 Linux distributions。 +本課程的「實機操作」部分,需要學員自備 **64 位元**的筆記型電腦。而這些筆電的作業系統,可能是 Windows,也可能是 Mac OS X,或各種 Linux distributions。 為了讓課程有個一致的實作環境,減少干擾施教者與學習者的環境變因,我們統一使用 [Vagrant](http://www.vagrantup.com/) + [VirtualBox](https://www.virtualbox.org/) 虛擬機器組合,作為課堂實作的統一環境。對這種軟體組合的技術細節感興趣的,請見本文附錄:【關於 Vagrant】。這裡先只講軟體安裝步驟。 @@ -31,35 +36,50 @@ 並依序執行之。 - ![下載必要的 VirtualBox 安裝檔案](download-virtualbox.png) + ![下載必要的 VirtualBox 安裝檔案](img/download-virtualbox.png) ### 1.2:Windows 作業系統注意事項 -在 Windows 上安裝 VirtualBox 時,如果遇到以下錯誤: +在 Windows 上,下載 VirtualBox 兩個安裝檔時,最好先置於「完全由**英文字**或**阿拉伯數字**所組成的路徑」上,再執行安裝程序,比較不會出現奇奇怪怪的亂碼問題。 + +- 安裝 VirtualBox 時,如果遇到以下錯誤: + + ![找不到指定的檔案](img/vbox-win64-error.png) + + 請先切換到放置下載檔案的目錄,找出剛剛下載回來的安裝檔名(以我的例子:`VirtualBox-4.3.22-98236-Win.exe`),再依序執行以下命令: + + ```bat + VirtualBox-4.3.22-98236-Win -extract + cd VirtualBox + dir + ``` - ![找不到指定的檔案](vbox-win64-error.png) + 你應該會看到一個 `msi` 類型的安裝檔,請執行它。 -請先切換到放置下載檔案的目錄,找出剛剛下載回來的安裝檔名(以我的例子:`VirtualBox-4.3.14-95030-Win.exe`),再依序執行以下命令: +- 安裝 VirtualBox Extension Pack 時,如果遇到 “The installer failed with exit code: 1” 錯誤,請先切換到放置下載檔案的目錄,找出剛剛下載回來的安裝檔名(以我的例子:`Oracle_VM_VirtualBox_Extension_Pack-4.3.22-98236.vbox-extpack`),再依序執行以下命令: - ```bat - $ VirtualBox-4.3.14-95030-Win -extract - $ cd VirtualBox - $ dir - ``` + ```bat + VBoxManage extpack uninstall "Oracle VM VirtualBox Extension Pack" + VBoxManage extpack cleanup + VBoxManage extpack install --replace Oracle_VM_VirtualBox_Extension_Pack-4.3.22-98236.vbox-extpack + ``` -你應該會看到一個 `msi` 類型的安裝檔,請執行它。 -最後,為了運作順暢,你還需要一個「**純命令列**的 ssh 程式」。如果你堅持要用 [PuTTY](http://www.chiark.greenend.org.uk/~sgtatham/putty/)、[PieTTY](http://ntu.csie.org/~piaip/pietty/) 或[可攜版](http://jedi.org/PieTTYPortable/),可能會在某些進階步驟踩到地雷,建議你安裝一個「**純命令列**的 ssh 程式」吧。以下是一些輕量級的 `ssh.exe` 選項: - - [OpenSSH for 
Windows](http://sourceforge.net/projects/opensshwindows/) - - [Git for the Windows platform](http://git-scm.com/download/win) 裡面含有一枚 `ssh.exe` +最後,為了 Vagrant 運作順暢,你還需要一個「**純命令列**的 ssh 程式」(像 [PuTTY](http://www.chiark.greenend.org.uk/~sgtatham/putty/)、[PieTTY](http://ntu.csie.org/~piaip/pietty/) 或[可攜版](http://jedi.org/PieTTYPortable/)之類的 GUI 程式,可能會在某些進階步驟踩到地雷;非 power user 請勿嘗試)。本課程建議您安裝以下這個「**純命令列**的 ssh 程式」: -安裝後,記得要去【控制台】把 `ssh.exe` 路徑加到 `PATH` 環境變數。 + - [Git for the Windows platform](http://git-scm.com/download/win),不但有 `git.exe`,裡面也含有一枚 `ssh.exe`。 -或者,乾脆安裝整套 [MinGW](http://www.mingw.org/) 或 [Cygwin](https://www.cygwin.com/) 算了⋯⋯ +安裝後,記得要去【控制台】把 `ssh.exe` 所在路徑加到 `PATH` 環境變數。 +### 1.3:磁碟空間 + +請確定 VirtualBox 之【預設機器資料夾】所在的磁碟上,仍有足夠空間,以容納各虛擬機內容及預載之 Docker 映像檔。 + +建議至少要預留 15 GB: +![預留 VirtualBox 虛擬機所需空間](img/vbox-diskspace.png) @@ -74,7 +94,7 @@ GitHub 註冊及設定完畢後,請再進行以下步驟,以訂閱 Docker Wo A. 開啟 https://github.com/William-Yeh/docker-workshop -B. 按右上角的 ![Watch Button](icon-github-watch.png) 按鈕。 +B. 按右上角的 ![Watch Button](img/icon-github-watch.png) 按鈕。 之後,範例程式的任何異動,會自動送到你的 GitHub 登入畫面及 email。這個管道也會通知你,最近梯次的範例程式碼,何時正式定案。 @@ -91,10 +111,10 @@ B. 
按右上角的 ![Watch Button](icon-github-watch.png) 按鈕。 有幾份資料,在課堂上會反覆用到。因著作權所限,我們無法主動提供紙本。如果您習慣在紙本上註記,請於參加課程之前自行列印,攜至課堂上。 - - [Docker 命令查詢](http://philipzheng.gitbooks.io/docker_practice/content/appendix_command/README.html) - - [一張圖總結 Docker 的命令](http://philipzheng.gitbooks.io/docker_practice/content/_images/cmd_logic.png) ← 彩色 + - [Docker 命令查詢](http://philipzheng.gitbooks.io/docker_practice/content/appendix_command/README.html) + - [Dockerfile 指令](http://philipzheng.gitbooks.io/docker_practice/content/dockerfile/instructions.html) diff --git a/provision/setup-docker-tools.sh b/provision/setup-docker-tools.sh index aec8033..fbbcf9c 100755 --- a/provision/setup-docker-tools.sh +++ b/provision/setup-docker-tools.sh @@ -4,7 +4,7 @@ # readonly BASE_URL="https://raw.githubusercontent.com/William-Yeh/docker-host-tools/master/" -readonly APP=( "DOCKER" "docker-mirror" "docker-rm-stopped" "docker-rmi-repo" "docker-inspect-attr" ) +readonly APP=( "DOCKER" "docker-rm-stopped" "docker-rmi-repo" "docker-inspect-attr" ) cd /usr/local/bin @@ -14,3 +14,10 @@ for i in "${APP[@]}"; do curl -o $NAME "$BASE_URL$NAME" chmod a+x $NAME done + + +readonly OTHER_TOOLS=( "http://stedolan.github.io/jq/download/linux64/jq" ) +for i in "${OTHER_TOOLS[@]}"; do + curl -L -O $i +done +chmod a+x * diff --git a/provision/setup-hosts.sh b/provision/setup-hosts.sh new file mode 100755 index 0000000..d648e22 --- /dev/null +++ b/provision/setup-hosts.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# +# append host entry into /etc/hosts +# +# [NOTE] better way -- use 'vagrant-hosts' plugin: +# https://github.com/adrienthebo/vagrant-hosts +# + +cat <> /etc/hosts +10.0.0.200 registry.com +EOF diff --git a/reverse-proxy/README.md b/reverse-proxy/README.md new file mode 100644 index 0000000..a977ca2 --- /dev/null +++ b/reverse-proxy/README.md @@ -0,0 +1,116 @@ +Reverse proxy + Docker example +=== + +## Purpose + +Demonstrate how to use reverse proxy for backend application servers, all in the Docker's world. 
+ + +## Software stack + +![bg](reverse-proxy.png) + + +#### Nginx + + - [Reverse proxy](http://nginx.com/resources/admin-guide/reverse-proxy/) + + - [Load balancing](http://nginx.com/resources/admin-guide/load-balancer/) + + - [SSL termination](http://nginx.com/resources/admin-guide/nginx-ssl-termination/): use `make-cert.sh` to generate self-signed certificate. + + +#### HAProxy + + - Reverse proxy + + - Load balancing + + - Active health monitoring + + - SSL termination: use `make-cert.sh` to generate self-signed certificate. + + + +#### Node.js application server instances + + - Source code in `app` directory. + - *N* instances. + - High availability and zero-downtime deployments via Nginx or HAProxy. + + +#### Redis server + + - Shared datastore across all Node.js application instances. + - [Persistence](http://redis.io/topics/persistence): RDB and AOF modes. + + +## Usage #1: simple case + +First, start the whole software stack: + +```bash +$ docker-compose up -d + +$ docker ps +``` + + +Second, connect to Nginx via HTTP and HTTPS : + +```bash +$ curl -v http://localhost:10080 + +$ curl -v --insecure https://localhost:10443 + +``` + +Third, connect to HAProxy via HTTP and HTTPS : + +```bash +$ curl -v http://localhost:10090 + +$ curl -v --insecure https://localhost:10091 +``` + + +## Usage #2: zero-downtime deployments + +**NOTE**: Nginx supports "[`health_check`](http://nginx.com/resources/admin-guide/load-balancer/#health_active)" only in its commercial product "Nginx Plus" (see [this](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#health_check) and [this](http://nginx.com/products/application-health-checks/)). Therefore, only HAProxy is demonstrated in this zero-downtime case. 
+ +First, start the whole software stack: + +```bash +$ docker-compose -f docker-compose-2.yml up -d + +$ docker ps +``` + +Second, use browser to open HAProxy statistics report: `http://localhost:10100/` + + +Third, connect to HAProxy via HTTP and HTTPS : + +```bash +$ curl -v http://localhost/ + +$ curl -v --insecure https://localhost/ +``` + + +Fourth, stop one of the application instances (e.g., `app1`): + +```bash +$ docker-compose stop app1 +``` + +Then, try to connect via HTTP and HTTPS. Any downtime? + + +Fifth, let the stopped instance come back to life: + +```bash +$ docker-compose start app1 +``` + +Then, try to connect via HTTP and HTTPS. Does the instance receive packets from HAProxy again? diff --git a/reverse-proxy/app/Dockerfile b/reverse-proxy/app/Dockerfile new file mode 100644 index 0000000..6b92a3b --- /dev/null +++ b/reverse-proxy/app/Dockerfile @@ -0,0 +1,3 @@ +FROM node:0.10.36-onbuild + +EXPOSE 3000 diff --git a/reverse-proxy/app/index.js b/reverse-proxy/app/index.js new file mode 100644 index 0000000..3e1dae8 --- /dev/null +++ b/reverse-proxy/app/index.js @@ -0,0 +1,75 @@ +// A simple web server that generates dynamic content based on responses from Redis +// +// Adapted from: https://github.com/mranney/node_redis/blob/master/examples/web_server.js +// + +var http = require('http'), + ip = require("ip"); + +var PORT = process.env.PORT || 3000 ; +var REDIS_HOST = process.env.REDIS_HOST || '127.0.0.1' ; +var REDIS_PORT = process.env.REDIS_PORT || 6379 ; + + +function get_remote_ip(req) { + var ip_address = null; + try { + ip_address = req.headers['x-forwarded-for']; + } + catch (err) { + ip_address = req.connection.remoteAddress; + } + return ip_address; +} + + +function report_health(resp) { + resp.writeHead(200, { + "Content-Type": "text/plain" + }); + resp.write("OK"); + resp.end(); +} + + +var redis_client = require('redis').createClient(REDIS_PORT, REDIS_HOST); +var server = http.createServer(function(request, response) { + 
+ console.log(request.url); + + // skip uninteresting URL requests + if (request.url === '/favicon.ico') { + return; + } + else if (request.url === '/health') { + report_health(response); + return; + } + + + response.writeHead(200, { + "Content-Type": "text/plain" + }); + + var total_requests; + + redis_client.incr("requests", function(err, reply) { + total_requests = reply; // stash response in outer scope + }); + + redis_client.hincrby("ip", get_remote_ip(request), 1); + redis_client.hgetall("ip", function(err, reply) { + // This is the last reply, so all of the previous replies must have completed already + response.write( + "Total requests: " + total_requests + "\n\n" + + "My IP: " + ip.address() + "\n\n" + + "Remote IP count: \n"); + Object.keys(reply).forEach(function(ip) { + response.write(" " + ip + ": " + reply[ip] + "\n"); + }); + response.end(); + }); + +}).listen(PORT); + +console.log('Server listening on port', PORT); diff --git a/reverse-proxy/app/package.json b/reverse-proxy/app/package.json new file mode 100644 index 0000000..e64bc33 --- /dev/null +++ b/reverse-proxy/app/package.json @@ -0,0 +1,12 @@ +{ + "name": "http-server-example", + "version": "0.0.1", + "description": "A simple http server with counter stored in Redis", + "scripts": { + "start": "node index.js" + }, + "dependencies": { + "redis": "*", + "ip": "*" + } +} diff --git a/reverse-proxy/certificate.crt b/reverse-proxy/certificate.crt new file mode 100644 index 0000000..6e88c40 --- /dev/null +++ b/reverse-proxy/certificate.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID/TCCAuWgAwIBAgIJALapUPkV3mB6MA0GCSqGSIb3DQEBCwUAMIGUMQswCQYD +VQQGEwJUVzEPMA0GA1UECAwGVGFpcGVpMQ8wDQYDVQQHDAZUYWlwZWkxFDASBgNV +BAoMC1dpbGxpYW0gWWVoMQswCQYDVQQLDAJSRDEYMBYGA1UEAwwPd2lsbGlhbS15 +ZWgubmV0MSYwJAYJKoZIhvcNAQkBFhd3aWxsaWFtLnBqeWVoQGdtYWlsLmNvbTAe +Fw0xNTA1MjExNTE1NTNaFw0xNjA1MjAxNTE1NTNaMIGUMQswCQYDVQQGEwJUVzEP +MA0GA1UECAwGVGFpcGVpMQ8wDQYDVQQHDAZUYWlwZWkxFDASBgNVBAoMC1dpbGxp 
+YW0gWWVoMQswCQYDVQQLDAJSRDEYMBYGA1UEAwwPd2lsbGlhbS15ZWgubmV0MSYw +JAYJKoZIhvcNAQkBFhd3aWxsaWFtLnBqeWVoQGdtYWlsLmNvbTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANN2QBcBQwQO+KJAHj+4dLQeAxw2fC9sFVyr +p6QLRUKnmIyTZmyTTpAQCTVev1TlnAmHllfM/TgYytxHKr+5gQf8fvxxqDXZGnYW +5vmt/pHfHLEoKYv/3AZ2ND23Da6tnTw52BjkQa67rIbBja0vitWNLiy/2I409OAd +Ym2WjFvMntUaXwUnyPaRfBxaCvXk0G0dD2B55sxmaXchxhVwfzmC2l3DPVEbQF/s +9wNQ+msSFGy4qo6W2DQ+5vhqHZeBxbWM7FBJ6H/3T/nckNwh6INOKP3VzroDWQ8x +lnAL2casBaxtGkgm3zvzzkwb2lQ2K2TRIH1/452deuocO17BqFECAwEAAaNQME4w +HQYDVR0OBBYEFOwIll8hcydreGPcM0LtNx6ePw2uMB8GA1UdIwQYMBaAFOwIll8h +cydreGPcM0LtNx6ePw2uMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEB +AIfiL5tpq7Tw8O6R6aM/4vJKe5HwQKeuf/xNuF8SBVX48ZOs62F2BVdnQA9WVpbW +4VfWJ4h04pOGqMLZz4Xpk2tJ9boCrCfl2MFL35wjJ0LM+NGwp+W8qHwqXlHIZY9j +abierovSIQrzDGqy7qPF23t+Xh1uAzOO8Q1nPyG0xp6AV9jyEGWDmsd5uH1/iCDy +uLXrPhaO4brEBE1wtmOqSdoBugCKBfdxVBjLF/n9rXRRu7vqynRQzojn6BVUDj6Z +1QPQYyjsiYKVJ6qARB0hmXEmz60bcShgYeYRmZqxjT/iHXFJGfZljzd16wCeytpj +vDdY2+oB+CiBEDQLsZmDGNI= +-----END CERTIFICATE----- diff --git a/reverse-proxy/certificate.key b/reverse-proxy/certificate.key new file mode 100644 index 0000000..0e2c278 --- /dev/null +++ b/reverse-proxy/certificate.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDTdkAXAUMEDvii +QB4/uHS0HgMcNnwvbBVcq6ekC0VCp5iMk2Zsk06QEAk1Xr9U5ZwJh5ZXzP04GMrc +Ryq/uYEH/H78cag12Rp2Fub5rf6R3xyxKCmL/9wGdjQ9tw2urZ08OdgY5EGuu6yG +wY2tL4rVjS4sv9iONPTgHWJtloxbzJ7VGl8FJ8j2kXwcWgr15NBtHQ9geebMZml3 +IcYVcH85gtpdwz1RG0Bf7PcDUPprEhRsuKqOltg0Pub4ah2XgcW1jOxQSeh/90/5 +3JDcIeiDTij91c66A1kPMZZwC9nGrAWsbRpIJt87885MG9pUNitk0SB9f+OdnXrq +HDtewahRAgMBAAECggEBAJ6OJ/gX5hwA7cUatpOoxzjW2BYrdGpKbKoTu5txJ/mW +mPeu/jmjKmEy5PSzmlsZ3yn2FHC5L5UEYfdRFWi0u181oDHBNxNJOmIZQg8su5i8 +5lKffqeOrXfa4cj0nLnBTAiAhtwDKozOA8r3DjgxUbRWK606HDJk2sfAwLDTFWMu +aTITr2yH/S1HwSyYa0a1dRTddLkz9vl3kNc1y1UfDlThzSh+aKDFx1PwgDapjCID +zZG8oAeLZWEf5LVRaEqH/5zqNMRNWF7Lotun9tB8K8ffO87k0QxEtBO2KGPNNffp 
+BMuCoF3wwZwAM+NfJG0gLJVjRgWh1eXXAeVprcd3eZECgYEA8XMul3+QCpLtx40y +1gnsJvrc+sNyPOtHr19CJKOwB27OFVXszdU2ZJukYHZjRQ+821k56B9+ts8I5Pqm +Ivlr46dFxj+Cjp951pM+6hVwGhRCGJ3JhYylSCMVUY7l/k+jiob4VHGrQ40Pglbx +wv2xRhDWlPjqmPw15dRYw3YtwDsCgYEA4DRybe+0feiVcGUXyXZrAN9I2GBi9K6N +g0YUVq6Gajnw4LvXmLo64QJSBu+8M959gXtM/PfcHGmJ6h/99sNCfKa46u8Sbaw0 +3VMjVoEUJDmvYFeK69YWRqQ/dvGqFrPoTYc7p8PY8h3zfQ4iRx57QnJh+Bopfy/J +8OrxW9qhXOMCgYA0j+mVq3x1ANVourFVbSWOus/+eyvUXTfm0qr4hPXeOF3+wQzj +JkZIEqXvJfaoYyuu6La1dJ3GOD3sczBVrsyw64pCBTNrzwXcE4P/u5kC5GQbLcZg +H2Uf1hte/OfOvBGTJ5zJ7VDmTMASlNDerRG5ehLsrV+M3GLzXI3SR1jgXwKBgEQ4 +8VDujbd6XG609EYDWJANYXcl7TaTHtCBNJMjzBbqxpc70WDR2jS5pggiTtxhqUML +J+QZ78pdeu/ZCEhcJhEUElS6pprkublXIPujj2NugiUIvmcA4Api1ET3SFBcMiXV +LpvgBlW2M28sK9YEOnF/qLv8CgB/pC1A8GyI3G0NAoGANuP+vY9Fp5SWORS2wmJL +hPTS62tpLHs/6leDDSlplpzTg5dURHGTb0TSj4VCoRLrHcm4oQZauh3hbtI9VarH +gnm+92Llj15ZPvGCw3nwea2U79aNQz5+nlWIJKOkzUnRD5boyCguhIxvYT3V4SQ4 +7DKnvYs0kjVXB4UkBNpHplY= +-----END PRIVATE KEY----- diff --git a/reverse-proxy/certificate.pem b/reverse-proxy/certificate.pem new file mode 100644 index 0000000..642e0ab --- /dev/null +++ b/reverse-proxy/certificate.pem @@ -0,0 +1,52 @@ +-----BEGIN CERTIFICATE----- +MIID/TCCAuWgAwIBAgIJALapUPkV3mB6MA0GCSqGSIb3DQEBCwUAMIGUMQswCQYD +VQQGEwJUVzEPMA0GA1UECAwGVGFpcGVpMQ8wDQYDVQQHDAZUYWlwZWkxFDASBgNV +BAoMC1dpbGxpYW0gWWVoMQswCQYDVQQLDAJSRDEYMBYGA1UEAwwPd2lsbGlhbS15 +ZWgubmV0MSYwJAYJKoZIhvcNAQkBFhd3aWxsaWFtLnBqeWVoQGdtYWlsLmNvbTAe +Fw0xNTA1MjExNTE1NTNaFw0xNjA1MjAxNTE1NTNaMIGUMQswCQYDVQQGEwJUVzEP +MA0GA1UECAwGVGFpcGVpMQ8wDQYDVQQHDAZUYWlwZWkxFDASBgNVBAoMC1dpbGxp +YW0gWWVoMQswCQYDVQQLDAJSRDEYMBYGA1UEAwwPd2lsbGlhbS15ZWgubmV0MSYw +JAYJKoZIhvcNAQkBFhd3aWxsaWFtLnBqeWVoQGdtYWlsLmNvbTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANN2QBcBQwQO+KJAHj+4dLQeAxw2fC9sFVyr +p6QLRUKnmIyTZmyTTpAQCTVev1TlnAmHllfM/TgYytxHKr+5gQf8fvxxqDXZGnYW +5vmt/pHfHLEoKYv/3AZ2ND23Da6tnTw52BjkQa67rIbBja0vitWNLiy/2I409OAd +Ym2WjFvMntUaXwUnyPaRfBxaCvXk0G0dD2B55sxmaXchxhVwfzmC2l3DPVEbQF/s 
+9wNQ+msSFGy4qo6W2DQ+5vhqHZeBxbWM7FBJ6H/3T/nckNwh6INOKP3VzroDWQ8x +lnAL2casBaxtGkgm3zvzzkwb2lQ2K2TRIH1/452deuocO17BqFECAwEAAaNQME4w +HQYDVR0OBBYEFOwIll8hcydreGPcM0LtNx6ePw2uMB8GA1UdIwQYMBaAFOwIll8h +cydreGPcM0LtNx6ePw2uMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEB +AIfiL5tpq7Tw8O6R6aM/4vJKe5HwQKeuf/xNuF8SBVX48ZOs62F2BVdnQA9WVpbW +4VfWJ4h04pOGqMLZz4Xpk2tJ9boCrCfl2MFL35wjJ0LM+NGwp+W8qHwqXlHIZY9j +abierovSIQrzDGqy7qPF23t+Xh1uAzOO8Q1nPyG0xp6AV9jyEGWDmsd5uH1/iCDy +uLXrPhaO4brEBE1wtmOqSdoBugCKBfdxVBjLF/n9rXRRu7vqynRQzojn6BVUDj6Z +1QPQYyjsiYKVJ6qARB0hmXEmz60bcShgYeYRmZqxjT/iHXFJGfZljzd16wCeytpj +vDdY2+oB+CiBEDQLsZmDGNI= +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDTdkAXAUMEDvii +QB4/uHS0HgMcNnwvbBVcq6ekC0VCp5iMk2Zsk06QEAk1Xr9U5ZwJh5ZXzP04GMrc +Ryq/uYEH/H78cag12Rp2Fub5rf6R3xyxKCmL/9wGdjQ9tw2urZ08OdgY5EGuu6yG +wY2tL4rVjS4sv9iONPTgHWJtloxbzJ7VGl8FJ8j2kXwcWgr15NBtHQ9geebMZml3 +IcYVcH85gtpdwz1RG0Bf7PcDUPprEhRsuKqOltg0Pub4ah2XgcW1jOxQSeh/90/5 +3JDcIeiDTij91c66A1kPMZZwC9nGrAWsbRpIJt87885MG9pUNitk0SB9f+OdnXrq +HDtewahRAgMBAAECggEBAJ6OJ/gX5hwA7cUatpOoxzjW2BYrdGpKbKoTu5txJ/mW +mPeu/jmjKmEy5PSzmlsZ3yn2FHC5L5UEYfdRFWi0u181oDHBNxNJOmIZQg8su5i8 +5lKffqeOrXfa4cj0nLnBTAiAhtwDKozOA8r3DjgxUbRWK606HDJk2sfAwLDTFWMu +aTITr2yH/S1HwSyYa0a1dRTddLkz9vl3kNc1y1UfDlThzSh+aKDFx1PwgDapjCID +zZG8oAeLZWEf5LVRaEqH/5zqNMRNWF7Lotun9tB8K8ffO87k0QxEtBO2KGPNNffp +BMuCoF3wwZwAM+NfJG0gLJVjRgWh1eXXAeVprcd3eZECgYEA8XMul3+QCpLtx40y +1gnsJvrc+sNyPOtHr19CJKOwB27OFVXszdU2ZJukYHZjRQ+821k56B9+ts8I5Pqm +Ivlr46dFxj+Cjp951pM+6hVwGhRCGJ3JhYylSCMVUY7l/k+jiob4VHGrQ40Pglbx +wv2xRhDWlPjqmPw15dRYw3YtwDsCgYEA4DRybe+0feiVcGUXyXZrAN9I2GBi9K6N +g0YUVq6Gajnw4LvXmLo64QJSBu+8M959gXtM/PfcHGmJ6h/99sNCfKa46u8Sbaw0 +3VMjVoEUJDmvYFeK69YWRqQ/dvGqFrPoTYc7p8PY8h3zfQ4iRx57QnJh+Bopfy/J +8OrxW9qhXOMCgYA0j+mVq3x1ANVourFVbSWOus/+eyvUXTfm0qr4hPXeOF3+wQzj +JkZIEqXvJfaoYyuu6La1dJ3GOD3sczBVrsyw64pCBTNrzwXcE4P/u5kC5GQbLcZg +H2Uf1hte/OfOvBGTJ5zJ7VDmTMASlNDerRG5ehLsrV+M3GLzXI3SR1jgXwKBgEQ4 
+8VDujbd6XG609EYDWJANYXcl7TaTHtCBNJMjzBbqxpc70WDR2jS5pggiTtxhqUML +J+QZ78pdeu/ZCEhcJhEUElS6pprkublXIPujj2NugiUIvmcA4Api1ET3SFBcMiXV +LpvgBlW2M28sK9YEOnF/qLv8CgB/pC1A8GyI3G0NAoGANuP+vY9Fp5SWORS2wmJL +hPTS62tpLHs/6leDDSlplpzTg5dURHGTb0TSj4VCoRLrHcm4oQZauh3hbtI9VarH +gnm+92Llj15ZPvGCw3nwea2U79aNQz5+nlWIJKOkzUnRD5boyCguhIxvYT3V4SQ4 +7DKnvYs0kjVXB4UkBNpHplY= +-----END PRIVATE KEY----- diff --git a/reverse-proxy/docker-compose-2.yml b/reverse-proxy/docker-compose-2.yml new file mode 100644 index 0000000..489e4c2 --- /dev/null +++ b/reverse-proxy/docker-compose-2.yml @@ -0,0 +1,43 @@ +# +# Zero-downtime deployment. +# +# Feel free to stop/start individual appN instances, +# and see how HAProxy reacts. +# + +haproxy: + image: haproxy:1.5.12 + restart: always + volumes: + - ./haproxy-2.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro + - ./certificate.pem:/usr/local/etc/haproxy/certificate.pem:ro + net: "host" + + +app1: + build: app + ports: + - "3001:3000" + links: + - redisserver + environment: + - REDIS_HOST=redisserver + +app2: + build: app + ports: + - "3002:3000" + links: + - redisserver + environment: + - REDIS_HOST=redisserver + + +redisserver: + image: williamyeh/redis:2.8.19 + restart: always + volumes: + - .:/data + - ./redis.conf:/etc/redis/redis.conf:ro + command: + - start diff --git a/reverse-proxy/docker-compose.yml b/reverse-proxy/docker-compose.yml new file mode 100644 index 0000000..9360898 --- /dev/null +++ b/reverse-proxy/docker-compose.yml @@ -0,0 +1,51 @@ +nginx: + image: nginx:1.9.0 + restart: always + volumes: + - .:/etc/nginx:ro + ports: + - "10080:80" + - "10443:443" + links: + - app1:node1 + - app2:node2 + + +haproxy: + image: haproxy:1.5.12 + restart: always + volumes: + - .:/usr/local/etc/haproxy:ro + ports: + - "10090:80" + - "10091:443" + - "10100:1936" + links: + - app1:node1 + - app2:node2 + + +app1: + build: app + links: + - redisserver + environment: + - REDIS_HOST=redisserver + + +app2: + build: app + links: + - redisserver + environment: + - 
REDIS_HOST=redisserver + + +redisserver: + image: williamyeh/redis:2.8.19 + restart: always + volumes: + - .:/data + - ./redis.conf:/etc/redis/redis.conf:ro + command: + - start diff --git a/reverse-proxy/haproxy-2.cfg b/reverse-proxy/haproxy-2.cfg new file mode 100644 index 0000000..de440e4 --- /dev/null +++ b/reverse-proxy/haproxy-2.cfg @@ -0,0 +1,55 @@ +global + maxconn 4096 + nbproc 2 + #debug + #daemon # should be disabled in Docker + log 127.0.0.1 local0 + tune.ssl.default-dh-param 2048 + + +defaults + mode http + option httplog + log global + retries 3 + option redispatch + timeout client 30s + timeout connect 30s + timeout server 30s + + +frontend access_http + bind *:80 + mode http + reqadd X-Forwarded-Proto:\ http + default_backend node-app + + +frontend access_https + bind *:443 ssl no-sslv3 crt /usr/local/etc/haproxy/certificate.pem + mode http + option httpclose + option forwardfor + reqadd X-Forwarded-Proto:\ https + default_backend node-app + + +backend node-app + mode http + balance roundrobin + option forwardfor + http-request set-header X-Forwarded-Port %[dst_port] + http-request add-header X-Forwarded-Proto https if { ssl_fc } + option httpchk HEAD /health HTTP/1.1\r\nHost:localhost + server server1 127.0.0.1:3001 check + server server2 127.0.0.1:3002 check + #server server1 node1:3000 cookie server1 weight 1 maxconn 1024 check + #server server2 node2:3000 cookie server2 weight 1 maxconn 1024 check + + +listen stats *:10100 +#listen stats *:1936 + stats enable + stats uri / + #stats hide-version + stats refresh 5s diff --git a/reverse-proxy/haproxy.cfg b/reverse-proxy/haproxy.cfg new file mode 100644 index 0000000..0f92670 --- /dev/null +++ b/reverse-proxy/haproxy.cfg @@ -0,0 +1,54 @@ +global + maxconn 4096 + nbproc 2 + #debug + #daemon # should be disabled in Docker + log 127.0.0.1 local0 + tune.ssl.default-dh-param 2048 + + +defaults + mode http + option httplog + log global + retries 3 + option redispatch + timeout connect 5000 + timeout 
client 50000 + timeout server 50000 + + +frontend access_http + bind *:80 + mode http + reqadd X-Forwarded-Proto:\ http + default_backend node-app + + +frontend access_https + bind *:443 ssl no-sslv3 crt /usr/local/etc/haproxy/certificate.pem + mode http + option httpclose + option forwardfor + reqadd X-Forwarded-Proto:\ https + default_backend node-app + + +backend node-app + mode http + balance roundrobin + option forwardfor + http-request set-header X-Forwarded-Port %[dst_port] + http-request add-header X-Forwarded-Proto https if { ssl_fc } + option httpchk HEAD /health HTTP/1.1\r\nHost:localhost + server server1 node1:3000 check + server server2 node2:3000 check + #server server1 node1:3000 cookie server1 weight 1 maxconn 1024 check + #server server2 node2:3000 cookie server2 weight 1 maxconn 1024 check + + +listen stats *:1936 + stats enable + stats uri / + #stats hide-version + stats refresh 5s diff --git a/reverse-proxy/make-cert.sh b/reverse-proxy/make-cert.sh new file mode 100755 index 0000000..39ce984 --- /dev/null +++ b/reverse-proxy/make-cert.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +openssl req -new -x509 -sha256 -newkey rsa:2048 -days 365 -nodes -out ./certificate.crt -keyout ./certificate.key + +cat certificate.crt certificate.key > certificate.pem diff --git a/reverse-proxy/nginx.conf b/reverse-proxy/nginx.conf new file mode 100644 index 0000000..921b7f0 --- /dev/null +++ b/reverse-proxy/nginx.conf @@ -0,0 +1,43 @@ +worker_processes auto; + +events { worker_connections 1024; } + +http { + + upstream node-app { + least_conn; + zone backend 64k; + server node1:3000 weight=10 max_fails=3 fail_timeout=30s; + server node2:3000 weight=10 max_fails=3 fail_timeout=30s; + } + + server_tokens off; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + server { + listen 80; + listen 443 ssl; + + ssl_certificate /etc/nginx/certificate.crt; + ssl_certificate_key 
/etc/nginx/certificate.key; + + location / { + proxy_pass http://node-app; + #health_check; + + proxy_http_version 1.1; + proxy_set_header HOST $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_cache_bypass $http_upgrade; + } + } +} diff --git a/reverse-proxy/pull-images.sh b/reverse-proxy/pull-images.sh new file mode 100755 index 0000000..d13e7bb --- /dev/null +++ b/reverse-proxy/pull-images.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +DOCKER pull nginx:1.9.0 +DOCKER pull haproxy:1.5.12 +DOCKER pull node:0.10.36-onbuild +DOCKER pull williamyeh/redis:2.8.19 diff --git a/reverse-proxy/redis.conf b/reverse-proxy/redis.conf new file mode 100644 index 0000000..c3ee534 --- /dev/null +++ b/reverse-proxy/redis.conf @@ -0,0 +1,10 @@ + +# Working directory for "dump.rdb" and "appendonly.aof" +dir /data/ + + +save 10 5 + + +appendonly yes +appendfsync everysec diff --git a/reverse-proxy/reverse-proxy.png b/reverse-proxy/reverse-proxy.png new file mode 100644 index 0000000..23cfe6b Binary files /dev/null and b/reverse-proxy/reverse-proxy.png differ diff --git a/setup-vagrant b/setup-vagrant index 13049f1..6a004b8 100755 --- a/setup-vagrant +++ b/setup-vagrant @@ -1,6 +1,6 @@ #!/bin/bash -readonly HOSTS=( "main" "alice" "bob" "centos" "registry" ) +readonly HOSTS=( "main" "centos" "registry" ) for i in "${HOSTS[@]}"; do NAME=$i diff --git a/setup-vagrant.bat b/setup-vagrant.bat index 21d4b97..2ad0f95 100755 --- a/setup-vagrant.bat +++ b/setup-vagrant.bat @@ -1,6 +1,6 @@ @ECHO OFF -set HOSTS=( main alice bob centos registry ) +set HOSTS=( main centos registry ) for %%i in %HOSTS% do ( vagrant up --provision %%i diff --git a/vagrant-tutorial/Vagrantfile b/vagrant-tutorial/Vagrantfile index 98de551..cadf79c 100644 --- a/vagrant-tutorial/Vagrantfile +++ b/vagrant-tutorial/Vagrantfile @@ -1,6 
+1,14 @@ Vagrant.configure(2) do |config| - #config.vm.box = "ubuntu/trusty64" config.vm.box = "williamyeh/ubuntu-trusty64-docker" + #config.vm.box = "ubuntu/trusty64" + #config.vm.box = "ubuntu/precise64" + #config.vm.box = "bento/centos-7.1" + #config.vm.box = "bento/centos-6.7" + #config.vm.box = "debian/jessie64" + #config.vm.box = "yungsang/coreos" + #config.vm.box = "yungsang/coreos-alpha" + #config.vm.box = "yungsang/coreos-beta" + end