Image from https://tinyurl.com/q5hdmyu
Image from techglimpse.com
$ docker image list
$ docker pull alpine:latest
$ docker run --rm -id alpine:latest
6b30c015598eeaab0ab10f0849366f5c5f17090300dc64b6976f442d2fe1e516
$ docker ps
$ docker exec -it 6b30c015598e ash
/ # apk update
/ # apk add bash
/ # logout
$ docker diff 6b30c015598e
$ docker commit 6b30c015598e alpine:bash
$ docker run --rm -it alpine:bash bash
bash-4.4# ps -ef
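The docker commit route captures the change, but the same customization can also be described reproducibly. A minimal sketch, reusing the alpine:bash tag from above and the same heredoc-Dockerfile style used later in these examples:
$ cat > Dockerfile <<EOT
FROM alpine:latest
RUN apk update && apk add bash
EOT
$ docker build . -t alpine:bash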
$ lxc image list images: alpine/3.9
$ lxc launch -e images:alpine/3.9
$ lxc list
$ lxc shell unbiased-elk
/ # apk update
/ # apk add bash
/ # logout
$ lxc publish unbiased-elk --alias alpine/3.9/bash --force
$ lxc launch alpine/3.9/bash -e
$ lxc exec equal-cricket bash
bash-4.4# ps -ef
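The interactive lxc shell step can also be scripted before publishing. A sketch, reusing the container name from the transcript (lxc exec runs a single command inside the container):
$ lxc exec unbiased-elk -- apk update
$ lxc exec unbiased-elk -- apk add bash
$ lxc publish unbiased-elk --alias alpine/3.9/bash --force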
$ singularity pull library://alpine
$ singularity shell library://alpine
Singularity> apk update
ERROR: Unable to lock database: Read-only file system
ERROR: Failed to open apk database: Read-only file system
$ singularity build --sandbox alpine-local library://alpine
$ sudo singularity shell --writable alpine-local/
Singularity alpine-local:~> apk update
Singularity alpine-local:~> apk add bash
Singularity alpine-local:~>
$ singularity build alpine-bash.sif alpine-local
$ singularity exec alpine-bash.sif bash
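Instead of the sandbox plus --writable shell round trip, the same image can be described declaratively. A sketch using a Singularity definition file (the alpine-bash.def name is just illustrative):
$ cat > alpine-bash.def <<EOT
Bootstrap: library
From: alpine
%post
    apk update
    apk add bash
EOT
$ sudo singularity build alpine-bash.sif alpine-bash.def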
hpcadmintech2019@hpcadmintech2019demo:~$ unshare --mount --uts --ipc --net --user --map-root-user --fork --pid bash
root@hpcadmintech2019demo:~#
root@hpcadmintech2019demo:~# echo $$
1
root@hpcadmintech2019demo:~# ip link
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
root@hpcadmintech2019demo:~# hostname mycontainer
root@hpcadmintech2019demo:~# exec bash
root@mycontainer:~# umount /
umount: /: not mounted.
root@mycontainer:~# id
uid=0(root) gid=0(root) groups=0(root),65534(nogroup)
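The uid 0 seen here is only a mapping set up by --map-root-user, and it can be inspected in /proc. A sketch of what the mapping looks like (the outside uid 1003 corresponds to the hpcadmintech2019 user of this demo and will differ elsewhere):
root@mycontainer:~# cat /proc/self/uid_map
         0       1003          1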
root@mycontainer:~# ps -ef
UID PID PPID C STIME TTY TIME CMD
nobody 1 0 0 12:38 ? 00:00:01 /sbin/init maybe-ubiquity
nobody 2 0 0 12:38 ? 00:00:00 [kthreadd]
nobody 4 2 0 12:38 ? 00:00:00 [kworker/0:0H]
...
root 1472 1471 0 13:32 pts/0 00:00:00 -bash
root 1505 1472 0 13:34 pts/0 00:00:00 unshare --mount --uts --ipc --net
root 1507 1505 0 13:34 pts/0 00:00:00 bash
nobody 1527 2 0 13:44 ? 00:00:00 [kworker/u2:0]
root 1529 1507 0 13:48 pts/0 00:00:00 ps -ef
root@mycontainer:~#
root@mycontainer:~# kill -9 $(pidof unshare)
bash: kill: (1505) - No such process
root@mycontainer:~# mount -t proc none /proc
root@mycontainer:~# ps -ef
UID PID PPID C STIME TTY TIME CMD
root 1 0 0 13:34 pts/0 00:00:00 bash
root 38 1 0 13:57 pts/0 00:00:00 ps -ef
root@mycontainer:~# ls -l /proc/
total 0
dr-xr-xr-x 9 root root 0 may 7 13:57 1
dr-xr-xr-x 9 root root 0 may 7 13:57 41
dr-xr-xr-x 2 nobody nogroup 0 may 7 13:57 acpi
dr-xr-xr-x 5 nobody nogroup 0 may 7 13:57 asound
...
root@mycontainer:~# docker save alpine -o alpine-image.tar
root@mycontainer:~# mkdir alpine-image
root@mycontainer:~# tar xf alpine-image.tar -C alpine-image
root@mycontainer:~# mkdir -p mycontainer/alpinefs
root@mycontainer:~# tar xf alpine-image/2904c2b59f0e40c1537b1a4ccaf72f4983988e8ad9f3aea0af36f6bb8d177ead/layer.tar -C mycontainer/alpinefs --no-same-owner
root@mycontainer:~# ls -l mycontainer/alpinefs/
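The layer hash above is specific to that particular image pull. A hedged alternative that avoids looking up layer names is to export a flattened filesystem from a throwaway container (docker create, docker export and docker rm are standard subcommands):
root@mycontainer:~# CID=$(docker create alpine)
root@mycontainer:~# docker export "$CID" | tar -x -C mycontainer/alpinefs --no-same-owner
root@mycontainer:~# docker rm "$CID"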
root@mycontainer:~# mkdir -p mycontainer/layer mycontainer/work mycontainer/fs
root@mycontainer:~# mount -t overlay -o lowerdir=mycontainer/alpinefs,upperdir=mycontainer/layer,workdir=mycontainer/work overlay mycontainer/fs
root@mycontainer:~# ls mycontainer/fs
bin etc lib mnt proc run srv tmp var
dev home media opt root sbin sys usr
root@mycontainer:~# cd mycontainer/fs
root@mycontainer:~/mycontainer/fs# mkdir -p mnt/.original-root
root@mycontainer:~/mycontainer/fs# pivot_root . mnt/.original-root
root@mycontainer:~/mycontainer/fs# exec chroot / ash
/ # ps -ef
PID USER TIME COMMAND
/ # mount
mount: no /proc/mounts
/ # mount -t proc none /proc
/ # mount -t sysfs none /sys
/ # mount -t tmpfs none /tmp
/ # ps -ef
PID USER TIME COMMAND
1 root 0:02 ash
163 root 0:00 ps -ef
/ # mount
/dev/sda2 on /mnt/.original-root type ext4 (rw,relatime,data=ordered)
udev on /mnt/.original-root/dev type devtmpfs (rw,nosuid,relatime,size=473936k,nr_inodes=118484,mode=755)
...
none on /mnt/.original-root/proc type proc (rw,relatime)
overlay on / type overlay (rw,relatime,lowerdir=mycontainer/alpinefs,upperdir=mycontainer
none on /proc type proc (rw,relatime)
/ # umount -l mnt/.original-root
/ # rmdir mnt/.original-root
/ # mount
overlay on / type overlay (rw,relatime,lowerdir=mycontainer/alpinefs,upperdir=mycontainer/layer,workdir=mycontainer/work)
none on /proc type proc (rw,relatime)
none on /sys type sysfs (rw,relatime)
none on /tmp type tmpfs (rw,relatime,uid=1003,gid=1003)
/ #
/ # ip link
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
/ # ping -c 1 localhost
PING localhost (127.0.0.1): 56 data bytes
ping: sendto: Network unreachable
/ # ip link set dev lo up
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
/ # ping -c 1 localhost
PING localhost (127.0.0.1): 56 data bytes
64 bytes from 127.0.0.1: seq=0 ttl=64 time=0.285 ms
--- localhost ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 0.285/0.285/0.285 ms
hpcadmintech2019@hpcadmintech2019demo:~$ pidof unshare
1306
hpcadmintech2019@hpcadmintech2019demo:~$ sudo ip link add name host-1306 type veth peer name cont-1306
hpcadmintech2019@hpcadmintech2019demo:~$ sudo ip link set dev cont-1306 netns 1306
hpcadmintech2019@hpcadmintech2019demo:~$ ip addr show dev docker0
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group
default
link/ether 02:42:85:f0:62:de brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:85ff:fef0:62de/64 scope link
valid_lft forever preferred_lft forever
hpcadmintech2019@hpcadmintech2019demo:~$ sudo ip link set dev host-1306 master docker0 up
/ # ip link
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
5: cont-1306@if6: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN qlen 1000
link/ether b6:3c:fe:4c:c7:85 brd ff:ff:ff:ff:ff:ff
/ # ip link set dev cont-1306 name eth0 up
/ # echo "1" > /proc/sys/net/ipv6/conf/all/disable_ipv6
/ # echo "1" > /proc/sys/net/ipv6/conf/default/disable_ipv6
/ # ip addr add 172.17.10.2/16 dev eth0
/ # ip a show eth0
5: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
qlen 1000
link/ether b6:3c:fe:4c:c7:85 brd ff:ff:ff:ff:ff:ff
inet 172.17.10.2/16 scope global eth0
valid_lft forever preferred_lft forever
/ # ip route add default via 172.17.0.1 dev eth0 onlink
/ # echo "nameserver 8.8.8.8" >> /etc/resolv.conf
/ # ping -c 1 www.google.es
PING www.google.es (172.217.168.163): 56 data bytes
64 bytes from 172.217.168.163: seq=0 ttl=61 time=9.623 ms
--- www.google.es ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 9.623/9.623/9.623 ms
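Outbound traffic works here because the veth pair was attached to docker0, and the Docker daemon already masquerades 172.17.0.0/16. If the pair were attached to a hand-made bridge instead, equivalent NAT would have to be added on the host. A sketch, assuming a hypothetical bridge named br0 holding 172.17.0.1/16:
$ sudo sysctl -w net.ipv4.ip_forward=1
$ sudo iptables -t nat -A POSTROUTING -s 172.17.0.0/16 ! -o br0 -j MASQUERADE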
/ # apk update
...
OK: 9766 distinct packages available
/ # apk add bash
(1/5) Installing ncurses-terminfo-base (6.1_p20190105-r0)
...
(5/5) Installing bash (4.4.19-r1)
Executing bash-4.4.19-r1.post-install
Executing busybox-1.29.3-r10.trigger
OK: 14 MiB in 19 packages
/ # exec bash
bash-4.4#
bash-4.4# apk add python
...
(6/6) Installing python2 (2.7.16-r1)
Executing busybox-1.29.3-r10.trigger
OK: 54 MiB in 25 packages
bash-4.4# apk add vim
(1/2) Installing lua5.3-libs (5.3.5-r2)
(2/2) Installing vim (8.1.0630-r0)
Executing busybox-1.29.3-r10.trigger
OK: 81 MiB in 27 packages
hpcadmintech2019@hpcadmintech2019demo:~$ sudo su -
root@hpcadmintech2019demo:~# mkdir /sys/fs/cgroup/memory/mycontainer
root@hpcadmintech2019demo:~# ls /sys/fs/cgroup/memory/mycontainer/
cgroup.clone_children memory.limit_in_bytes
cgroup.event_control memory.max_usage_in_bytes
...
memory.kmem.tcp.max_usage_in_bytes notify_on_release
memory.kmem.tcp.usage_in_bytes tasks
memory.kmem.usage_in_bytes
root@hpcadmintech2019demo:~# echo "1306" > /sys/fs/cgroup/memory/mycontainer/tasks
root@hpcadmintech2019demo:~# cat /sys/fs/cgroup/memory/mycontainer/memory.usage_in_bytes
835584
root@hpcadmintech2019demo:~# echo "$((1024*1024*50))" > /sys/fs/cgroup/memory/mycontainer/me
mory.limit_in_bytes
root@hpcadmintech2019demo:~# echo "0" > /sys/fs/cgroup/memory/mycontainer/memory.swappiness
root@hpcadmintech2019demo:~# watch -n 1 cat /sys/fs/cgroup/memory/mycontainer/memory.usage_in_bytes
Every 1,0s: cat /sys/fs/cgroup/... hpcadmintech2019demo: Fri May 10 11:52:07 2019
835584
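These are cgroup v1 paths. On a host that uses the unified cgroup v2 hierarchy, the equivalent setup would be roughly as follows (a sketch, not part of the original demo; PID 1306 is the unshared shell from the transcript):
# may first require: echo +memory > /sys/fs/cgroup/cgroup.subtree_control
mkdir /sys/fs/cgroup/mycontainer
echo 1306 > /sys/fs/cgroup/mycontainer/cgroup.procs
echo $((1024*1024*50)) > /sys/fs/cgroup/mycontainer/memory.max
echo 0 > /sys/fs/cgroup/mycontainer/memory.swap.max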
bash-4.4# cd /tmp/
bash-4.4# cat > memusage.py <<EOT
data = ""
while True:
    for f in range(0, 1024*1024):
        data = data + str(f % 10)
    print len(data) / 1024 / 1024, "mb."
EOT
bash-4.4# python memusage.py
1 mb.
2 mb.
3 mb.
4 mb.
...
Every 1,0s: cat /sys/fs/cgroup/me... hpcadmintech2019demo: Fri May 10 11:56:37 2019
46899200
bash-4.4# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
5: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP qlen 1000
    link/ether b6:3c:fe:4c:c7:85 brd ff:ff:ff:ff:ff:ff
    inet 172.17.10.2/32 scope global eth0
       valid_lft forever preferred_lft forever
hpcadmintech2019@hpcadmintech2019demo:~$ docker run --rm -it alpine ash
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
11: eth0@if12: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
    link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever
$ sudo service slurmctld restart
$ sudo service slurmd restart
$ sudo service slurmdbd restart
$ sinfo
PARTITION AVAIL TIMELIMIT NODES STATE NODELIST
debug* up infinite 1 idle localhost
$ tail /usr/local/etc/slurm.conf
# COMPUTE NODES
NodeName=localhost CPUs=2 State=UNKNOWN
PartitionName=debug Nodes=localhost Default=YES MaxTime=INFINITE State=UP
$ srun -n 2 hostname
hpcadmintech2019demo
hpcadmintech2019demo
$ srun -n 2 docker run --rm -i alpine hostname
9b49fdaf17c6
4aaf8bb64623
$ srun -n 2 singularity exec alpine_latest.sif hostname
INFO: Convert SIF file to sandbox...
INFO: Convert SIF file to sandbox...
hpcadmintech2019demo
INFO: Cleaning up image...
hpcadmintech2019demo
INFO: Cleaning up image...
$ srun -n 2 ps -ef
UID PID PPID C STIME TTY TIME CMD
root 1 0 0 08:49 ? 00:00:12 /sbin/init maybe-ubiquity
...
$ srun -n 2 docker run --rm alpine ps -ef
PID USER TIME COMMAND
1 root 0:00 ps -ef
PID USER TIME COMMAND
1 root 0:00 ps -ef
$ srun -n 2 singularity exec alpine_latest.sif ps -ef
PID USER TIME COMMAND
1 nobody 0:12 {systemd} /sbin/init maybe-ubiquity
...
1278 hpcadmin 0:00 /lib/systemd/systemd --user
$ sbatch -n 2 ej1.sh
Submitted batch job 34
$ squeue
JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)
34 debug ej1.sh hpcadmin R 0:03 1 localhost
$ cat slurm-34.out
mié may 22 10:10:13 UTC 2019
hpcadmintech2019demo
mié may 22 10:10:23 UTC 2019
#!/bin/sh
date
hostname
sleep 10
date
File: ej1.sh
$ sbatch ej1.docker.sh
Submitted batch job 36
$ squeue
JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)
36 debug ej1.dock hpcadmin R 0:02 1 localhost
$ cat slurm-36.out
Wed May 22 10:15:24 UTC 2019
1cbad0cacba3
Wed May 22 10:15:34 UTC 2019
#!/bin/sh
docker run --rm -i alpine ash -s <<EOT
date
hostname
sleep 10
date
EOT
File: ej1.docker.sh
$ sbatch ej2.sh
Submitted batch job 38
$ cat slurm-38.out
Ubuntu 18.04.2 LTS \n \l
UID PID PPID C STIME TTY TIME CMD
root 1 0 0 08:49 ? 00:00:12 /sbin/init maybe-ubiquity
root 2 0 0 08:49 ? 00:00:00 [kthreadd]
...
#!/bin/sh
cat /etc/issue
ps -ef
File: ej2.sh
$ sbatch ej2.docker.sh
Submitted batch job 39
$ cat slurm-39.out
Welcome to Alpine Linux 3.9
Kernel \r on an \m (\l)
PID USER TIME COMMAND
1 root 0:00 ash -s
7 root 0:00 ps -ef
#!/bin/sh
docker run --rm -i alpine ash -s <<EOT
cat /etc/issue
ps -ef
EOT
File: ej2.docker.sh
$ sbatch -n 2 ej3.sh
Submitted batch job 40
$ cat slurm-40.out
mié may 22 10:23:30 UTC 2019
hpcadmintech2019demo
hpcadmintech2019demo
mié may 22 10:23:30 UTC 2019
#!/bin/sh
date
srun hostname
date
File: ej3.sh
$ sbatch -n 2 ej3.docker.sh
Submitted batch job 41
hpcadmintech2019@hpcadmintech2019demo:~$ cat slurm-41.out
mié may 22 10:26:33 UTC 2019
556d7a5e3822
577354d5dd0d
mié may 22 10:26:34 UTC 2019
#!/bin/sh
date
srun docker run --rm -i alpine hostname
date
File: ej3.docker.sh
$ sbatch ej4.sh
Submitted batch job 43
$ ps -ef | grep sleep
hpcadmi+ 8040 8039 1 10:30 ? 00:00:00 srun sleep 360
hpcadmi+ 8043 8040 0 10:30 ? 00:00:00 srun sleep 360
hpcadmi+ 8055 8050 0 10:30 ? 00:00:00 /bin/sleep 360
hpcadmi+ 8057 6057 0 10:30 pts/0 00:00:00 grep --color=auto sleep
#!/bin/sh
srun sleep 360
File: ej4.sh
$ ps -ef | grep sleep
hpcadmi+ 8988 8987 0 10:48 ? 00:00:00 srun docker run --rm -i alpine sleep 360
hpcadmi+ 8991 8988 0 10:48 ? 00:00:00 srun docker run --rm -i alpine sleep 360
hpcadmi+ 9003 8998 1 10:48 ? 00:00:00 /usr/bin/docker run --rm -i alpine sleep 360
root 9044 9019 1 10:48 ? 00:00:00 sleep 360
hpcadmi+ 9091 6057 0 10:48 pts/0 00:00:00 grep --color=auto sleep
$ docker ps
CONTAINER ID   IMAGE    COMMAND       CREATED         STATUS         PORTS   NAMES
67237262d464   alpine   "sleep 360"   6 seconds ago   Up 5 seconds           agitated_hamilton
#!/bin/sh
srun docker run --rm -i alpine sleep 360
File: ej4.docker.sh
$ srun id
uid=1003(hpcadmintech2019) gid=1003(hpcadmintech2019)
groups=1003(hpcadmintech2019),4(adm),24(cdrom),27(sudo),...
$ srun docker run --rm -i alpine id
uid=0(root) gid=0(root) groups=0(root),1(bin),2(daemon),3(sys),4(adm),...
$ srun singularity exec alpine_latest.sif id
INFO: Convert SIF file to sandbox...
uid=1003(hpcadmintech2019) gid=1003(hpcadmintech2019)
groups=65534(nobody),65534(nobody),65534(nobody),...,1003(hpcadmintech2019)
INFO: Cleaning up image...
$ srun docker run --rm -i -u 1003:1003 alpine id
uid=1003 gid=1003
$ srun docker run --rm -i -u 1003:1003 alpine ash -c 'ls -ld $HOME'
drwxr-xr-x 1 root root 4096 May 22 10:59 /
$ srun docker run --rm -i -u 1003:1003 -v /etc/passwd:/etc/passwd:ro alpine ash -c 'ls -ld $HOME'
ls: /home/hpcadmintech2019: No such file or directory
srun: error: localhost: task 0: Exited with exit code 1
$ srun docker run --rm -i -u 1003:1003 -v /etc/passwd:/etc/passwd:ro -v $HOME:$HOME alpine ash -c 'ls -ld $HOME'
drwxr-xr-x 10 hpcadmin 1003 4096 May 22 10:48 /home/hpcadmintech2019
#!/bin/bash
SDOCKER_IMAGE=${SDOCKER_IMAGE:-alpine}
srun docker run --rm -i -u $(id -u):$(id -g) -v /etc/passwd:/etc/passwd:ro \
    -v /etc/group:/etc/group:ro -v $HOME:$HOME --workdir $HOME $SDOCKER_IMAGE "$@"
$ SDOCKER_IMAGE=alpine ./sdocker python
$ cat > Dockerfile <<EOT
FROM alpine
RUN apk update && apk add python coreutils
EOT
$ docker build . -t alpine:python
data = ""
while True:
    for f in range(0, 1024*1024):
        data = data + str(f % 10)
    print len(data) / 1024 / 1024, "mb."
$ SDOCKER_IMAGE=alpine:python ./sdocker stdbuf -i0 -o0 -e0 python memusage.py
$ SDOCKER_IMAGE=alpine ./sdocker octave
$ cat > Dockerfile <<EOT
FROM ubuntu
RUN apt update && apt -y install --no-install-recommends octave
EOT
$ docker build . -t ubuntu:octave
m=100
n=100
args=argv();
if (nargin>0) m=str2num(args{1}); endif
if (nargin>1) n=str2num(args{2}); endif
A = rand(m,n)
tic(); B1=A*A; toc();
tic(); B=A'*A; toc();
tic(); C=B*inv(B); toc();
$ SDOCKER_IMAGE=ubuntu:octave ./sdocker stdbuf -i0 -o0 -e0 octave app.m
#!/bin/bash
SDOCKER_IMAGE=ubuntu:octave ./sdocker octave app.m 10000 1000
$ sbatch octave.sh
$ ps -ef | grep octave
hpcadmi+ 28601 28600 0 22:06 ? 00:00:00 /bin/bash ./sdocker octave app.m 1000 1000
...
hpcadmi+ 28663 28638 65 22:06 ? 00:00:02 /usr/bin/octave-cli app.m 1000 1000
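The sdocker wrapper runs the container with the caller's identity, but it does not forward the Slurm allocation to Docker. A sketch of a variant that also caps CPU and memory, assuming the job was submitted with --cpus-per-task and --mem so that SLURM_CPUS_PER_TASK and SLURM_MEM_PER_NODE are set (both --cpus and --memory are standard docker run flags):
#!/bin/bash
# Sketch only: pass the Slurm allocation on to docker run; limits are omitted
# if the corresponding Slurm variables are not set.
SDOCKER_IMAGE=${SDOCKER_IMAGE:-alpine}
LIMITS=""
[ -n "$SLURM_CPUS_PER_TASK" ] && LIMITS="$LIMITS --cpus $SLURM_CPUS_PER_TASK"
[ -n "$SLURM_MEM_PER_NODE" ] && LIMITS="$LIMITS --memory ${SLURM_MEM_PER_NODE}m"
srun docker run --rm -i -u $(id -u):$(id -g) $LIMITS \
    -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
    -v $HOME:$HOME --workdir $HOME $SDOCKER_IMAGE "$@"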
$ docker run --rm -v /etc:/etc -it alpine ash
/ $ adduser mynewroot -G root
...
/ $ exit
$ docker run --privileged alpine ash -c 'echo 1 > /proc/sys/kernel/sysrq; echo o > /proc/sysrq-trigger'
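Both attacks rely on the Docker daemon running containers as real root. One partial mitigation (an assumption, not something configured in this demo) is to enable user-namespace remapping in the daemon, so that uid 0 inside a container maps to an unprivileged uid on the host; --privileged would still have to be denied outright:
$ cat /etc/docker/daemon.json
{
    "userns-remap": "default"
}
$ sudo systemctl restart docker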
$ cat /bin/dosh
#!/bin/bash
sudo /bin/shell2docker "$@"
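shell2docker itself is not listed in these slides. A purely hypothetical sketch of what such a wrapper could do, consistent with the dosh-user1 container seen below (the real tool is more elaborate):
#!/bin/bash
# Hypothetical shell2docker sketch: give the invoking user a bash inside a
# per-user ubuntu container, creating and starting it on demand.
USER_NAME=${SUDO_USER:-$USER}
CONTAINER="dosh-$USER_NAME"
docker inspect "$CONTAINER" >/dev/null 2>&1 || \
    docker run -itd --name "$CONTAINER" -v /home/$USER_NAME:/home/$USER_NAME ubuntu:latest bash
docker start "$CONTAINER" >/dev/null
exec docker exec -it -u $(id -u "$USER_NAME"):$(id -g "$USER_NAME") -w /home/$USER_NAME "$CONTAINER" bash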
# useradd user1
...
# su - user1
$ docker ps
Got permission denied while trying to connect to the Docker daemon socket ...
/var/run/docker.sock: connect: permission denied
# usermod -s /bin/dosh user1
# su - user1
user1@hpcadmintech2019demo:~$
# su - user1
# docker ps
CONTAINER ID   IMAGE           COMMAND   CREATED          STATUS          PORTS   NAMES
5bc7e3afe23f   ubuntu:latest   "bash"    23 minutes ago   Up 23 minutes           dosh-user1
FROM ubuntu
RUN apt-get update && apt-get install -y cowsay
ENTRYPOINT ["/usr/games/cowsay"]
$ docker build . -t hpcadmintech:cow
...
$ docker run --rm -it hpcadmintech:cow i am a cow in a fat container
_______________________________
< i am a cow in a fat container >
-------------------------------
        \   ^__^
         \  (oo)\_______
            (__)\       )\/\
                ||----w |
                ||     ||
$ minidock -i hpcadmintech:cow -t hpcadmintech:minicow --apt -- i am a cow
$ docker images hpcadmintech
REPOSITORY TAG IMAGE ID CREATED SIZE
hpcadmintech cow ca38bcceb4d6 3 minutes ago 168MB
hpcadmintech minicow a2dfb4c63fbb 2 minutes ago 6.89MB
FROM ubuntu:latest
RUN apt-get update && apt-get install -y ssh iproute2 iputils-ping wget
$ docker build . -t hpcadmintech:ui
...
$ docker run --rm -it hpcadmintech:ui
root@d2a99656917d:/# ls -l /bin/
$ minidock -i hpcadmintech:ui -t hpcadmintech:miniui --apt -E bash -E 'ssh localhost' -E ip -E id -E cat -E ls -E mkdir -E 'ping -c 1 www.google.es' -- wget www.google.es
$ docker images hpcadmintech
REPOSITORY TAG IMAGE ID CREATED SIZE
hpcadmintech miniui c8fcb68f7b74 6 seconds ago 16.3MB
hpcadmintech ui 9937bb48bf32 3 minutes ago 244MB
$ docker run --rm -it hpcadmintech:miniui
bash-4.4# ls -l /bin/
@minicon analyzes a filesystem and removes the files that are not essential for running the applications we need.
@dosh is a shell that makes it possible to provide users with customized systems, giving them only the resources they will actually be able to use.
The key to letting users use @docker is not letting them use @docker (in an arbitrary way).
It is possible to launch workloads in @slurm so that they run inside @docker containers (using the user's credentials), provided the necessary adjustments are made.
Systems such as @docker, @lxc or @singularity rely on Linux kernel calls to create the containers, and each one offers its own particular features.