This document contains the steps that were covered in the Docker 101 labs. Useful if you missed a class or forgot a step.
- An AWS account
- An SSH client, OpenSSH preferred
- Basic Linux knowledge preferred
ctrl-b C -- create new window
ctrl-b n -- jump to next window
ctrl-b p -- jump to previous window
ctrl-b 0 -- jump to 0th window
ctrl-b "" -- split window horizontally (pane)
ctrl-b O -- jump to next pane
ctrl-b x -- close pane
- Spin up a small EC2 instance using the Ubuntu Linux AMI
uname -a
to verify we are running a Linux 3.1.0 or higher kernel
sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update
sudo apt-get install docker-ce
sudo docker info
- destroy the instance
- Spin up a small EC2 instance using the Ubuntu Linux AMI
uname -a
to verify we are running a Linux 3.1.0 or higher kernel
sudo apt-get update
whereis curl
sudo apt-get install curl
curl https://get.docker.com/ | sudo sh
sudo docker info
docker info
sudo usermod -aG docker ubuntu
docker info
- log out and back in again
docker info
- save the instance (we'll be using it in future labs)
- Spin up a Ubuntu EC2 instance with Docker installed
sudo status docker
status docker
cat /etc/group
docker info
sudo service docker stop
sudo service docker start
docker help
man docker
docker help run
docker run --help
docker run --interactive --tty ubuntu /bin/bash
- start a second ssh session to your EC2 instance (tmux can simplify this)
- compare results of commands from inside Docker and on the EC2 instance
whoami
hostname
cat /etc/hosts
hostname --all-ip-addresses
ps -aux
uname -a
top
ls /bin
sudo find / -type d | wc --lines
-- no sudo needed on the Docker side
cat /proc/cpuinfo
cat /proc/meminfo
cat /proc/net/dev
- in your Docker container,
apt-get update; apt-get install vim
- in your Docker container,
exit
docker ps
-- show running containers
docker ps --all
-- show all containers
docker ps --latest
-- show the last running container
docker run --name wolverine --interactive --tty ubuntu /bin/bash
exit the container
docker ps --latest
-- notice the container name
docker start wolverine
-- start stopped container
docker ps
-- should see the wolverine container running
docker attach <container id>
-- see how few characters you can get away with
exit to stop the container
docker run --detach --name nightcrawler ubuntu /bin/sh -c "while true; do echo hello world; sleep 2; done"
docker logs --follow --timestamps nightcrawler
docker top nightcrawler
docker stats nightcrawler
docker exec --detach nightcrawler touch /etc/new_config_file
docker exec --interactive --tty nightcrawler /bin/bash
ls -alh /etc/new_config_file
-- should see the file added previously
exit
docker stop nightcrawler
docker ps -a
-- container still exists but is not running
docker run --restart=always --detach --name banshee ubuntu /bin/sh -c "sleep 2; exit 1964"
watch docker ps
-- notice how the container keeps restarting after the "failure"
docker stop banshee
docker run --restart=on-failure:5 --detach --name colossus ubuntu /bin/sh -c "sleep 5; exit 1964"
watch docker ps
-- notice how Docker gives up restarting after 5 tries
docker inspect colossus
docker inspect --format='{{ .State.Running }}' colossus
docker stop colossus
docker rm colossus
- clean up the remaining containers on your own. Try using id and names.
docker rm --volumes --force $(docker ps --all --quiet)
-- shell magic to nuke all containers
- visit
https://hub.docker.com/
- create an account (we'll use it in later labs)
- click the Explore link
- browse through the images marked as official
- poke around the following images looking for differences between them
docker run --interactive --tty alpine /bin/bash
docker run --interactive --tty centos /bin/bash
docker run --interactive --tty amazonlinux /bin/bash
docker run --interactive --tty bash /bin/bash
docker run --interactive --tty clearlinux /bin/bash
docker images
docker run --interactive --tty ubuntu:16.04
docker run --interactive --tty ubuntu:14.04
docker run --interactive --tty ubuntu:latest
docker pull ubuntu:12.04
docker images
docker search python
docker search kurron
docker run --interactive --tty kurron/docker-azul-jdk-8-build /bin/bash
java -version
ansible --version
docker --version
exit
docker images
docker rmi --force $(docker images --quiet)
docker images
docker run --interactive --tty ubuntu:latest
apt-get update
apt-get install apache2
exit
LAST=$(docker ps --latest --quiet)
echo ${LAST}
docker commit ${LAST} kurron/apache2
<--- use your own repository account
docker images kurron/apache2
docker commit --message "Created by hand" --author "Ron Kurr kurron@jvmguy.com" ${LAST} kurron/apache2:by-hand
docker inspect kurron/apache2:by-hand
docker run --interactive --tty kurron/apache2:by-hand
service apache2 status
git clone https://github.com/kurron/docker-study-group-labs.git
cd docker-study-group-labs/solutions/lab-07
docker build --tag="kurron/static_web:v1.0.0" .
docker images
docker build --file Dockerfile.broken .
docker build --no-cache --tag="kurron/static_web:v1.0.0" .
docker history 85098924c514
<--- your image id will be different
docker run --detach --publish 80 --name domino kurron/static_web:v1.0.0 nginx -g "daemon off;"
docker ps --latest
docker port cb888707fcba
docker port domino 80
docker run --detach --publish 80:80 --name domino kurron/static_web:v1.0.0 nginx -g "daemon off;"
docker run --detach --publish 8080:80 --name domino kurron/static_web:v1.0.0 nginx -g "daemon off;"
docker run --detach --publish 127.0.0.1:8080:80 --name domino kurron/static_web:v1.0.0 nginx -g "daemon off;"
docker run --detach --publish 127.0.0.1::80 --name domino kurron/static_web:v1.0.0 nginx -g "daemon off;"
- stop and remove all containers
docker run --detach --publish-all --name domino kurron/static_web:v1.0.0 nginx -g "daemon off;"
docker port domino
curl localhost:32769
<--- your port will be different
cd solutions/lab-10
docker build --tag="kurron/dockerfile-example:v1.0.0" .
docker run --interactive --tty kurron/dockerfile-example:v1.0.0
docker run --interactive --tty kurron/dockerfile-example:v1.0.0 -alh /opt
docker run --interactive --tty kurron/dockerfile-example:v1.0.0 -alh /opt/wordpress
docker images
docker inspect kurron/dockerfile-example:v1.0.0
cd labs/lab-11
- edit the
Dockerfile
andlocal.txt
files, using your personal settings
docker build --tag="kurron/publish-example:v1.0.0" .
<--- use your own account
docker run --interactive --tty kurron/publish-example:v1.0.0
docker push kurron/publish-example:v1.0.0
docker login
docker push kurron/publish-example:v1.0.0
- visit https://hub.docker.com/ and find your image
- run somebody else's image, illustrating how image sharing works
This is difficult to explain in text so try and be in class for this one.
- Create a GitHub account if you don't already have one
- Create a new repository using the
labs/lab-11
folder as source - Log into your Docker Hub account
- click
Create -> Create Automated Build
Add Repository
- Add your GitHub account
- Select your repository
Create
to create the build project
- Verify that the build was successful
- Checkout your GitHub project
- Make an edit to your
local.txt
git commit -am 'Ron made me do this'
git push
- In the Docker Hub console, make sure your Docker build gets triggered
- Pull down the latest image and run it, ensuring your changes show up
cd docker-study-group-labs
git reset --hard
<--- will nuke any local changes you may have made
git pull
to get the current bits
cd solutions/lab-12
docker build --tag="kurron/mount-example:v1.0.0" .
<--- use your own account
docker history kurron/mount-example:v1.0.0
docker run --detach --publish 80 --name mystique --volume ${PWD}/website:/var/www/html/website:ro kurron/mount-example:v1.0.0 nginx
docker port mystique
curl --silent localhost:32771
<--- your port will be different
- edit
website/index.html
curl --silent localhost:32771
- determine the public address of your EC2 instance
- open your web browser to
http://ec2-instance-address:32771/
- Tip: Volumes can also be shared between containers and can persist even when containers are stopped
- Tip: If the container directory doesn't exist Docker will create it.
- stop any running containers
docker rm --volumes --force $(docker ps --all --quiet)
docker rmi --force $(docker images --quiet)
docker run --name thor --detach --publish-all redis:latest
docker port thor
sudo apt-get install redis-tools
redis-cli -h 127.0.0.1 -p 32769 ping
<--- use your own port
docker run --name sif --interactive --tty --rm redis:latest redis-cli ping
<-- will fail with a connection error
- Every Docker container is assigned an IP address, provided through an interface created when we installed Docker. That interface is called
docker0
.
ip a show docker0
(you may have to install the iproute2 package)
- The docker0 interface is a virtual Ethernet bridge that connects our containers and the local host network.
ip a show
-- for every container there is a veth interface
docker run --interactive --tty --rm ubuntu:latest bash
apt-get update && apt-get install iproute2 traceroute
ip a show eth0
-- we can see the EC2-side ip address of the container
traceroute google.com
-- notice how we go through the docker0 ip address?
exit
sudo iptables --table nat --list --numeric
-- this is just to underscore that NAT is happening
docker inspect thor
or
docker inspect --format '{{ .NetworkSettings.IPAddress }}' thor
to get the ip address
redis-cli -h 172.17.0.2 ping
<--- use your own ip, notice we no longer have to specify a port
DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 tcp dpt:32769 to:172.17.0.2:6379
show the NATing going on
docker inspect --format '{{ .NetworkSettings.IPAddress }}' thor
-- remember this value
docker restart thor
docker inspect --format '{{ .NetworkSettings.IPAddress }}' thor
-- address can change on you
- TIP: hard coding addresses and the fact that addresses can change make internal networking difficult to use in production
- stop any running containers
docker rm --volumes --force $(docker ps --all --quiet)
docker rmi --force $(docker images --quiet)
docker network create asgard
docker network inspect asgard
- TIP: in addition to bridge networks, which exist on a single host, we can also create overlay networks, which allow us to span multiple hosts.
docker network ls
- TIP:
docker network rm
will remove a network
docker run --name thor --net asgard --detach --publish-all redis:latest
docker network inspect asgard
-- notice how thor is now a member of the network?
docker run --name sif --net asgard --interactive --tty --rm redis:latest redis-cli -h thor ping
<-- this now works!
docker run --name heimdall --net asgard --interactive --tty --rm ubuntu:latest /bin/bash
apt-get update && apt-get install dnsutils iputils-ping
nslookup thor
ping thor.asgard
-- the network name becomes the domain name
ctrl-c then exit
docker run --name hogun --detach --publish-all redis:latest
docker network inspect asgard
-- notice how hogun is not a member of the network?
docker network connect asgard hogun
docker network inspect asgard
-- notice how hogun is now a member of the network?
- recreate heimdall and use him to see hogun
docker network disconnect asgard hogun
to remove hogun from the network
cd solutions/lab-17
./clean-slate-protocol.sh
cat Dockerfile
docker build --tag="kurron/docker-in-docker:latest" .
docker run --rm kurron/docker-in-docker:latest
docker run --interactive --tty --rm --workdir /work-area --volume ${PWD}:/work-area:ro kurron/docker-in-docker:latest bash
docker build --tag="kurron/docker-in-docker:latest" .
<--- why does this fail?
exit
docker run --interactive --tty --rm --workdir /work-area --volume /var/run/docker.sock:/var/run/docker.sock --volume ${PWD}/Dockerfile-CentOS:/work-area/Dockerfile:ro kurron/docker-in-docker:latest bash
cat Dockerfile
docker build --tag="kurron/docker-in-docker:CentOS" .
docker images
<-- notice the newly built CentOS images
docker run --rm kurron/docker-in-docker:CentOS
docker run --interactive --tty --rm --cidfile=/tmp/containerid.txt kurron/docker-in-docker:CentOS bash
<-- you just started a Docker container from within a Docker container!
exit
<-- leave the CentOS container
cat /tmp/containerid.txt
<-- holds the id of the container we just exited
exit
<-- leave the docker-in-docker container
- Tip: use
docker wait <container id>
to wait for a long running container to exit, obtaining its exit code - Tip: Drone is a Docker-in-Docker build engine
- Tip: Shippable is a CI/CD SaaS that supports Docker
cd solutions/lab-18
./clean-slate-protocol.sh
docker run --interactive --tty --rm --workdir /work-area --volume ${PWD}/config.ini:/work-area/config.ini:ro ubuntu:latest bash
cat config.ini
exit
docker run --interactive --tty --rm --env username=logan --env password=Weapon-X ubuntu:latest bash
env | sort
exit
docker run --interactive --tty --rm --env-file config.ini ubuntu:latest bash
env | sort
exit
cd solutions/lab-19
./clean-slate-protocol.sh
cat nginx/Dockerfile
docker build --tag="study-group/nginx:latest" nginx
cat new-england/Dockerfile
docker build --tag="study-group/new-england:latest" new-england
cat miami/Dockerfile
docker build --tag="study-group/miami:latest" miami
docker images
docker run --name new-england study-group/new-england:latest
docker run --name miami study-group/miami:latest
docker run --name superbowl --detach --publish-all --volumes-from new-england:ro study-group/nginx:latest nginx
docker ps
<-- notice how only the superbowl container is running
IP=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' superbowl)
curl --silent ${IP}
docker stop superbowl
docker rm superbowl
docker run --name superbowl --detach --publish-all --volumes-from miami:ro study-group/nginx:latest nginx
IP=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' superbowl)
curl --silent ${IP}
docker inspect -f "{{ range .Mounts }}{{.}}{{end}}" superbowl
docker inspect -f "{{ range .Mounts }}{{.}}{{end}}" miami
docker inspect -f "{{ range .Mounts }}{{.}}{{end}}" new-england
- use ls and cat to poke around those folders (need to use sudo)
docker run --rm --volumes-from new-england:ro --volume $(pwd):/backup:rw ubuntu tar cvf /backup/backup.tar /var/www/html
tar --list --verbose --file backup.tar
- Volume Bullet Points
- Volumes can be shared and reused between containers
- A container doesn't have to be running to share its volumes
- Changes to a volume are made directly
- Changes to a volume will not be included when you update an image
- Volumes persist even when no containers use them
cd solutions/lab-20
./clean-slate-protocol.sh
cat install-docker-compose.sh
./install-docker-compose.sh
less docker-compose.yml
docker-compose config
docker-compose pull
docker-compose images
docker-compose up -d
docker-compose ps
docker-compose top
docker-compose logs
docker volume ls
docker volume inspect lab20_mongodb-data
docker-compose up -d
docker-compose ps
docker-compose port showcase 8080
curl --silent localhost:32811/operations/health | python -m json.tool
<--- use the correct port
curl --silent localhost:32811/operations/info | python -m json.tool
<--- use the correct port
docker-compose down --rmi all --volumes --remove-orphans
Docker Machine is a tool that lets you install Docker Engine on virtual hosts, and manage the hosts with docker-machine commands. You can use Machine to create Docker hosts on your local Mac or Windows box, on your company network, in your data center, or on cloud providers like Azure, AWS, or Digital Ocean.
git reset --hard
cd solutions/lab-21
./clean-slate-protocol.sh
./install-docker-machine.sh
docker-machine --help
docker-machine create --help
cat fix-ssh-key-permissions.sh
./fix-ssh-key-permissions.sh
cat create-docker-machine.sh
cat create-alpha-machine.sh
./create-alpha-machine.sh us-east-1 a
./create-bravo-machine.sh us-east-1 a
./create-charlie-machine.sh us-east-1 a
./create-delta-machine.sh us-east-1 a
./create-echo-machine.sh us-east-1 a
cat fix-docker-permissions.sh
./fix-docker-permissions.sh <alpha ip>
./fix-docker-permissions.sh <bravo ip>
./fix-docker-permissions.sh <charlie ip>
./fix-docker-permissions.sh <delta ip>
./fix-docker-permissions.sh <echo ip>
docker-machine ls
docker-machine inspect alpha
docker-machine ip bravo
docker-machine ssh charlie hostname
docker-machine scp fix-ssh-key-permissions.sh delta:/tmp
docker-machine url echo
docker-machine ls
docker-machine stop charlie
docker-machine ls
- examine the EC2 console to see the status of the charlie instance
docker-machine start charlie
docker-machine ls
<-- what is it complaining about and why?
docker-machine regenerate-certs charlie
docker-machine ls
docker-machine status delta
docker-machine restart echo
docker-machine ls
docker-machine version alpha
docker-machine upgrade alpha
docker-machine rm alpha
- See if alpha is in the EC2 console
docker-machine stop bravo charlie delta echo
A swarm is a cluster of Docker Engines where you deploy services. The Docker Engine CLI includes the commands for swarm management, such as adding and removing nodes. The CLI also includes the commands you need to deploy services to the swarm and manage service orchestration. A node is an instance of the Docker Engine participating in the swarm.
git reset --hard
cd solutions/lab-22
./clean-slate-protocol.sh
docker-machine start bravo charlie delta echo
docker-machine ls
docker-machine regenerate-certs bravo charlie delta echo
docker-machine ls
- In the EC2 console, adjust the security group to allow ingress on ports 22, 2376, 2377, 3376, 7946 (tcp and udp), 4789(udp)
cat create-swarm.sh
./create-swarm.sh
As of Docker 1.9.0, the ability to create a network specific to a set of containers was added. There are couple forms of Docker networking but we'll be focusing on overlay networking, aka multi-host networking. Based on Virtual Extensible LAN (VXLAN) technology, an overlay network gives each container participating in the network its own ip address. The address is only routable to containers participating in the network. Since each container gets its own address, you won't get port collisions and you don't have to play the "port mapping game" to find an open port to bind your service to. Containers can participate in multiple networks so you have the ability to segregate parts of your architecture and still route traffic to where it needs to go. Finally, networks created on the Swarm manager node are automatically made available to the Swarm workers. There is a legacy networking mode that required a dedicated consensus server so if you see instructions requiring Consul or etcd to be installed, it is probably dealing with the legacy stuff.
git reset --hard
cd solutions/lab-23
./clean-slate-protocol.sh
cat create-network.sh
./create-network.sh
Swarm supports two types of services. One type, the Global Service, is a container that is targeted to all nodes in the swarm. So, if you have 10 nodes in your swarm, then all 10 will contain the global services you have deployed. The second type, the Replicated Service, is a container that is targeted to a specific deployment count. For example, if I have a stateless web application and I specify a replication of 3, then 3 out of my 10 containers will be running an instance of the web application
git reset --hard
cd solutions/lab-24
./clean-slate-protocol.sh
cat create-global-service.sh
./create-global-service.sh
Notice all the nodes, including the manager node, now have hello-global service running on them? If I were to add another node to the swarm, it would also get told to run the service. When would you want to use a global service? I use them for "bookkeeping" type of containers such as DataDog or Consul.
Last time, we talked about global services. Today we'll look at replicated services. As the name suggests, the desire is to have multiple copies of a container running in the cluster. Containers housing stateless applications, such as a static web site, are candidates for replication. Containers that rely on local state will not work properly as a replicated service due to migration and load balancing issues. So what is a replicated service? If you need multiple containers to be running, probably for availability reasons, you can easily tell Docker that you would like N number of containers running at all times.
git reset --hard
cd solutions/lab-25
./clean-slate-protocol.sh
cat create-replicated-service.sh
./create-replicated-service.sh
In the above example, we told Docker to deploy 2 copies of the alpine container into the cluster. We don't care what nodes are running the containers, just as long as there are two of them. If possible, Docker will schedule the containers on separate hosts. If a container fails, then it will be replaced. Very straightforward. Next time, we'll showcase constrained services which give us a bit more control over the placement of our containers.
Last time, we looked at replicated services. Today we'll look at a nuanced version of replicated services: constrained services. The primary difference between a replicated service and a constrained one is that we can put restrictions on where the containers can be run. The simplest constraint is to put containers on nodes that have been tagged with a particular label. You can also use other placement criteria using simple boolean expressions but the currently available selection attributes are more limited than what you'll find in other schedulers, such as Kubernetes or Nomad.
git reset --hard
cd solutions/lab-26
./clean-slate-protocol.sh
cat create-constrained-service.sh
./create-constrained-service.sh
In this example, we are asking to have our 3 alpine containers to only run on worker nodes and Docker will do its best to comply. If there are no nodes tagged as being workers, Docker will wait until one becomes available and start the containers. Next time, we'll learn how to scale down our running services.
Last time, we looked at constrained deployments. Today we'll see how to scale our services down. In truth, there really isn't much to do because Docker takes care of everything for us. All we need to do is to tell the swarm how many instances we need currently.
git reset --hard
cd solutions/lab-27
./clean-slate-protocol.sh
cat scale-down-service.sh
./scale-down-service.sh
As you can see, we are telling Docker to scale back our hello-constrained service down to a single instance. The interesting part of this example is that we can see that the swarm is turning off instances on nodes, leaving us with the single instance. Again, this is an example of declarative operations. We're telling Docker what we want, not how to do it.
Last time, we looked at how to scale down our services. Today, we'll look at how to remove them all together.
git reset --hard
cd solutions/lab-28
./clean-slate-protocol.sh
cat remove-service.sh
./remove-service.sh
As you can see, removing a service is very straightforward and is as simple as removing a file in Linux.
Last time, we saw how simple it was to remove a service from the swarm. Today, we'll look at something a little more interesting: rolling upgrades. The scenario is this, you have an existing collection of services deployed and you need to upgrade them to current bits. You would love for the service to remain available during the upgrade process and avoid making your customers unhappy. How can this be done? The answer is Docker's rolling upgrades. The idea is simple, once the process is started, one by one a service in the swarm gets replaced with a newer version. During the process, you will have a mixture of the new and old bits so your solution cannot be sensitive to that fact. Lets see how this looks in practice.
git reset --hard
cd solutions/lab-29
./clean-slate-protocol.sh
cat upgrade-service.sh
./upgrade-service.sh
In this simple example, we install version 3.0.6 of Redis into the swarm and later decide to upgrade to Redis 3.0.7. This example is contrived and doesn't incorporate things you might do in a real setting, such as monitoring of the state of the containers as they transition or what to do if there is a problem during the replacement process.
Last time, we looked at rolling upgrades. Today, we'll learn how to temporarily take a node off-line for maintenance. At some point, you are probably going to have turn your node off and perform some maintenance on the box it is running on. It could be a simple as upgrading the version of Docker or as complex as swapping out a drive. During that time, you want to tell the Swarm that the node is temporarily going away and that some other node needs to take its place in the interim. Thankfully, the process is pretty simple.
git reset --hard
cd solutions/lab-30
./clean-slate-protocol.sh
cat maintenance-mode.sh
./maintenance-mode.sh
Things to note in the above session. First, the work shifts from delta to echo. Second, once we bring delta back on-line the work remains with echo: no rebalancing of the work occurs.
Docker Engine swarm mode makes it easy to publish ports for services to make them available to resources outside the swarm. All nodes participate in an ingress routing mesh. The routing mesh enables each node in the swarm to accept connections on published ports for any service running in the swarm, even if there’s no task running on the node. The routing mesh routes all incoming requests to published ports on available nodes to an active container.
git reset --hard
cd solutions/lab-31
./clean-slate-protocol.sh
cat service-mesh.sh
./service-mesh.sh
- adjust the
docker-machine
security group to allow port 80 traffic to flow
- run
watch 'curl --silent <IP address> | python3 -m json.tool'
, noticing the changing address and HOSTNAME
- look up the public address of some of the other nodes and hit those
- adjust the scale up or down and see how results are affected
docker-machine ssh bravo docker service scale nginx=2
docker-machine ssh bravo docker service ps nginx
The business has decided that in order to stay competitive, our product needs to support developer extension points. They want an experience similar to AWS Lambda where code written in a variety of programming languages can interact with our system in a safe and predictable manner. Luckily, we've already covered everything we'll need to produce a Docker-based proof of concept. In this lab, we'll create a FaaS implementation as a series of short shell scripts that interact with Docker that simulates the developer experience. The implementation must support the following:
- JVM and Python based functions
- functions that accept a string and that return a string
- developer can specify the following runtime constraints
- RAM
- CPU
- whether networking is needed or not
- the name of the script to run
- a file containing environment variables that the script can use for configuration
Create a script that launches a container using the appropriate image and runtime switches. You will have two scripts, one for each runtime. Your task is complete if the string passed to the script is printed in uppercase. There is a solution in solutions/lab-32
if you get stuck.
git reset --hard
cd labs/lab-32
./clean-slate-protocol.sh
cat faas.env
cat faas.groovy
cat faas.py
cat run-groovy-function.sh
cat run-python-function.sh
- poke around Docker Hub to find the appropriate images
In this lab, we will host our images on Amazon instead of using public repositories. Normally, this is done for security and operational reasons. We will create the registry in our AWS account, push an image to it and have one of our classmates pull from it. The container simply prints the current date and time.
git reset --hard
git pull
cd labs/lab-33
./clean-slate-protocol.sh
- edit
Dockerfile
so that it creates an image that runs the Linuxdate
command - edit
create-docker-image.sh
as needed to create the proper image ./create-docker-image.sh
to create the image- run the proper Docker command to verify the image built correctly
- edit
test-image.sh
so that it tests your image ./test-image.sh
to verify your image works correctly- log into your AWS account, navigating to Compute->Elastic Container Service
- create your repository. You can call it anything you want but
aws-study-group
will be used in the solution - follow the instructions from Amazon on how to authenticate to your registry
- edit
tag-image.sh
so that it tags the existing image with a tag suitable for your new repository ./tag-image.sh
to tag the image- run the proper Docker command to verify the image got tagged correctly
- edit
push.image.sh
./push-image.sh
to push it to the registry- use the console to verify the image made it
- select a classmate
- using the AWS console, give their account the ability to pull down your image
- have them pull down your image and run it
- Using the AWS console, figure out how to auto-delete images that are older than 30 days
Lab N: Docker Store
- /var/lib/docker/containers (3.15)
- /var/lib/docker (4.2)
This project is licensed under the Apache License Version 2.0, January 2004.