-
Notifications
You must be signed in to change notification settings - Fork 1
/
Dockerfile
203 lines (162 loc) · 8.79 KB
/
Dockerfile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
# AWS CLI v2 — prebuilt in the sparkfabrik alpine image pulled below.
# Check the available versions here:
# https://github.com/sparkfabrik/docker-alpine-aws-cli/pkgs/container/docker-alpine-aws-cli
ARG AWS_CLI_VERSION=2.17.44
# ALPINE_VERSION must match the alpine release the google-cloud-cli base image
# is built on, so the awscli binaries link against the same musl/openssl.
# To fetch the right alpine version use:
# docker run --rm --entrypoint ash eu.gcr.io/google.com/cloudsdktool/google-cloud-cli:${GOOGLE_CLOUD_CLI_IMAGE_TAG} -c 'cat /etc/issue'
ARG ALPINE_VERSION=3.19
# Tag of the google-cloud-cli base image, shared by the build and final stages.
# No default: it must be supplied with --build-arg. Available tags:
# https://console.cloud.google.com/gcr/images/google.com:cloudsdktool/EU/google-cloud-cli
#
# NOTE(review): ARGs declared before the first FROM are only visible in FROM
# lines; GOOGLE_CLOUD_CLI_IMAGE_TAG is therefore redeclared later where needed.
# Use the same version of the base image in different stages
ARG GOOGLE_CLOUD_CLI_IMAGE_TAG
FROM ghcr.io/sparkfabrik/docker-alpine-aws-cli:${AWS_CLI_VERSION}-alpine${ALPINE_VERSION} AS awscli
# Building and downloading all the tools in a single stage.
# Everything produced here is pulled into the final image with COPY --from=build.
FROM eu.gcr.io/google.com/cloudsdktool/google-cloud-cli:${GOOGLE_CLOUD_CLI_IMAGE_TAG} AS build
# Build target arch passed by BuildKit (e.g. amd64, arm64)
ARG TARGETARCH
# Install components for the building stage (toolchain for the static jq build,
# curl/gzip/unzip for the downloads).
RUN apk --no-cache add autoconf automake build-base curl gzip libtool make openssl unzip
# Download helm
# https://github.com/helm/helm/releases
ENV HELM_VERSION=3.15.4
# -f makes curl fail the build on HTTP errors instead of saving the error page;
# -L follows redirects. (The previous "-L0" — digit zero — forced HTTP/1.0 and
# was a typo for these flags.)
RUN curl -fsSL -o /tmp/helm-v${HELM_VERSION}-linux-${TARGETARCH}.tar.gz "https://get.helm.sh/helm-v${HELM_VERSION}-linux-${TARGETARCH}.tar.gz" \
  && tar -zxvf /tmp/helm-v${HELM_VERSION}-linux-${TARGETARCH}.tar.gz -C /tmp \
  && mv /tmp/linux-${TARGETARCH}/helm /usr/local/bin/helm
# Download stern
# https://github.com/stern/stern/releases
ENV STERN_VERSION=1.30.0
# -o already names the output file; the previous conflicting -O flag was dropped.
RUN curl -fsSL -o /tmp/stern_${STERN_VERSION}_linux_${TARGETARCH}.tar.gz "https://github.com/stern/stern/releases/download/v${STERN_VERSION}/stern_${STERN_VERSION}_linux_${TARGETARCH}.tar.gz" \
  && tar -zxvf /tmp/stern_${STERN_VERSION}_linux_${TARGETARCH}.tar.gz -C /tmp \
  && mv /tmp/stern /usr/local/bin/stern
# Download the jq sources (the repository moved from stedolan/jq to jqlang/jq;
# the canonical location is used here)
# https://github.com/jqlang/jq/releases
ENV JQ_VERSION=1.7.1
RUN curl -fsSL -o /tmp/jq-${JQ_VERSION}.tar.gz "https://github.com/jqlang/jq/archive/refs/tags/jq-${JQ_VERSION}.tar.gz" \
  && tar -zxvf /tmp/jq-${JQ_VERSION}.tar.gz -C /tmp
# Download the oniguruma regex library sources, vendored into the jq build below.
# https://github.com/kkos/oniguruma/tree/v6.9.9
ENV ONIGURUMA_VERSION=6.9.9
RUN curl -fsSL -o /tmp/oniguruma-${ONIGURUMA_VERSION}.tar.gz "https://github.com/kkos/oniguruma/archive/refs/tags/v${ONIGURUMA_VERSION}.tar.gz" \
  && tar -zxvf /tmp/oniguruma-${ONIGURUMA_VERSION}.tar.gz -C /tmp
# Compile a fully static jq (LDFLAGS=-all-static) with the vendored oniguruma,
# so the binary can be copied into the final image with no runtime deps.
RUN cd /tmp/jq-jq-${JQ_VERSION} \
  && rmdir modules/oniguruma \
  && mv /tmp/oniguruma-${ONIGURUMA_VERSION} /tmp/jq-jq-${JQ_VERSION}/modules/oniguruma \
  && autoreconf -fi \
  && ./configure --with-oniguruma=builtin --disable-maintainer-mode \
  && make LDFLAGS=-all-static -j4 \
  && mv jq /usr/local/bin/jq
# Use the same version of the base image in different stages
# (redeclared here so the pre-FROM ARG value is visible to this FROM line).
ARG GOOGLE_CLOUD_CLI_IMAGE_TAG
# Create the final image
FROM eu.gcr.io/google.com/cloudsdktool/google-cloud-cli:${GOOGLE_CLOUD_CLI_IMAGE_TAG}
# Build target arch passed by BuildKit (e.g. amd64, arm64)
ARG TARGETARCH
# Add additional components to gcloud SDK.
RUN gcloud components install app-engine-java beta gke-gcloud-auth-plugin
# Use the gke-auth-plugin to authenticate to the GKE cluster.
# Install gke-gcloud-auth-plugin (https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke)
ENV USE_GKE_GCLOUD_AUTH_PLUGIN=true
# Remove unnecessary components.
RUN rm -f /usr/local/libexec/docker/cli-plugins/docker-buildx
# Install additional components.
RUN apk --no-cache add \
  bash-completion bat curl gettext grep groff less \
  make mandoc ncurses openssl unzip vim yq
# Create utility folder
RUN mkdir -p /utility
# Install AWS CLI v2 using the binary built in the awscli stage
COPY --from=awscli /usr/local/aws-cli/ /usr/local/aws-cli/
RUN ln -s /usr/local/aws-cli/v2/current/bin/aws /usr/local/bin/aws \
  && ln -s /usr/local/aws-cli/v2/current/bin/aws_completer /usr/local/bin/aws_completer
# Download kubectl
# KUBECTL_STABLE_VERSION is a minor version; the inner curl resolves it to the
# latest stable patch release via the stable-<minor>.txt marker file.
# https://console.cloud.google.com/storage/browser/kubernetes-release/release
ENV KUBECTL_STABLE_VERSION=1.29
# -f makes both curls fail the build on HTTP errors: without it a 404 body
# would be silently installed as the kubectl binary.
RUN echo "Installing kubectl using the stable version of ${KUBECTL_STABLE_VERSION}..." && \
  curl -fsSL -o /usr/local/bin/kubectl "https://storage.googleapis.com/kubernetes-release/release/$(curl -fsSL "https://storage.googleapis.com/kubernetes-release/release/stable-${KUBECTL_STABLE_VERSION}.txt")/bin/linux/${TARGETARCH}/kubectl" && \
  chmod +x /usr/local/bin/kubectl
# Download kubectx and kubens utilities plus their bash completions.
# https://github.com/ahmetb/kubectx
ENV KUBECTX_VERSION=0.9.5
# -o names each output file explicitly; the previous conflicting -O flag was
# dropped. -f fails the build on HTTP errors instead of saving the error page.
# NOTE(review): the "kubens.autocomple.sh" filename (sic) is kept as-is because
# the entrypoint scripts copied later may reference it — confirm before renaming.
RUN curl -fsSL -o /utility/kubens "https://github.com/ahmetb/kubectx/releases/download/v${KUBECTX_VERSION}/kubens" \
  && curl -fsSL -o /utility/kubectx "https://github.com/ahmetb/kubectx/releases/download/v${KUBECTX_VERSION}/kubectx" \
  && chmod +x /utility/kubens /utility/kubectx \
  && curl -fsSL -o /utility/kubens.autocomple.sh "https://raw.githubusercontent.com/ahmetb/kubectx/v${KUBECTX_VERSION}/completion/kubens.bash" \
  && curl -fsSL -o /etc/profile.d/kubectx.sh "https://raw.githubusercontent.com/ahmetb/kubectx/v${KUBECTX_VERSION}/completion/kubectx.bash" \
  && chmod +x /etc/profile.d/kubectx.sh /utility/kubens.autocomple.sh
# Install Krew - kubectl plugin manager
# https://github.com/kubernetes-sigs/krew/releases
# https://krew.sigs.k8s.io/docs/user-guide/setup/install/
ENV KREW_VERSION=0.4.4
# Upstream install recipe: detect OS/arch with uname (the sed normalizes
# x86_64 -> amd64 and arm variants/aarch64 -> arm64), download the matching
# krew release into a temp dir, and let krew install itself.
RUN set -x; cd "$(mktemp -d)" \
  && OS="$(uname | tr '[:upper:]' '[:lower:]')" \
  && ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" \
  && KREW="krew-${OS}_${ARCH}" \
  && curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/download/v${KREW_VERSION}/${KREW}.tar.gz" \
  && tar zxvf "${KREW}.tar.gz" \
  && rm "${KREW}.tar.gz" \
  && ./"${KREW}" install krew
# krew installs plugins under /root/.krew even though HOME is changed later;
# the PATH below (and the chmod further down) make them usable by any user.
ENV PATH="/root/.krew/bin:$PATH"
# Install kube-capacity using krew https://github.com/robscott/kube-capacity
RUN kubectl krew install resource-capacity \
# Install community-images using krew https://github.com/kubernetes-sigs/community-images#kubectl-community-images
  && kubectl krew install community-images
# Make krew directory executable
# (world-readable/executable so non-root users can run the krew plugins
# installed under /root/.krew above)
RUN chmod 755 /root && chmod -R 755 /root/.krew
# Grant permissions to the /usr/local/bin directory to allow standard users to manipulate kubens.
# See docker-entrypoint.sh in 'Using original kubens' feature.
# (Deliberately world-writable; this image is an interactive tooling shell.)
RUN chmod 777 /usr/local/bin
# Copy helm from previous stage
COPY --from=build /usr/local/bin/helm /usr/local/bin/helm
RUN chmod +x /usr/local/bin/helm
# Copy stern from previous stage
COPY --from=build /usr/local/bin/stern /usr/local/bin/stern
RUN chmod +x /usr/local/bin/stern
# Copy the statically compiled jq from the build stage
COPY --from=build /usr/local/bin/jq /usr/local/bin/jq
RUN chmod +x /usr/local/bin/jq
# Overwrite kubens with custom kubens script (we don't have namespace list permission)
COPY scripts/kubens /utility/kubens.patched
RUN chmod +x /utility/kubens.patched
# The symlink shadows the upstream kubens downloaded into /utility earlier.
RUN ln -s /utility/kubens.patched /usr/local/bin/kubens
# Create userless home, it will be used only for cache.
# World-writable so the container can run as any (non-root) UID.
ENV HOME=/cloud-tools-cli
RUN mkdir /cloud-tools-cli \
  && chmod 777 /cloud-tools-cli
# Save history across container restarts (dotfiles dir is expected to be
# volume-mounted by the caller; see docker-entrypoint.sh).
ENV HISTFILE=/cloud-tools-cli/dotfiles/.bash_history
RUN mkdir -p /cloud-tools-cli/dotfiles
# Prompter function to build the bash prompt with additional information
ENV PROMPT_COMMAND=prompter
COPY scripts/prompter.sh /etc/profile.d/prompter.sh
RUN chmod +x /etc/profile.d/prompter.sh
# Add bash functions
COPY scripts/bash_functions.sh /etc/profile.d/bash_functions.sh
RUN chmod +x /etc/profile.d/bash_functions.sh
# Final settings: build a profile.d script with completions and aliases for
# kubectl, helm, stern, gcloud and aws, sourced by the login shell (CMD below).
RUN touch /etc/profile.d/tools-completion.sh \
  && chmod +x /etc/profile.d/tools-completion.sh \
  && echo "source <(kubectl completion bash)" >> /etc/profile.d/tools-completion.sh \
  && echo "alias k=\"kubectl\"" >> /etc/profile.d/tools-completion.sh \
  && echo "complete -o default -F __start_kubectl k" >> /etc/profile.d/tools-completion.sh \
  # ${PATH} expands at BUILD time: this intentionally bakes the image PATH
  # (including /root/.krew/bin) into the profile for non-root login shells.
  && echo "export PATH=\"${PATH}\"" >> /etc/profile.d/tools-completion.sh \
  && echo "alias kube-capacity=\"kubectl resource-capacity\"" >> /etc/profile.d/tools-completion.sh \
  && echo "source <(helm completion bash)" >> /etc/profile.d/tools-completion.sh \
  && echo "source <(stern --completion bash)" >> /etc/profile.d/tools-completion.sh \
  && echo "source /google-cloud-sdk/path.bash.inc" >> /etc/profile.d/tools-completion.sh \
  && echo "complete -C '/usr/local/bin/aws_completer' aws" >> /etc/profile.d/tools-completion.sh
# Additional entrypoints: docker-entrypoint.sh runs every script found in
# /docker-entrypoint.d (and, presumably, /custom-docker-entrypoint.d — confirm
# against docker-entrypoint.sh) before handing off to CMD.
RUN mkdir -p /docker-entrypoint.d
COPY docker-entrypoint.sh /docker-entrypoint.sh
COPY scripts/docker-entrypoint.d /docker-entrypoint.d
# Make /etc/profile and /etc/profile.d writable for everyone.
# This image could be used with a non-root user.
RUN chmod 777 /etc/profile \
  && chmod -R 777 /etc/profile.d
# Create custom directory for custom-docker-entrypoint.d
# (mount point for user-provided startup scripts)
RUN mkdir -p /custom-docker-entrypoint.d
# Entrypoint configuration
RUN chmod +x /docker-entrypoint.sh \
  && find /docker-entrypoint.d -type f -exec chmod +x {} +
# Exec-form entrypoint; default command is an interactive login shell so the
# /etc/profile.d completion/prompt scripts above are sourced.
ENTRYPOINT [ "/docker-entrypoint.sh" ]
CMD [ "bash", "-il" ]