The container does not have permission to read or write to the specified directory
a1046149428 opened this issue · 1 comment
a1046149428 commented:
# OpenShift Template: replicated Zookeeper ensemble with persistent storage.
# (Indentation reconstructed — the pasted template had all nesting stripped,
# which is invalid YAML.)
kind: Template
apiVersion: v1
metadata:
  name: zk-persistent
  annotations:
    openshift.io/display-name: Zookeeper (Persistent)
    description: Create a replicated Zookeeper server with persistent storage
    iconClass: icon-database
    tags: database,zookeeper
# Template-level labels are applied to every object the template creates.
labels:
  template: zk-persistent
  component: zk
parameters:
- name: NAME
  value: zk-persistent
  required: true
- name: SOURCE_IMAGE
  description: Container image
  value: docker.io/brycehuang/zookeeper-oc
  required: true
- name: ZOO_VERSION
  description: Version
  value: 1.0.0-3.5.5
  required: true
- name: ZOO_REPLICAS
  description: Number of nodes
  value: "3"
  required: true
- name: VOLUME_DATA_CAPACITY
  description: Persistent volume capacity for zookeeper dataDir directory (e.g. 512Mi, 2Gi)
  value: 1Gi
  required: true
- name: VOLUME_DATALOG_CAPACITY
  description: Persistent volume capacity for zookeeper dataLogDir directory (e.g. 512Mi, 2Gi)
  value: 1Gi
  required: true
- name: ZOO_TICK_TIME
  description: The number of milliseconds of each tick
  value: "2000"
  required: true
- name: ZOO_INIT_LIMIT
  description: The number of ticks that the initial synchronization phase can take
  value: "5"
  required: true
- name: ZOO_SYNC_LIMIT
  description: The number of ticks that can pass between sending a request and getting an acknowledgement
  value: "2"
  required: true
- name: ZOO_CLIENT_PORT
  description: The port at which the clients will connect
  value: "2181"
  required: true
- name: ZOO_SERVER_PORT
  description: Server port
  value: "2888"
  required: true
- name: ZOO_ELECTION_PORT
  description: Election port
  value: "3888"
  required: true
- name: ZOO_MAX_CLIENT_CNXNS
  description: The maximum number of client connections
  value: "60"
  required: true
- name: ZOO_SNAP_RETAIN_COUNT
  description: The number of snapshots to retain in dataDir
  value: "3"
  required: true
- name: ZOO_PURGE_INTERVAL
  description: Purge task interval in hours. Set to 0 to disable auto purge feature
  value: "1"
  required: true
- name: ZOO_MEMORY
  description: JVM heap size
  value: "-Xmx512M -Xms512M"
  required: true
- name: RESOURCE_MEMORY_REQ
  description: The memory resource request.
  value: "512M"
  required: true
- name: RESOURCE_MEMORY_LIMIT
  description: The limits for memory resource.
  value: "512M"
  required: true
- name: RESOURCE_CPU_REQ
  description: The CPU resource request.
  value: "300m"
  required: true
- name: RESOURCE_CPU_LIMIT
  description: The limits for CPU resource.
  value: "300m"
  required: true
objects:
# Headless service (clusterIP: None): gives each StatefulSet pod a stable DNS
# name; tolerate-unready-endpoints lets peers discover each other before they
# pass readiness (required for initial quorum formation).
- apiVersion: v1
  kind: Service
  metadata:
    name: ${NAME}
    labels:
      zk-name: ${NAME}
      component: zk
    annotations:
      # NOTE(review): deprecated annotation; on current clusters prefer
      # spec.publishNotReadyAddresses: true — kept for backward compatibility.
      service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
  spec:
    ports:
    # port is an integer field. Plain ${PARAM} always substitutes a STRING and
    # makes the object fail validation; ${{PARAM}} performs non-string
    # (unquoted) substitution.
    - port: ${{ZOO_CLIENT_PORT}}
      name: client
    - port: ${{ZOO_SERVER_PORT}}
      name: server
    - port: ${{ZOO_ELECTION_PORT}}
      name: election
    clusterIP: None
    selector:
      zk-name: ${NAME}
# StatefulSet running the Zookeeper ensemble with two persistent volumes per
# pod (dataDir and dataLogDir).
- apiVersion: apps/v1
  kind: StatefulSet
  metadata:
    name: ${NAME}
    labels:
      zk-name: ${NAME}
      component: zk
  spec:
    # Parallel: start/terminate all pods at once instead of ordinally;
    # Zookeeper handles its own quorum ordering.
    podManagementPolicy: "Parallel"
    serviceName: ${NAME}
    selector:
      matchLabels:
        zk-name: ${NAME}
        component: zk
    # Integer field — requires non-string substitution ${{PARAM}};
    # plain ${PARAM} yields the string "3" and fails validation.
    replicas: ${{ZOO_REPLICAS}}
    template:
      metadata:
        labels:
          zk-name: ${NAME}
          template: zk-persistent
          component: zk
        # annotations:
        ## Use this annotation if you want to allocate each pod on a different
        ## node. Note the number of nodes must be higher than the ZOO_REPLICAS
        ## parameter.
        # scheduler.alpha.kubernetes.io/affinity: >
        #   {
        #     "podAntiAffinity": {
        #       "requiredDuringSchedulingIgnoredDuringExecution": [{
        #         "labelSelector": {
        #           "matchExpressions": [{
        #             "key": "zk-name",
        #             "operator": "In",
        #             "values": ["zk"]
        #           }]
        #         },
        #         "topologyKey": "kubernetes.io/hostname"
        #       }]
        #     }
        #   }
      spec:
        # FIX for the reported permission error: fsGroup is only valid in the
        # POD-level securityContext (PodSecurityContext). It was previously set
        # on the container, where it is rejected, so the persistent volumes
        # were never made group-writable by GID 1001 and the container could
        # not read/write its data directories.
        securityContext:
          runAsUser: 1001
          fsGroup: 1001
        containers:
        - name: ${NAME}
          imagePullPolicy: IfNotPresent
          image: ${SOURCE_IMAGE}:${ZOO_VERSION}
          resources:
            requests:
              memory: ${RESOURCE_MEMORY_REQ}
              cpu: ${RESOURCE_CPU_REQ}
            limits:
              memory: ${RESOURCE_MEMORY_LIMIT}
              cpu: ${RESOURCE_CPU_LIMIT}
          ports:
          # containerPort is an integer field — use ${{PARAM}} (see replicas).
          - containerPort: ${{ZOO_CLIENT_PORT}}
            name: client
          - containerPort: ${{ZOO_SERVER_PORT}}
            name: server
          - containerPort: ${{ZOO_ELECTION_PORT}}
            name: election
          env:
          - name: SETUP_DEBUG
            value: "true"
          - name: ZOO_REPLICAS
            value: ${ZOO_REPLICAS}
          # FIX: referenced ${ZOO_HEAP_SIZE}, which is not a declared parameter
          # (it would substitute as empty); the declared parameter is ZOO_MEMORY.
          - name: ZK_HEAP_SIZE
            value: ${ZOO_MEMORY}
          - name: ZK_tickTime
            value: ${ZOO_TICK_TIME}
          - name: ZK_initLimit
            value: ${ZOO_INIT_LIMIT}
          - name: ZK_syncLimit
            value: ${ZOO_SYNC_LIMIT}
          - name: ZK_maxClientCnxns
            value: ${ZOO_MAX_CLIENT_CNXNS}
          - name: ZK_autopurge_snapRetainCount
            value: ${ZOO_SNAP_RETAIN_COUNT}
          - name: ZK_autopurge_purgeInterval
            value: ${ZOO_PURGE_INTERVAL}
          - name: ZK_clientPort
            value: ${ZOO_CLIENT_PORT}
          - name: ZOO_SERVER_PORT
            value: ${ZOO_SERVER_PORT}
          - name: ZOO_ELECTION_PORT
            value: ${ZOO_ELECTION_PORT}
          - name: JAVA_ZK_JVMFLAGS
            value: "\"${ZOO_MEMORY}\""
          readinessProbe:
            exec:
              command:
              - zkServer.sh
              - status
            initialDelaySeconds: 20
            timeoutSeconds: 10
          livenessProbe:
            exec:
              command:
              - zkServer.sh
              - status
            initialDelaySeconds: 20
            timeoutSeconds: 10
          volumeMounts:
          - name: datadir
            mountPath: /opt/zookeeper/data
          - name: datalogdir
            mountPath: /opt/zookeeper/data-log
    volumeClaimTemplates:
    - metadata:
        name: datadir
      spec:
        storageClassName: nfs-storage
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: ${VOLUME_DATA_CAPACITY}
    - metadata:
        name: datalogdir
      spec:
        storageClassName: nfs-storage
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: ${VOLUME_DATALOG_CAPACITY}
This is my template.