My app runs inside a Docker container and works perfectly fine on localhost, but when I run the same image in a Kubernetes cluster it gives me this error:
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
django.db.utils.OperationalError: could not translate host name "db" to address: Name or service not known
Here is the database configuration from my settings.py:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
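For context, the hostname db only resolves inside the cluster if a Kubernetes Service named db exists in the same namespace as the application pods; cluster DNS then maps db to that Service's ClusterIP. A quick hedged check (assuming the default namespace and that a busybox image can be pulled):
$ kubectl get svc db
$ kubectl run dns-test --rm -it --image=busybox --restart=Never -- nslookup db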
I deployed the image to the Kubernetes cluster with the frontend.yml manifest file. It looks like this:
frontend.yml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: dockersample-app
labels:
app: polls
spec:
replicas: 3
template:
metadata:
labels:
app: dcokersample-app
spec:
containers:
- name: dcokersample
image: mahesh61437/dockersample:v6
imagePullPolicy: Always
ports:
- containerPort: 8000
service.yml
apiVersion: v1
kind: Service
metadata:
name: dockersample-app
labels:
app: dockersample-app
spec:
type: LoadBalancer
ports:
- port: 8000
targetPort: 8000
selector:
app: dockersample-app
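Note that the Deployment's pod template is labelled app: dcokersample-app while the Service selector is app: dockersample-app, so as written the Service matches no pods. A hedged way to confirm whether a Service has picked up any pods is to check its endpoints:
$ kubectl get endpoints dockersample-app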
Here is my Dockerfile:
FROM python:3
RUN apt-get update
EXPOSE 8000
ENV PYTHONUNBUFFERED 1
RUN mkdir /code
WORKDIR /code
COPY requirements.txt /code/
RUN pip install -r requirements.txt
COPY . /code/
CMD python manage.py runserver
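One thing to watch in this Dockerfile: manage.py runserver binds to 127.0.0.1:8000 by default, so the app would not be reachable through containerPort 8000 from outside the pod. A hedged fix is to bind to all interfaces explicitly, i.e. have the container run:
python manage.py runserver 0.0.0.0:8000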
Here is the output of kubectl get pod,svc,deployment,pvc,pv -o yaml:
apiVersion: v1
items:
- apiVersion: v1
kind: Pod
metadata:
annotations:
cilium.io/identity: "63547"
creationTimestamp: "2019-02-14T09:49:39Z"
generateName: dockersample-app-557878d964-
labels:
app: dcokersample-app
pod-template-hash: 557878d964
name: dockersample-app-557878d964-fm94j
namespace: default
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: dockersample-app-557878d964
uid: d8b8a828-303d-11e9-94cc-9252dc3b5955
resourceVersion: "271350"
selfLink: /api/v1/namespaces/default/pods/dockersample-app-557878d964-fm94j
uid: d8bc708b-303d-11e9-94cc-9252dc3b5955
spec:
containers:
- image: mahesh61437/dockersample:v6
imagePullPolicy: Always
name: dcokersample
ports:
- containerPort: 8000
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: default-token-svb6z
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: vibrant-ramanujan-8zmn
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: default-token-svb6z
secret:
defaultMode: 420
secretName: default-token-svb6z
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:39Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:49Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:49Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:39Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: docker://d82ec6f089cc76e64e7ba68d56ba5c1263343c08929d648c9fef005d4a08488c
image: mahesh61437/dockersample:v6
imageID: docker-pullable://mahesh61437/dockersample@sha256:54aa303cc5534609a1b579718f192323fad9dd57bd92a2897cd64f110438c965
lastState: {}
name: dcokersample
ready: true
restartCount: 0
state:
running:
startedAt: "2019-02-14T09:49:49Z"
hostIP: 10.139.16.196
phase: Running
podIP: 10.244.1.64
qosClass: BestEffort
startTime: "2019-02-14T09:49:39Z"
- apiVersion: v1
kind: Pod
metadata:
annotations:
cilium.io/identity: "63547"
creationTimestamp: "2019-02-14T09:49:39Z"
generateName: dockersample-app-557878d964-
labels:
app: dcokersample-app
pod-template-hash: 557878d964
name: dockersample-app-557878d964-ftngl
namespace: default
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: dockersample-app-557878d964
uid: d8b8a828-303d-11e9-94cc-9252dc3b5955
resourceVersion: "271354"
selfLink: /api/v1/namespaces/default/pods/dockersample-app-557878d964-ftngl
uid: d8bdda66-303d-11e9-94cc-9252dc3b5955
spec:
containers:
- image: mahesh61437/dockersample:v6
imagePullPolicy: Always
name: dcokersample
ports:
- containerPort: 8000
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: default-token-svb6z
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: vibrant-ramanujan-8zm3
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: default-token-svb6z
secret:
defaultMode: 420
secretName: default-token-svb6z
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:39Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:49Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:49Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:39Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: docker://ef71c722fbcc70ceb96d929e983e22263cbc40a54fd666cf73cc0dd73c437cae
image: mahesh61437/dockersample:v6
imageID: docker-pullable://mahesh61437/dockersample@sha256:54aa303cc5534609a1b579718f192323fad9dd57bd92a2897cd64f110438c965
lastState: {}
name: dcokersample
ready: true
restartCount: 0
state:
running:
startedAt: "2019-02-14T09:49:48Z"
hostIP: 10.139.120.24
phase: Running
podIP: 10.244.2.187
qosClass: BestEffort
startTime: "2019-02-14T09:49:39Z"
- apiVersion: v1
kind: Pod
metadata:
annotations:
cilium.io/identity: "63547"
creationTimestamp: "2019-02-14T09:49:39Z"
generateName: dockersample-app-557878d964-
labels:
app: dcokersample-app
pod-template-hash: 557878d964
name: dockersample-app-557878d964-lq78m
namespace: default
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: dockersample-app-557878d964
uid: d8b8a828-303d-11e9-94cc-9252dc3b5955
resourceVersion: "271358"
selfLink: /api/v1/namespaces/default/pods/dockersample-app-557878d964-lq78m
uid: d8be0705-303d-11e9-94cc-9252dc3b5955
spec:
containers:
- image: mahesh61437/dockersample:v6
imagePullPolicy: Always
name: dcokersample
ports:
- containerPort: 8000
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: default-token-svb6z
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: vibrant-ramanujan-8z79
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: default-token-svb6z
secret:
defaultMode: 420
secretName: default-token-svb6z
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:39Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:50Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:50Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2019-02-14T09:49:39Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: docker://fa3c8f25b260b0e3c032907ff796b5e22bf0479646d457914de518c3c6180be0
image: mahesh61437/dockersample:v6
imageID: docker-pullable://mahesh61437/dockersample@sha256:54aa303cc5534609a1b579718f192323fad9dd57bd92a2897cd64f110438c965
lastState: {}
name: dcokersample
ready: true
restartCount: 0
state:
running:
startedAt: "2019-02-14T09:49:49Z"
hostIP: 10.139.16.250
phase: Running
podIP: 10.244.0.168
qosClass: BestEffort
startTime: "2019-02-14T09:49:39Z"
- apiVersion: v1
kind: Service
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"app":"dockersample-app"},"name":"dockersample-app","namespace":"default"},"spec":{"ports":[{"port":8000,"targetPort":8000}],"selector":{"app":"dockersample-app"},"type":"LoadBalancer"}}
creationTimestamp: "2019-02-14T09:49:39Z"
labels:
app: dockersample-app
name: dockersample-app
namespace: default
resourceVersion: "271514"
selfLink: /api/v1/namespaces/default/services/dockersample-app
uid: d8c78f7a-303d-11e9-94cc-9252dc3b5955
spec:
clusterIP: 10.245.57.250
externalTrafficPolicy: Cluster
ports:
- nodePort: 32204
port: 8000
protocol: TCP
targetPort: 8000
selector:
app: dockersample-app
sessionAffinity: None
type: LoadBalancer
status:
loadBalancer:
ingress:
- ip: 174.138.123.199
- apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2019-02-12T09:31:19Z"
labels:
component: apiserver
provider: kubernetes
name: kubernetes
namespace: default
resourceVersion: "6"
selfLink: /api/v1/namespaces/default/services/kubernetes
uid: f3f45187-2ea8-11e9-94cc-9252dc3b5955
spec:
clusterIP: 10.245.0.1
ports:
- name: https
port: 443
protocol: TCP
targetPort: 443
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
- apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"extensions/v1beta1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"polls"},"name":"dockersample-app","namespace":"default"},"spec":{"replicas":3,"template":{"metadata":{"labels":{"app":"dcokersample-app"}},"spec":{"containers":[{"image":"mahesh61437/dockersample:v6","imagePullPolicy":"Always","name":"dcokersample","ports":[{"containerPort":8000}]}]}}}}
creationTimestamp: "2019-02-14T09:49:39Z"
generation: 1
labels:
app: polls
name: dockersample-app
namespace: default
resourceVersion: "271360"
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/dockersample-app
uid: d8b79710-303d-11e9-94cc-9252dc3b5955
spec:
progressDeadlineSeconds: 2147483647
replicas: 3
revisionHistoryLimit: 2147483647
selector:
matchLabels:
app: dcokersample-app
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: dcokersample-app
spec:
containers:
- image: mahesh61437/dockersample:v6
imagePullPolicy: Always
name: dcokersample
ports:
- containerPort: 8000
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status:
availableReplicas: 3
conditions:
- lastTransitionTime: "2019-02-14T09:49:49Z"
lastUpdateTime: "2019-02-14T09:49:49Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
observedGeneration: 1
readyReplicas: 3
replicas: 3
updatedReplicas: 3
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"labels":{"app":"postgres"},"name":"postgres-pv-claim","namespace":"default"},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"5Gi"}},"storageClassName":"manual"}}
pv.kubernetes.io/bind-completed: "yes"
pv.kubernetes.io/bound-by-controller: "yes"
creationTimestamp: "2019-02-14T10:11:47Z"
finalizers:
- kubernetes.io/pvc-protection
labels:
app: postgres
name: postgres-pv-claim
namespace: default
resourceVersion: "273451"
selfLink: /api/v1/namespaces/default/persistentvolumeclaims/postgres-pv-claim
uid: f02728ee-3040-11e9-94cc-9252dc3b5955
spec:
accessModes:
- ReadWriteMany
dataSource: null
resources:
requests:
storage: 5Gi
storageClassName: manual
volumeMode: Filesystem
volumeName: postgres-pv-volume
status:
accessModes:
- ReadWriteMany
capacity:
storage: 5Gi
phase: Bound
- apiVersion: v1
kind: PersistentVolume
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"v1","kind":"PersistentVolume","metadata":{"annotations":{},"labels":{"app":"postgres","type":"local"},"name":"postgres-pv-volume"},"spec":{"accessModes":["ReadWriteMany"],"capacity":{"storage":"5Gi"},"hostPath":{"path":"/mnt/data"},"storageClassName":"manual"}}
pv.kubernetes.io/bound-by-controller: "yes"
creationTimestamp: "2019-02-14T10:11:47Z"
finalizers:
- kubernetes.io/pv-protection
labels:
app: postgres
type: local
name: postgres-pv-volume
resourceVersion: "273449"
selfLink: /api/v1/persistentvolumes/postgres-pv-volume
uid: f01f5beb-3040-11e9-94cc-9252dc3b5955
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 5Gi
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
name: postgres-pv-claim
namespace: default
resourceVersion: "273446"
uid: f02728ee-3040-11e9-94cc-9252dc3b5955
hostPath:
path: /mnt/data
type: ""
persistentVolumeReclaimPolicy: Retain
storageClassName: manual
volumeMode: Filesystem
status:
phase: Bound
kind: List
metadata:
resourceVersion: ""
selfLink: ""
I can't figure out what I should do now. If you can suggest improvements to my setup, please leave a comment.
Your setup is missing the database Deployment and Service (plus an optional, but highly recommended, PersistentVolumeClaim). Django's HOST: 'db' can only resolve if a Service named db exists in the same namespace, for example:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: db-deployment
labels:
app: db-deployment
spec:
replicas: 1
template:
metadata:
labels:
app: db
spec:
containers:
- image: postgres:9.4
name: db
ports:
- containerPort: 5432
volumeMounts:
- name: postgres-db-data
mountPath: /var/lib/postgresql
volumes:
- name: postgres-db-data
persistentVolumeClaim:
claimName: db-data
---
apiVersion: v1
kind: Service
metadata:
name: db
labels:
name: db
spec:
ports:
- name: db
port: 5432
selector:
app: db
---
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
name: "db-data"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storageClassName: "your storage class"
Related
I have been working on a project with Postgres and want to deploy it to EKS. My deployment is giving me this error:
Warning FailedScheduling 29s default-scheduler 0/2 nodes are available: 2 pod has unbound immediate PersistentVolumeClaims. preemption: 0/2 nodes are available: 2 Preemption is not helpful for scheduling.
I have tried changing the storage capacity but nothing worked.
Various components of the YAML file are below:
apiVersion: v1
kind: Secret
metadata:
name: xdb-secret
namespace: default
type: Opaque
data:
django-secret-key: xxx
django_database_name: xxx
django_database_username: xxx
django_database_password: xxx
email_host_user: xxx
email_host_password: xxx
# ---
# apiVersion: v1
# kind: PersistentVolume
# metadata:
# name: xdb-pv
# labels:
# type: local
# spec:
# capacity:
# storage: 10Gi
# volumeMode: Filesystem
# accessModes:
# - ReadWriteOnce
# storageClassName: standard
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: xdb-pvc
namespace: default
labels:
app: local
spec:
storageClassName: standard
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
# volumeName: xdb-pv
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: xdb-deployment
namespace: default
labels:
app: xdb
spec:
selector:
matchLabels:
app: xdb
replicas: 1
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: xdb
spec:
# initContainers:
# Init containers are exactly like regular containers, except:
# - Init containers always run to completion.
# - Each init container must complete successfully before the next one starts.
containers:
- name: xdb-cont
image: postgres:latest
env:
- name: DJANGO_SECRET_KEY
valueFrom:
secretKeyRef:
name: xdb-secret
key: django-secret-key
- name: DJANGO_DATABASE_NAME
valueFrom:
secretKeyRef:
name: xdb-secret
key: django_database_name
- name: DJANGO_DATABASE_USERNAME
valueFrom:
secretKeyRef:
name: xdb-secret
key: django_database_username
- name: DJANGO_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: xdb-secret
key: django_database_password
- name: EMAIL_HOST_USER
valueFrom:
secretKeyRef:
name: xdb-secret
key: email_host_user
- name: EMAIL_HOST_PASSWORD
valueFrom:
secretKeyRef:
name: xdb-secret
key: email_host_password
- name: DJANGO_DATABASE_PORT
value: "5432"
- name: DJANGO_DATABASE_HOST
value: xdb-service
ports:
- containerPort: 5432
name: xdb-cont
volumeMounts:
- name: xdb-volume-mount
mountPath: /var/lib/postgresql/data
volumes:
- name: xdb-volume-mount
persistentVolumeClaim:
claimName: xdb-pvc
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
name: xdb-service
namespace: default
spec:
selector:
app: xdb
ports:
- protocol: TCP
port: 5432
targetPort: 5432
Details:
replica: 1
node: 2
each node capacity: 20Gi
Capacity type: Spot
Desired size: 2 nodes
Minimum size: 2 nodes
Maximum size: 5 nodes
I am not sure what's going wrong here.
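A hedged first step is to check whether the PVC is actually Bound and whether a StorageClass named standard exists, since EKS clusters commonly ship with a gp2 StorageClass rather than standard:
$ kubectl get storageclass
$ kubectl get pvc xdb-pvc -n default
$ kubectl describe pvc xdb-pvc -n default
$ kubectl describe pod <xdb-deployment-pod> -n default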
I have deployed my service as a NodePort, but connection requests from my terminal do not reach the application unless I use port-forward. Here are my specs:
$ kubectl get svc rhs-servicebase -o yaml
apiVersion: v1
kind: Service
metadata:
annotations:
getambassador.io/config: |
apiVersion: ambassador/v1
kind: Mapping
name: http_referral-health-signal_mapping
grpc: false
prefix: /referral-health-signal/
rewrite: /
timeout_ms: 0
service: rhs-servicebase:9000
cors:
origins: "*"
headers: X-User-Agent, X-Grpc-Web, Content-Type, Authorization
max_age: "1728000"
creationTimestamp: "2022-08-31T22:32:28Z"
labels:
app.kubernetes.io/name: servicebase
name: rhs-servicebase
namespace: default
resourceVersion: "93013"
uid: 84aba835-6399-49f4-be4f-4e6454d1bd7d
spec:
clusterIP: 10.103.51.237
clusterIPs:
- 10.103.51.237
externalTrafficPolicy: Cluster
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- nodePort: 30001
port: 9000
protocol: TCP
targetPort: 9000
selector:
app.kubernetes.io/instance: rhs
app.kubernetes.io/name: servicebase
sessionAffinity: None
type: NodePort
status:
loadBalancer: {}
$ kubectl get deployment rhs-servicebase -o yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: "2022-08-31T22:32:28Z"
generation: 1
labels:
app.kubernetes.io/instance: rhs
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: servicebase
helm.sh/chart: servicebase-0.1.5
name: rhs-servicebase
namespace: default
resourceVersion: "93040"
uid: 04af37db-94e0-42b3-91e1-56272791c70a
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: rhs
app.kubernetes.io/name: servicebase
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: rhs
app.kubernetes.io/name: servicebase
spec:
containers:
- env:
- name: TZ
value: UTC
- name: LOG_FILE
value: application.log
- name: S3_BUCKET
value: livongo-int-healthsignal
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: aws-access-key-id
name: referral-health-signal
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: aws-secret-access-key
name: referral-health-signal
image: localhost:5000/referral-health-signal:latest
imagePullPolicy: Always
name: servicebase
ports:
- containerPort: 9000
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- name: secret-referral-health-signal
secret:
defaultMode: 420
items:
- key: aws-access-key-id
path: AWS_ACCESS_KEY_ID
- key: aws-secret-access-key
path: AWS_SECRET_ACCESS_KEY
secretName: referral-health-signal
status:
availableReplicas: 1
conditions:
- lastTransitionTime: "2022-08-31T22:32:30Z"
lastUpdateTime: "2022-08-31T22:32:30Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
- lastTransitionTime: "2022-08-31T22:32:28Z"
lastUpdateTime: "2022-08-31T22:32:30Z"
message: ReplicaSet "rhs-servicebase-6f676c458c" has successfully progressed.
reason: NewReplicaSetAvailable
status: "True"
type: Progressing
observedGeneration: 1
readyReplicas: 1
replicas: 1
updatedReplicas: 1
$ kubectl get pod rhs-servicebase-6f676c458c-f2rw6 -o yaml
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2022-08-31T22:32:28Z"
generateName: rhs-servicebase-6f676c458c-
labels:
app.kubernetes.io/instance: rhs
app.kubernetes.io/name: servicebase
pod-template-hash: 6f676c458c
name: rhs-servicebase-6f676c458c-f2rw6
namespace: default
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: rhs-servicebase-6f676c458c
uid: 983b450e-4fe2-40fb-a332-a959d9b569bc
resourceVersion: "93036"
uid: 3dff4f66-8369-4855-a371-0fc2f37209a4
spec:
containers:
- env:
- name: TZ
value: UTC
- name: LOG_FILE
value: application.log
- name: S3_BUCKET
value: livongo-int-healthsignal
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: aws-access-key-id
name: referral-health-signal
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: aws-secret-access-key
name: referral-health-signal
image: localhost:5000/referral-health-signal:latest
imagePullPolicy: Always
name: servicebase
ports:
- containerPort: 9000
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-c984r
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: docker-desktop
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: secret-referral-health-signal
secret:
defaultMode: 420
items:
- key: aws-access-key-id
path: AWS_ACCESS_KEY_ID
- key: aws-secret-access-key
path: AWS_SECRET_ACCESS_KEY
secretName: referral-health-signal
- name: kube-api-access-c984r
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2022-08-31T22:32:28Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2022-08-31T22:32:30Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2022-08-31T22:32:30Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2022-08-31T22:32:28Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: docker://09d33da961a079adb4f6629eebd595dd6338b56f1f3ec779878503e9de04524f
image: localhost:5000/referral-health-signal:latest
imageID: docker-pullable://localhost:5000/referral-health-signal@sha256:d4dfeb70caa8145babcb025c287ec361bb1e920bf556cdec166d1d54f2136d1a
lastState: {}
name: servicebase
ready: true
restartCount: 0
started: true
state:
running:
startedAt: "2022-08-31T22:32:29Z"
hostIP: 192.168.65.4
phase: Running
podIP: 10.1.0.64
podIPs:
- ip: 10.1.0.64
qosClass: BestEffort
startTime: "2022-08-31T22:32:28Z"
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 5d6h
rhs-servicebase NodePort 10.103.51.237 <none> 9000:30001/TCP 40m
$ kubectl get endpoints
NAME ENDPOINTS AGE
kubernetes 192.168.65.4:6443 5d6h
rhs-servicebase 10.1.0.64:9000 40m
I can't understand what I'm missing in my config. Even when I exec into the pod and run $ curl -i http://localhost:9000/ I don't get a response until I've turned on port forwarding, which seems weird; at the very least, the container should be able to reach itself, right?
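A couple of hedged checks that can help localize this: confirm the process in the container is listening on 0.0.0.0:9000 rather than only on 127.0.0.1, and test the Service from inside the cluster as well as via the node port (on Docker Desktop node ports are usually reachable on localhost). Assuming netstat or ss is available in the image:
$ kubectl exec -it rhs-servicebase-6f676c458c-f2rw6 -- sh -c 'netstat -tlnp || ss -tlnp'
$ kubectl run tmp --rm -it --image=busybox --restart=Never -- wget -qO- http://rhs-servicebase.default.svc.cluster.local:9000/
$ curl -i http://localhost:30001/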
I have a GKE cluster with Istio deployed in it. I have added the cluster's node-pool instance groups as backends of a GCP HTTP(S) LB.
To perform health checks on the backends, I have created the following health check:
name: gke-http-hc
path: /healthz/ready (istio-ingressgateway readinessProbe path)
port: 30302 (for this the target port is 15021, which is the status port of istio-ingressgateway)
Protocol: HTTP
I can see that the health checks are all successful. But if I try to access my application via its URL, I get a 404 error.
However, if I apply a TCP health check instead and access the application via the same URL, I get the desired 200 OK response.
The TCP health check has the following config:
name: gke-tcp-hc
Protocol: TCP
Port: 31397 (for this the target port is 80)
Why does my app behave differently with HTTP and TCP health checks? Is there any other configuration I need to make the HTTP health check (which queries istio-ingressgateway's status port) work?
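One hedged way to see what each node port actually serves is to curl them directly from a machine that can reach the nodes: 30302 maps to the gateway's status port (15021), which only serves paths like /healthz/ready, while 31397 maps to the gateway's http2 port (80), where the Gateway/VirtualService routes live and which normally expects the Host header configured on the Gateway:
$ curl -s -o /dev/null -w '%{http_code}\n' http://<node-ip>:30302/healthz/ready
$ curl -s -o /dev/null -w '%{http_code}\n' http://<node-ip>:30302/
$ curl -s -o /dev/null -w '%{http_code}\n' -H 'Host: st.app.org' http://<node-ip>:31397/status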
Following are my k8s manifests for istio-ingressgateway:
Deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: istio-ingressgateway
install.operator.istio.io/owning-resource: unknown
install.operator.istio.io/owning-resource-namespace: istio-system
istio: ingressgateway
istio.io/rev: default
operator.istio.io/component: IngressGateways
operator.istio.io/managed: Reconcile
operator.istio.io/version: 1.9.5
release: istio
name: istio-ingressgateway
namespace: istio-system
spec:
progressDeadlineSeconds: 600
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app: istio-ingressgateway
istio: ingressgateway
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/path: /stats/prometheus
prometheus.io/port: "15020"
prometheus.io/scrape: "true"
sidecar.istio.io/inject: "false"
creationTimestamp: null
labels:
app: istio-ingressgateway
chart: gateways
heritage: Tiller
install.operator.istio.io/owning-resource: unknown
istio: ingressgateway
istio.io/rev: default
operator.istio.io/component: IngressGateways
release: istio
service.istio.io/canonical-name: istio-ingressgateway
service.istio.io/canonical-revision: latest
sidecar.istio.io/inject: "false"
spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- amd64
weight: 2
- preference:
matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- ppc64le
weight: 2
- preference:
matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- s390x
weight: 2
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- amd64
- ppc64le
- s390x
containers:
- args:
- proxy
- router
- --domain
- $(POD_NAMESPACE).svc.cluster.local
- --proxyLogLevel=warning
- --proxyComponentLogLevel=misc:error
- --log_output_level=default:info
- --serviceCluster
- istio-ingressgateway
env:
- name: JWT_POLICY
value: third-party-jwt
- name: PILOT_CERT_PROVIDER
value: istiod
- name: CA_ADDR
value: istiod.istio-system.svc:15012
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: INSTANCE_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.serviceAccountName
- name: CANONICAL_SERVICE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.labels['service.istio.io/canonical-name']
- name: CANONICAL_REVISION
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.labels['service.istio.io/canonical-revision']
- name: ISTIO_META_WORKLOAD_NAME
value: istio-ingressgateway
- name: ISTIO_META_OWNER
value: kubernetes://apis/apps/v1/namespaces/istio-system/deployments/istio-ingressgateway
- name: ISTIO_META_UNPRIVILEGED_POD
value: "true"
- name: ISTIO_META_ROUTER_MODE
value: standard
- name: ISTIO_META_CLUSTER_ID
value: Kubernetes
image: docker.io/istio/proxyv2:1.9.5
imagePullPolicy: IfNotPresent
name: istio-proxy
ports:
- containerPort: 15021
protocol: TCP
- containerPort: 8080
protocol: TCP
- containerPort: 8443
protocol: TCP
- containerPort: 15012
protocol: TCP
- containerPort: 15443
protocol: TCP
- containerPort: 15090
name: http-envoy-prom
protocol: TCP
readinessProbe:
failureThreshold: 30
httpGet:
path: /healthz/ready
port: 15021
scheme: HTTP
initialDelaySeconds: 1
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
resources:
limits:
cpu: "2"
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/istio/proxy
name: istio-envoy
- mountPath: /etc/istio/config
name: config-volume
- mountPath: /var/run/secrets/istio
name: istiod-ca-cert
- mountPath: /var/run/secrets/tokens
name: istio-token
readOnly: true
- mountPath: /var/lib/istio/data
name: istio-data
- mountPath: /etc/istio/pod
name: podinfo
- mountPath: /etc/istio/ingressgateway-certs
name: ingressgateway-certs
readOnly: true
- mountPath: /etc/istio/ingressgateway-ca-certs
name: ingressgateway-ca-certs
readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 1337
runAsGroup: 1337
runAsNonRoot: true
runAsUser: 1337
serviceAccount: istio-ingressgateway-service-account
serviceAccountName: istio-ingressgateway-service-account
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 420
name: istio-ca-root-cert
name: istiod-ca-cert
- downwardAPI:
defaultMode: 420
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.labels
path: labels
- fieldRef:
apiVersion: v1
fieldPath: metadata.annotations
path: annotations
- path: cpu-limit
resourceFieldRef:
containerName: istio-proxy
divisor: 1m
resource: limits.cpu
- path: cpu-request
resourceFieldRef:
containerName: istio-proxy
divisor: 1m
resource: requests.cpu
name: podinfo
- emptyDir: {}
name: istio-envoy
- emptyDir: {}
name: istio-data
- name: istio-token
projected:
defaultMode: 420
sources:
- serviceAccountToken:
audience: istio-ca
expirationSeconds: 43200
path: istio-token
- configMap:
defaultMode: 420
name: istio
optional: true
name: config-volume
- name: ingressgateway-certs
secret:
defaultMode: 420
optional: true
secretName: istio-ingressgateway-certs
- name: ingressgateway-ca-certs
secret:
defaultMode: 420
optional: true
secretName: istio-ingressgateway-ca-certs
Service:
apiVersion: v1
kind: Service
metadata:
labels:
app: istio-ingressgateway
install.operator.istio.io/owning-resource: unknown
install.operator.istio.io/owning-resource-namespace: istio-system
istio: ingressgateway
istio.io/rev: default
operator.istio.io/component: IngressGateways
operator.istio.io/managed: Reconcile
operator.istio.io/version: 1.9.5
release: istio
name: istio-ingressgateway
namespace: istio-system
spec:
clusterIP: 10.30.192.198
externalTrafficPolicy: Cluster
ports:
- name: status-port
nodePort: 30302
port: 15021
protocol: TCP
targetPort: 15021
- name: http2
nodePort: 31397
port: 80
protocol: TCP
targetPort: 8080
- name: https
nodePort: 32343
port: 443
protocol: TCP
targetPort: 8443
- name: tcp-istiod
nodePort: 30255
port: 15012
protocol: TCP
targetPort: 15012
- name: tls
nodePort: 30490
port: 15443
protocol: TCP
targetPort: 15443
selector:
app: istio-ingressgateway
istio: ingressgateway
sessionAffinity: None
type: NodePort
Here are my app manifests:
Deployment:
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: app-st-sc5ght
release: app-st-sc5ght
heritage: Helm
chart: app-chart
name: app-st-sc5ght
namespace: app-st
spec:
replicas: 5
selector:
matchLabels:
app: app-st-sc5ght
release: app-st-sc5ght
heritage: Helm
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 0
type: RollingUpdate
template:
metadata:
labels:
app: app1
release: app-st-sc5ght
heritage: Helm
spec:
imagePullSecrets:
- name: registry-key
volumes:
- name: app-config
configMap:
name: app-st-config
containers:
- image: reg.org.jp/app:1.0.1
imagePullPolicy: Always
name: app
resources:
requests:
memory: "64Mi"
cpu: 0.2
limits:
memory: "256Mi"
cpu: 0.5
env:
- name: STDOUT_STACKDRIVER_LOG
value: '1'
ports:
- containerPort: 9000
protocol: TCP
volumeMounts:
- name: app-config
mountPath: /app_config
readOnly: true
livenessProbe:
httpGet:
path: /status
port: 9000
initialDelaySeconds: 11
periodSeconds: 7
readinessProbe:
httpGet:
path: /status
port: 9000
initialDelaySeconds: 3
periodSeconds: 5
Service:
---
apiVersion: v1
kind: Service
metadata:
name: app-st-sc5ght
namespace: app-st
labels:
app: app-st-sc5ght
release: app-st-sc5ght
heritage: Helm
spec:
type: NodePort
ports:
- port: 9000
nodePort: 32098
targetPort: 9000
protocol: TCP
name: app-web
selector:
app: app-st-sc5ght
DestinationRule:
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: app-st-sc5ght
namespace: app-st
labels:
app: app-st-sc5ght
release: app-st-sc5ght
heritage: Helm
spec:
host: app-st-sc5ght.app-st.svc.cluster.local
subsets:
- name: stable
labels:
track: stable
version: stable
- name: rollout
labels:
track: rollout
version: rollout
Gateway:
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: app-st-sc5ght
namespace: app-st
labels:
app: app-st-sc5ght
release: app-st-sc5ght
heritage: Helm
track: stable
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 80
name: app-st-sc5ght
protocol: HTTP
hosts:
- st.app.org
VirtualService:
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: app-st-sc5ght
namespace: app-st
labels:
app: app-st-sc5ght
release: app-st-sc5ght
heritage: Helm
track: stable
spec:
gateways:
- app-st-sc5ght
hosts:
- st.app.org
http:
- match:
- uri:
prefix: /status
headers:
request:
add:
endpoint: status
response:
add:
endpoint: status
version: 1.0.1
route:
- destination:
port:
number: 9000
host: app-st-sc5ght.app-st.svc.cluster.local
subset: stable
weight: 100
- destination:
port:
number: 9000
host: app-st-sc5ght.app-st.svc.cluster.local
subset: rollout
weight: 0
- match:
- uri:
prefix: /public/list/v4/
rewrite:
uri: /list/v4/
headers:
request:
add:
endpoint: list
response:
add:
endpoint: list
route:
- destination:
port:
number: 9000
host: app-st-sc5ght.app-st.svc.cluster.local
subset: stable
weight: 100
- destination:
port:
number: 9000
host: app-st-sc5ght.app-st.svc.cluster.local
subset: rollout
weight: 0
- match:
- uri:
prefix: /
headers:
request:
add:
endpoint: home
response:
add:
endpoint: home
route:
- destination:
port:
number: 9000
host: app-st-sc5ght.app-st.svc.cluster.local
subset: stable
weight: 100
- destination:
port:
number: 9000
host: app-st-sc5ght.app-st.svc.cluster.local
subset: rollout
weight: 0
I'm trying to create a deployment on AWS EKS with my application and Metricbeat as a sidecar, so I have the following YAML:
---
apiVersion: v1
kind: ConfigMap
metadata:
name: metricbeat-modules
namespace: testframework
labels:
k8s-app: metricbeat
data:
kubernetes.yml: |-
- module: kubernetes
metricsets:
- node
- system
- pod
- container
- volume
period: 10s
host: ${NODE_NAME}
hosts: [ "https://${NODE_IP}:10250" ]
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.verification_mode: "none"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: metricbeat-config
namespace: testframework
labels:
k8s-app: metricbeat
data:
metricbeat.yml: |-
processors:
- add_cloud_metadata:
- add_tags:
tags: ["EKSCORP_DEV"]
target: "cluster_test"
metricbeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
output.elasticsearch:
index: "metricbeat-k8s-%{[agent.version]}-%{+yyyy.MM.dd}"
setup.template.name: "metricbeat-k8s"
setup.template.pattern: "metricbeat-k8s-*"
setup.ilm.enabled: false
cloud.id: ${ELASTIC_CLOUD_ID}
cloud.auth: ${ELASTIC_CLOUD_AUTH}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: testframework-initializr-deploy
namespace: testframework
spec:
replicas: 1
selector:
matchLabels:
app: testframework-initializr
template:
metadata:
labels:
app: testframework-initializr
annotations:
co.elastic.logs/enabled: 'true'
co.elastic.logs/json.keys_under_root: 'true'
co.elastic.logs/json.add_error_key: 'true'
co.elastic.logs/json.message_key: 'message'
spec:
containers:
- name: testframework-initializr
image: XXXXX.dkr.ecr.us-east-1.amazonaws.com/testframework-initializr
ports:
- containerPort: 8080
livenessProbe:
httpGet:
path: /health/liveness
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
timeoutSeconds: 60
failureThreshold: 5
readinessProbe:
httpGet:
port: 8080
path: /health
initialDelaySeconds: 300
periodSeconds: 10
timeoutSeconds: 10
failureThreshold: 3
- name: metricbeat-sidecar
image: docker.elastic.co/beats/metricbeat:7.12.0
args: [
"-c", "/etc/metricbeat.yml",
"-e",
"-system.hostfs=/hostfs"
]
env:
- name: ELASTIC_CLOUD_ID
value: xxxx
- name: ELASTIC_CLOUD_AUTH
value: xxxx
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
securityContext:
runAsUser: 0
volumeMounts:
- name: config
mountPath: /etc/metricbeat.yml
readOnly: true
subPath: metricbeat.yml
- name: modules
mountPath: /usr/share/metricbeat/modules.d
readOnly: true
volumes:
- name: config
configMap:
defaultMode: 0640
name: metricbeat-config
- name: modules
configMap:
defaultMode: 0640
name: metricbeat-modules
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: prom-admin
rules:
- apiGroups: [""]
resources: ["pods", "nodes"]
verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: prom-rbac
subjects:
- kind: ServiceAccount
name: default
namespace: testframework
roleRef:
kind: ClusterRole
name: prom-admin
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: testframework-initializr-service
namespace: testframework
spec:
type: NodePort
ports:
- port: 80
targetPort: 8080
selector:
app: testframework-initializr
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: testframework-initializr-ingress
annotations:
kubernetes.io/ingress.class: alb
alb.ingress.kubernetes.io/scheme: internal
alb.ingress.kubernetes.io/target-type: ip
spec:
rules:
- host: dev-initializr.test.net
http:
paths:
- backend:
serviceName: testframework-initializr-service
servicePort: 80
Well, after the pod started up in AWS EKS, I got the following error in the Metricbeat container:
INFO module/wrapper.go:259 Error fetching data for metricset kubernetes.system: error doing HTTP request to fetch 'system' Metricset data: error making http request: Get "https://IP_FROM_FARGATE_HERE:10250/stats/summary": dial tcp IP_FROM_FARGATE_HERE:10250: connect: connection refused
I tried using NODE_NAME instead of NODE_IP, but then I got "no such host". Any idea how I can fix this?
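One hedged way to narrow this down is to check, from inside the Metricbeat sidecar, what NODE_IP actually resolved to and whether the kubelet endpoint the kubernetes module scrapes (https://${NODE_IP}:10250) is reachable at all; note that in the manifest above NODE_IP is populated from status.podIP rather than status.hostIP. Assuming curl is available in the image:
$ kubectl -n testframework exec deploy/testframework-initializr-deploy -c metricbeat-sidecar -- env | grep NODE
$ kubectl -n testframework exec deploy/testframework-initializr-deploy -c metricbeat-sidecar -- sh -c 'curl -sk "https://${NODE_IP}:10250/stats/summary" -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | head'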
I have a k8s environment with one master and two worker nodes. On one of the nodes two pods (call them pod-A and pod-B) were running, and pod-A got evicted due to disk pressure while pod-B kept running on the same node without being evicted. I checked the node's resources (RAM and disk space) and plenty of space is available. I also checked Docker usage with "docker system df": it shows 48% reclaimable space for images and 0% reclaimable for everything else. Finally, I removed all the evicted replicas of pod-A, and it is running fine now.
1) When pod-B kept running on the same node, why did pod-A get evicted?
2) Why was pod-A evicted when sufficient resources were available?
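A hedged way to see why the kubelet evicted the pod is to look at the node's conditions (DiskPressure in particular), the recent eviction events, and the evicted pod's status message:
$ kubectl describe node <node-name> | grep -A5 Conditions
$ kubectl get events --sort-by=.lastTimestamp | grep -i evict
$ kubectl describe pod <evicted-pod-name>
The two Deployments that were running on that node are below: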
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.17.0 (0c01409)
creationTimestamp: null
labels:
io.kompose.service: zuul
name: zuul
spec:
progressDeadlineSeconds: 2145893647
replicas: 1
revisionHistoryLimit: 2145893647
selector:
matchLabels:
io.kompose.service: zuul
strategy:
type: Recreate
template:
metadata:
creationTimestamp: null
labels:
io.kompose.service: zuul
spec:
containers:
- env:
- name: DATA_DIR
value: /data/work/
- name: log_file_path
value: /data/work/logs/zuul/
- name: spring_cloud_zookeeper_connectString
value: zoo_host:5168
image: repository/zuul:version
imagePullPolicy: Always
name: zuul
ports:
- containerPort: 9090
hostPort: 9090
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /data/work/
name: zuul-claim0
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
disktype: node1
imagePullSecrets:
- name: regcred
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- hostPath:
path: /opt/DATA_DIR
type: ""
name: zuul-claim0
status: {}
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.17.0 (0c01409)
creationTimestamp: null
labels:
io.kompose.service: routing
name: routing
spec:
progressDeadlineSeconds: 2147483647
replicas: 1
revisionHistoryLimit: 2147483647
selector:
matchLabels:
io.kompose.service: routing
strategy:
type: Recreate
template:
metadata:
creationTimestamp: null
labels:
io.kompose.service: routing
spec:
containers:
- env:
- name: DATA_DIR
value: /data/work/
- name: log_file_path
value: /data/logs/routing/
- name: spring_cloud_zookeeper_connectString
value: zoo_host:5168
image: repository/routing:version
imagePullPolicy: Always
name: routing
ports:
- containerPort: 8090
hostPort: 8090
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /data/work/
name: routing-claim0
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
disktype: node1
imagePullSecrets:
- name: regcred
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- hostPath:
path: /opt/DATA_DIR
type: ""
name: routing-claim0
status: {}