Merge branch 'seth/video-server-healthchecks' into develop
commit c0296d4552
@@ -67,10 +67,10 @@ pipelines:
           - VERSION="latest"
           - docker load --input "coturn.tar"
           - docker load --input "coturn-dns.tar"
-          - docker tag "coturn" "gcr.io/${GCLOUD_PROJECT}/coturn:${VERSION}"
-          - docker tag "coturn-dns" "gcr.io/${GCLOUD_PROJECT}/coturn-dns:${VERSION}"
-          - docker push "gcr.io/${GCLOUD_PROJECT}/coturn:${VERSION}"
-          - docker push "gcr.io/${GCLOUD_PROJECT}/coturn-dns:${VERSION}"
+          - docker tag "coturn" "gcr.io/${GCLOUD_PROJECT}/coturn:${VERSION-staging}"
+          - docker tag "coturn-dns" "gcr.io/${GCLOUD_PROJECT}/coturn-dns:${VERSION-staging}"
+          - docker push "gcr.io/${GCLOUD_PROJECT}/coturn:${VERSION-staging}"
+          - docker push "gcr.io/${GCLOUD_PROJECT}/coturn-dns:${VERSION-staging}"
         services:
           - docker
     - step:
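Note on the hunk above: ${VERSION-staging} is POSIX default-value expansion ("$VERSION if set, otherwise the literal staging"), not a -staging suffix. With VERSION="latest" assigned two lines earlier, these commands still produce the latest tag; a suffixed tag would have to be spelled ${VERSION}-staging. A quick shell demonstration:

    VERSION="latest"
    echo "${VERSION-staging}"    # -> latest   (VERSION is set, default ignored)
    unset VERSION
    echo "${VERSION-staging}"    # -> staging  (VERSION unset, default used)
    echo "${VERSION:-staging}"   # -> staging  (the :- form also treats empty as unset)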
@@ -113,3 +113,72 @@ pipelines:
             variables:
               KUBE_CONFIG: $KUBE_CONFIG_PRD
               KUBECTL_COMMAND: '-n coturn-dns rollout status -w deployment/coturn-dns'
+  develop:
+    - parallel:
+        - step:
+            name: Build images
+            script:
+              - docker build . --file docker/coturn/Dockerfile --tag coturn
+              - docker build . --file docker/coturn-dns/Dockerfile --tag coturn-dns
+              - docker save coturn --output "coturn.tar"
+              - docker save coturn-dns --output "coturn-dns.tar"
+            services:
+              - docker
+            caches:
+              - docker
+            artifacts:
+              - "*.tar"
+        - step:
+            name: Deploy terraform
+            image: hashicorp/terraform:latest
+            script:
+              - cd terraform/
+              - terraform init
+              - terraform plan
+              - terraform apply -input=false -auto-approve
+    - step:
+        name: Deploy K8s apps (staging)
+        script:
+          - pipe: atlassian/kubectl-run:3.1.2
+            variables:
+              KUBE_CONFIG: $KUBE_CONFIG_STG
+              KUBECTL_COMMAND: '-n argocd apply -k k8s/argocd/overlays/staging'
+    - step:
+        name: Push images
+        image: google/cloud-sdk:alpine
+        script:
+          # Authenticating with the service account key file
+          - echo $GCLOUD_API_KEYFILE | base64 -d > ./gcloud-api-key.json
+          - gcloud auth activate-service-account --key-file gcloud-api-key.json
+          - gcloud config set project $GCLOUD_PROJECT
+          - cat ./gcloud-api-key.json | docker login -u _json_key --password-stdin https://gcr.io
+          # Push Docker images
+          - VERSION="latest"
+          - docker load --input "coturn.tar"
+          - docker load --input "coturn-dns.tar"
+          - docker tag "coturn" "gcr.io/${GCLOUD_PROJECT}/coturn:${VERSION}"
+          - docker tag "coturn-dns" "gcr.io/${GCLOUD_PROJECT}/coturn-dns:${VERSION}"
+          - docker push "gcr.io/${GCLOUD_PROJECT}/coturn:${VERSION}"
+          - docker push "gcr.io/${GCLOUD_PROJECT}/coturn-dns:${VERSION}"
+        services:
+          - docker
+    - step:
+        name: Deploy coturn/coturn-dns to staging
+        deployment: staging
+        script:
+          - pipe: atlassian/kubectl-run:1.1.2
+            variables:
+              KUBE_CONFIG: $KUBE_CONFIG_STG
+              KUBECTL_COMMAND: '-n coturn rollout restart deployment/coturn'
+          - pipe: atlassian/kubectl-run:1.1.2
+            variables:
+              KUBE_CONFIG: $KUBE_CONFIG_STG
+              KUBECTL_COMMAND: '-n coturn rollout status -w deployment/coturn'
+          - pipe: atlassian/kubectl-run:1.1.2
+            variables:
+              KUBE_CONFIG: $KUBE_CONFIG_STG
+              KUBECTL_COMMAND: '-n coturn-dns rollout restart deployment/coturn-dns'
+          - pipe: atlassian/kubectl-run:1.1.2
+            variables:
+              KUBE_CONFIG: $KUBE_CONFIG_STG
+              KUBECTL_COMMAND: '-n coturn-dns rollout status -w deployment/coturn-dns'
@@ -1,3 +1,4 @@
+from ipaddress import ip_address, IPv4Address
 from kubernetes import client, config
 import boto3
 import time
@@ -9,14 +10,23 @@ COTURN_DOMAIN_NAME=os.environ['COTURN_DOMAIN_NAME']
 config.load_incluster_config()
 v1 = client.CoreV1Api()
+
+
+def validIPAddress(IP: str) -> str:
+    try:
+        return "IPv4" if type(ip_address(IP)) is IPv4Address else "IPv6"
+    except ValueError:
+        return "Invalid"
+
 while(True):
     ips=[]
 
     pods = v1.list_namespaced_pod(namespace="coturn")
 
     for i in pods.items:
         node_status = v1.read_node(name=i.spec.node_name)
         for adr in node_status.status.addresses:
-            if adr.type=="ExternalIP":
+            # only collect IPv4 addresses, because we are only updating A records here
+            if adr.type=="ExternalIP" and validIPAddress(adr.address) == "IPv4":
                 ips.append({'Value': adr.address})
 
     print("Node IPs: "+str(ips))
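The hunk ends before the part of the loop that actually writes DNS, but the script imports boto3 and reads HOSTED_ZONE and COTURN_DOMAIN_NAME, so the collected `ips` presumably feed a Route53 UPSERT on the A record. A sketch of the equivalent operation from a shell (the record TTL and the example IPs are assumptions, not taken from the diff):

    HOSTED_ZONE="Z00156242SK162FEXDPVF"
    COTURN_DOMAIN_NAME="coturn.staging.video.jamkazam.com"

    # UPSERT the A record with the node IPs the script collects (203.0.113.x are placeholders)
    aws route53 change-resource-record-sets \
      --hosted-zone-id "$HOSTED_ZONE" \
      --change-batch '{
        "Changes": [{
          "Action": "UPSERT",
          "ResourceRecordSet": {
            "Name": "'"$COTURN_DOMAIN_NAME"'",
            "Type": "A",
            "TTL": 60,
            "ResourceRecords": [{"Value": "203.0.113.10"}, {"Value": "203.0.113.11"}]
          }
        }]
      }'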
@@ -0,0 +1,16 @@
# capture current state in prod
kubectl get deployment coturn-dns -o yaml --namespace coturn-dns > existing-production-deployment.yaml

# capture current state in staging
kubectl get deployment coturn-dns -o yaml --namespace coturn-dns > existing-staging-deployment.yaml


KUBECONFIG=stg
cd k8s/coturn-dns
kubectl delete secret gcr-json-key -n coturn-dns
kubectl delete ClusterRole pods-list --namespace coturn-dns
kubectl delete ClusterRoleBinding pods-list --namespace coturn-dns
kubectl delete deployment coturn-dns -n coturn-dns

ENV=production   # or staging
helm upgrade --install coturn-dns . --values values-$ENV.yaml --namespace coturn-dns
@@ -0,0 +1,88 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"coturn-dns"},"name":"coturn-dns","namespace":"coturn-dns"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"coturn-dns"}},"strategy":{"rollingUpdate":{"maxSurge":0,"maxUnavailable":1},"type":"RollingUpdate"},"template":{"metadata":{"labels":{"app":"coturn-dns"}},"spec":{"containers":[{"env":[{"name":"AWS_ACCESS_KEY_ID","valueFrom":{"secretKeyRef":{"key":"username","name":"aws-user-coturn-dns"}}},{"name":"AWS_SECRET_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"password","name":"aws-user-coturn-dns"}}},{"name":"PYTHONUNBUFFERED","value":"1"},{"name":"HOSTED_ZONE","value":"Z00156242SK162FEXDPVF"},{"name":"COTURN_DOMAIN_NAME","value":"coturn.video.jamkazam.com"}],"image":"gcr.io/tough-craft-276813/coturn-dns:latest","imagePullPolicy":"Always","name":"coturn-dns","resources":{"limits":{"memory":"128Mi"},"requests":{"memory":"128Mi"}}}],"imagePullSecrets":[{"name":"gcr-json-key"}]}}}}
  creationTimestamp: "2021-11-18T15:50:39Z"
  generation: 4
  labels:
    app.kubernetes.io/instance: coturn-dns
  name: coturn-dns
  namespace: coturn-dns
  resourceVersion: "272107840"
  uid: 9c0beb6c-43fb-4d01-8ec3-b54110035037
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: coturn-dns
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2021-11-18T19:57:36Z"
      creationTimestamp: null
      labels:
        app: coturn-dns
    spec:
      containers:
      - env:
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              key: username
              name: aws-user-coturn-dns
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              key: password
              name: aws-user-coturn-dns
        - name: PYTHONUNBUFFERED
          value: "1"
        - name: HOSTED_ZONE
          value: Z00156242SK162FEXDPVF
        - name: COTURN_DOMAIN_NAME
          value: coturn.video.jamkazam.com
        image: gcr.io/tough-craft-276813/coturn-dns:latest
        imagePullPolicy: Always
        name: coturn-dns
        resources:
          limits:
            memory: 128Mi
          requests:
            memory: 128Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      imagePullSecrets:
      - name: gcr-json-key
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  conditions:
  - lastTransitionTime: "2021-11-18T15:50:42Z"
    lastUpdateTime: "2021-11-18T15:50:42Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  - lastTransitionTime: "2021-11-18T15:50:40Z"
    lastUpdateTime: "2021-11-18T19:57:38Z"
    message: ReplicaSet "coturn-dns-df4f7ffc" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  observedGeneration: 4
  replicas: 1
  unavailableReplicas: 1
  updatedReplicas: 1
@@ -0,0 +1,88 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "53"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"coturn-dns"},"name":"coturn-dns","namespace":"coturn-dns"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"coturn-dns"}},"strategy":{"rollingUpdate":{"maxSurge":0,"maxUnavailable":1},"type":"RollingUpdate"},"template":{"metadata":{"labels":{"app":"coturn-dns"}},"spec":{"containers":[{"env":[{"name":"AWS_ACCESS_KEY_ID","valueFrom":{"secretKeyRef":{"key":"username","name":"aws-user-coturn-dns"}}},{"name":"AWS_SECRET_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"password","name":"aws-user-coturn-dns"}}},{"name":"PYTHONUNBUFFERED","value":"1"},{"name":"HOSTED_ZONE","value":"Z00156242SK162FEXDPVF"},{"name":"COTURN_DOMAIN_NAME","value":"coturn.staging.video.jamkazam.com"}],"image":"gcr.io/tough-craft-276813/coturn-dns:latest","imagePullPolicy":"Always","name":"coturn-dns","resources":{"limits":{"memory":"128Mi"},"requests":{"memory":"128Mi"}}}],"imagePullSecrets":[{"name":"gcr-json-key"}]}}}}
  creationTimestamp: "2021-11-15T17:02:14Z"
  generation: 56
  labels:
    app.kubernetes.io/instance: coturn-dns
  name: coturn-dns
  namespace: coturn-dns
  resourceVersion: "148191397"
  uid: d3fd02f4-1f28-4cb4-8e4d-294155213368
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: coturn-dns
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2021-11-18T19:56:56Z"
      creationTimestamp: null
      labels:
        app: coturn-dns
    spec:
      containers:
      - env:
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              key: username
              name: aws-user-coturn-dns
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              key: password
              name: aws-user-coturn-dns
        - name: PYTHONUNBUFFERED
          value: "1"
        - name: HOSTED_ZONE
          value: Z00156242SK162FEXDPVF
        - name: COTURN_DOMAIN_NAME
          value: coturn.staging.video.jamkazam.com
        image: gcr.io/tough-craft-276813/coturn-dns:latest
        imagePullPolicy: Always
        name: coturn-dns
        resources:
          limits:
            memory: 128Mi
          requests:
            memory: 128Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      imagePullSecrets:
      - name: gcr-json-key
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  conditions:
  - lastTransitionTime: "2021-11-15T17:02:14Z"
    lastUpdateTime: "2021-11-15T17:02:14Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  - lastTransitionTime: "2021-11-15T19:12:57Z"
    lastUpdateTime: "2021-11-18T19:56:58Z"
    message: ReplicaSet "coturn-dns-686c986557" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  observedGeneration: 56
  replicas: 1
  unavailableReplicas: 1
  updatedReplicas: 1
@@ -0,0 +1,15 @@
# capture current state in prod
kubectl get deployment webrtc-be -o yaml --namespace webrtc-be > existing-production-deployment.yaml

# capture current state in staging
kubectl get deployment webrtc-be -o yaml > existing-staging-deployment.yaml


KUBECONFIG=stg
cd k8s/webrtc-be
kubectl delete secret gcr-json-key -n webrtc-be
kubectl delete service webrtc-be-service -n webrtc-be
kubectl delete deployment webrtc-be -n webrtc-be
kubectl delete HorizontalPodAutoscaler webrtc-be -n webrtc-be
kubectl delete ingress webrtc-be -n webrtc-be
helm upgrade --install webrtc-be . --values values-staging.yaml --namespace webrtc-be
@@ -0,0 +1,89 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "5"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"webrtc-be","app.kubernetes.io/instance":"webrtc-be"},"name":"webrtc-be","namespace":"webrtc-be"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"webrtc-be"}},"template":{"metadata":{"labels":{"app":"webrtc-be"}},"spec":{"containers":[{"env":[{"name":"RTC_MIN_PORT","value":"30000"},{"name":"RTC_MAX_PORT","value":"32768"},{"name":"BACKEND_URL","value":"https://www.jamkazam.com"},{"name":"RESTRICT_AUTH","value":"true"}],"image":"gcr.io/tough-craft-276813/webrtc_be:latest","imagePullPolicy":"Always","name":"webrtc-be","ports":[{"containerPort":5001,"name":"websocket-port"}],"resources":{"limits":{"memory":"3800Mi"},"requests":{"cpu":"1200m","memory":"3800Mi"}}}],"hostNetwork":true,"imagePullSecrets":[{"name":"gcr-json-key"}]}}}}
  creationTimestamp: "2021-11-18T18:07:44Z"
  generation: 897
  labels:
    app: webrtc-be
    app.kubernetes.io/instance: webrtc-be
  name: webrtc-be
  namespace: webrtc-be
  resourceVersion: "272003497"
  uid: f82d6e1d-99c5-4b22-a0cf-b8d02bbe3279
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: webrtc-be
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2024-11-18T01:58:53Z"
      creationTimestamp: null
      labels:
        app: webrtc-be
    spec:
      containers:
      - env:
        - name: RTC_MIN_PORT
          value: "30000"
        - name: RTC_MAX_PORT
          value: "32768"
        - name: BACKEND_URL
          value: https://www.jamkazam.com
        - name: RESTRICT_AUTH
          value: "true"
        image: gcr.io/tough-craft-276813/webrtc_be:latest
        imagePullPolicy: Always
        name: webrtc-be
        ports:
        - containerPort: 5001
          hostPort: 5001
          name: websocket-port
          protocol: TCP
        resources:
          limits:
            memory: 3800Mi
          requests:
            cpu: 1200m
            memory: 3800Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      hostNetwork: true
      imagePullSecrets:
      - name: gcr-json-key
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: "2021-11-18T18:07:44Z"
    lastUpdateTime: "2024-11-18T01:58:56Z"
    message: ReplicaSet "webrtc-be-6b649d8cd4" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  - lastTransitionTime: "2024-11-24T15:06:22Z"
    lastUpdateTime: "2024-11-24T15:06:22Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  observedGeneration: 897
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1
@@ -0,0 +1,87 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "3"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"webrtc-be","app.kubernetes.io/instance":"webrtc-be"},"name":"webrtc-be","namespace":"webrtc-be"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"webrtc-be"}},"template":{"metadata":{"labels":{"app":"webrtc-be"}},"spec":{"containers":[{"env":[{"name":"RTC_MIN_PORT","value":"30000"},{"name":"RTC_MAX_PORT","value":"32768"},{"name":"BACKEND_URL","value":"https://staging.jamkazam.com"}],"image":"gcr.io/tough-craft-276813/webrtc_be:latest","imagePullPolicy":"Always","name":"webrtc-be","ports":[{"containerPort":5001,"name":"websocket-port"}],"resources":{"limits":{"memory":"3800Mi"},"requests":{"cpu":"1200m","memory":"3800Mi"}}}],"hostNetwork":true,"imagePullSecrets":[{"name":"gcr-json-key"}]}}}}
  creationTimestamp: "2021-11-18T08:57:30Z"
  generation: 3
  labels:
    app: webrtc-be
    app.kubernetes.io/instance: webrtc-be
  name: webrtc-be
  namespace: webrtc-be
  resourceVersion: "147684262"
  uid: 4190b38d-99f6-418a-ae75-8d8e2c6ad040
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: webrtc-be
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2024-11-18T01:57:59Z"
      creationTimestamp: null
      labels:
        app: webrtc-be
    spec:
      containers:
      - env:
        - name: RTC_MIN_PORT
          value: "30000"
        - name: RTC_MAX_PORT
          value: "32768"
        - name: BACKEND_URL
          value: https://staging.jamkazam.com
        image: gcr.io/tough-craft-276813/webrtc_be:latest
        imagePullPolicy: Always
        name: webrtc-be
        ports:
        - containerPort: 5001
          hostPort: 5001
          name: websocket-port
          protocol: TCP
        resources:
          limits:
            memory: 3800Mi
          requests:
            cpu: 1200m
            memory: 3800Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      hostNetwork: true
      imagePullSecrets:
      - name: gcr-json-key
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: "2021-11-18T08:57:30Z"
    lastUpdateTime: "2024-11-18T02:01:07Z"
    message: ReplicaSet "webrtc-be-548f98f65f" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  - lastTransitionTime: "2024-11-21T13:15:14Z"
    lastUpdateTime: "2024-11-21T13:15:14Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  observedGeneration: 3
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1
@@ -22,7 +22,7 @@ spec:
         - name: gcr-json-key
       containers:
         - name: coturn-dns
-          image: gcr.io/tough-craft-276813/coturn-dns:latest
+          image: gcr.io/tough-craft-276813/coturn-dns:{{ .Values.coturn_dns_image_tag }}
           imagePullPolicy: Always
           env:
             - name: AWS_ACCESS_KEY_ID
@@ -1 +1,3 @@
 domain: "video.jamkazam.com"
+# The version of the coturn-dns image in GCR
+coturn_dns_image_tag: 1.0.9
@@ -1 +1,3 @@
 domain: "staging.video.jamkazam.com"
+# The docker image tag for coturn-dns in GCR
+coturn_dns_image_tag: 1.0.9
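Both values files now pin coturn_dns_image_tag, which the chart's deployment template above consumes in place of :latest. For a one-off bump without editing the file, helm can override the value on the command line — a sketch following the upgrade command from the notes earlier (1.0.10 is a hypothetical next tag):

    ENV=staging   # or production
    helm upgrade --install coturn-dns . \
      --values values-$ENV.yaml \
      --set coturn_dns_image_tag=1.0.10 \
      --namespace coturn-dns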
@@ -26,12 +26,6 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: status.podIP
-          resources:
-            requests:
-              memory: "3800Mi"
-              cpu: "1200m"
-            limits:
-              memory: "3800Mi"
           ports:
             - containerPort: 3478
               name: coturn
@@ -0,0 +1 @@
gcloud container images list-tags gcr.io/tough-craft-276813/webrtc_be
@@ -0,0 +1,4 @@
# #video-cluster-staging-alerts
https://hooks.slack.com/services/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
# #video-cluster-prd-alerts
https://hooks.slack.com/services/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
@@ -0,0 +1,7 @@
# Ingress
domain: "webrtc-be.staging.video.jamkazam.com"
# Webrtc env variables
backendUrl: "https://staging.jamkazam.com"
# Webrtc_be image version
webrtc_tag: 1.0.131
@@ -2,6 +2,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: webrtc-be
+  namespace: webrtc-be
   labels:
     app: webrtc-be
 spec:
@@ -19,14 +20,14 @@ spec:
         - name: gcr-json-key
       containers:
         - name: webrtc-be
-          image: gcr.io/tough-craft-276813/webrtc_be:latest
+          image: gcr.io/tough-craft-276813/webrtc_be:{{ .Values.webrtc_tag }}
           imagePullPolicy: Always
-          resources:
-            requests:
-              memory: "3800Mi"
-              cpu: "1200m"
-            limits:
-              memory: "3800Mi"
+          #resources:
+            #requests:
+            #  memory: "3800Mi"
+            #  cpu: "1200m"
+            #limits:
+            #  memory: "3800Mi"
           env:
             - name: RTC_MIN_PORT
               value: "30000"
@@ -41,3 +42,19 @@ spec:
           ports:
             - name: websocket-port
               containerPort: 5001
+          livenessProbe:
+            httpGet:
+              path: /healthcheck
+              port: 5001
+              scheme: HTTP
+            initialDelaySeconds: 10
+            periodSeconds: 10
+            timeoutSeconds: 5
+            failureThreshold: 3
+          readinessProbe:
+            tcpSocket:
+              port: 5001
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            timeoutSeconds: 5
+            failureThreshold: 3
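These probes are the point of the branch: kubelet now GETs /healthcheck on port 5001 and restarts the container after three consecutive failures, while the TCP readiness probe gates traffic. A quick way to exercise the same endpoint by hand, reusing the kubeconfig/pod-lookup convention from the helper scripts later in this diff (assumes curl exists in the image; otherwise kubectl port-forward works):

    #!/bin/bash
    set -eu -o pipefail

    ENV=$1  # stg or prd
    KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml

    POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"`

    # -f makes curl exit non-zero on HTTP errors, mirroring a probe failure
    kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be exec $POD -- \
      curl -fsS http://localhost:5001/healthcheck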
@@ -4,4 +4,5 @@ data:
 kind: Secret
 metadata:
   name: gcr-json-key
+  namespace: webrtc-be
 type: kubernetes.io/dockerconfigjson
@@ -1,4 +1,4 @@
-apiVersion: networking.k8s.io/v1beta1
+apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   annotations:
@@ -9,14 +9,17 @@ metadata:
   name: webrtc-be
 spec:
   rules:
-    - host: &host {{ .Values.domain }}
+    - host: {{ .Values.domain }}
       http:
         paths:
-          - backend:
-              serviceName: webrtc-be-service
-              servicePort: 5001
-            path: /
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: webrtc-be-service
+                port:
+                  number: 5001
   tls:
     - secretName: webrtc-be
       hosts:
-        - *host
+        - {{ .Values.domain }}
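networking.k8s.io/v1 moves the backend under service.name/service.port and makes pathType mandatory; the &host/*host YAML anchor was dropped since the template value can simply be repeated. A post-deploy sanity check that the migrated Ingress was accepted and still routes correctly (a sketch, using the staging kubeconfig path from the scripts below):

    KUBECONFIG=~/Downloads/stg-video-cluster-kubeconfig.yaml

    # Print host -> backend service:port as the API server stored them
    kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get ingress webrtc-be \
      -o jsonpath='{.spec.rules[0].host} -> {.spec.rules[0].http.paths[0].backend.service.name}:{.spec.rules[0].http.paths[0].backend.service.port.number}{"\n"}'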
@@ -3,3 +3,6 @@ domain: "webrtc-be.video.jamkazam.com"
 # Webrtc env variables
 backendUrl: "https://www.jamkazam.com"
+auth: true
+# Webrtc_be image version
+webrtc_tag: 1.0.131
@@ -2,3 +2,6 @@
 domain: "webrtc-be.staging.video.jamkazam.com"
 # Webrtc env variables
 backendUrl: "https://staging.jamkazam.com"
+# Webrtc_be image version
+webrtc_tag: 1.0.131
@@ -0,0 +1,36 @@
[20241209 12:26:38.613] disconnectPeer 4peH1Pc77rWKD37YAA40 otherPeerId: 4peH1Pc77rWKD37YAA40
[20241209 12:26:38.613] disconnectPeer 4peH1Pc77rWKD37YAA40 room: anywhere deleting room
[20241209 12:26:38.614] disconnectPeer 4peH1Pc77rWKD37YAA40 room: anywhere num-peers: 0 num-rooms: 0
[20241209 12:26:41.344] mediasoup:ERROR:Channel [pid:34] PortManager::Bind() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769]
[20241209 12:26:41.351] mediasoup:ERROR:Channel [pid:34] Worker::HandleRequest() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:41.352] mediasoup:WARN:Channel request failed [method:ROUTER_CREATE_PIPETRANSPORT, id:362050]: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:41.353] mediasoup:ERROR:Router pipeToRouter() | error creating PipeTransport pair:: Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:41.381] This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). The promise rejected with the reason:
[20241209 12:26:41.381] Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
    at Channel.processResponse (/app/node_modules/mediasoup/node/lib/Channel.js:251:33)
    at Socket.<anonymous> (/app/node_modules/mediasoup/node/lib/Channel.js:76:34)
    at Socket.emit (node:events:518:28)
    at Socket.emit (node:domain:489:12)
    at addChunk (node:internal/streams/readable:561:12)
    at readableAddChunkPushByteMode (node:internal/streams/readable:512:3)
    at Readable.push (node:internal/streams/readable:392:5)
    at Pipe.onStreamRead (node:internal/stream_base_commons:189:23)
[20241209 12:26:49.551] disconnectPeer l1YCvM1T8b_2AjeZAA42
[20241209 12:26:49.552] disconnectPeer l1YCvM1T8b_2AjeZAA42 peer not found
[20241209 12:26:51.182] mediasoup:ERROR:Channel [pid:28] PortManager::Bind() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769]
[20241209 12:26:51.185] mediasoup:ERROR:Channel [pid:28] Worker::HandleRequest() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:51.186] mediasoup:WARN:Channel request failed [method:ROUTER_CREATE_PIPETRANSPORT, id:237454]: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:51.186] mediasoup:ERROR:Router pipeToRouter() | error creating PipeTransport pair:: Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:51.188] This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). The promise rejected with the reason:
[20241209 12:26:51.188] Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
    at Channel.processResponse (/app/node_modules/mediasoup/node/lib/Channel.js:251:33)
    at Socket.<anonymous> (/app/node_modules/mediasoup/node/lib/Channel.js:76:34)
    at Socket.emit (node:events:518:28)
    at Socket.emit (node:domain:489:12)
    at addChunk (node:internal/streams/readable:561:12)
    at readableAddChunkPushByteMode (node:internal/streams/readable:512:3)
    at Readable.push (node:internal/streams/readable:392:5)
    at Pipe.onStreamRead (node:internal/stream_base_commons:189:23)
[20241209 12:26:51.200] mediasoup:ERROR:Channel [pid:31] PortManager::Bind() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769]
[20241209 12:26:51.201] mediasoup:ERROR:Channel [pid:31] Worker::HandleRequest() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:51.201] mediasoup:WARN:Channel request failed [method:ROUTER_CREATE_PIPETRANSPORT, id:290384]: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
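The log shows mediasoup exhausting its UDP range (RTC_MIN_PORT=30000 through RTC_MAX_PORT=32768 is 2769 ports, matching numAttempt:2769). The check-udp-ports script below dumps /proc/net/udp raw; a sketch that goes one step further and counts how many ports in the RTC range are actually bound (the hex parsing is plain POSIX awk):

    #!/bin/bash
    set -eu -o pipefail

    ENV=$1  # stg or prd
    KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml

    POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"`

    # Column 2 of /proc/net/udp is the local ip:port in hex; count ports in 30000-32768
    kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be exec $POD -- cat /proc/net/udp |
      awk 'function hex2dec(h,  i, v) {
             v = 0
             for (i = 1; i <= length(h); i++)
               v = v * 16 + index("0123456789ABCDEF", toupper(substr(h, i, 1))) - 1
             return v
           }
           NR > 1 {
             split($2, a, ":")
             p = hex2dec(a[2])
             if (p >= 30000 && p <= 32768) n++
           }
           END { printf "RTC UDP ports in use: %d of %d\n", n, 32768 - 30000 + 1 }'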
@@ -0,0 +1,10 @@
#!/bin/bash
set -eu -o pipefail

ENV=$1 # stg or prd

KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml

POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"`

kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be exec $POD -- cat /proc/net/udp
@@ -0,0 +1,10 @@
#!/bin/bash
#
set -eu -o pipefail

KUBECONFIG=~/Downloads/prd-video-cluster-kubeconfig.yaml

POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"`

kubectl --kubeconfig $KUBECONFIG exec $POD --namespace webrtc-be -- cat /proc/net/udp
@@ -0,0 +1,8 @@
#!/bin/bash
set -eu -o pipefail

ENV=$1 # stg or prd
KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml

POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"`
echo $POD
@@ -0,0 +1,9 @@
#!/bin/bash
set -eu -o pipefail

ENV=$1 # stg or prd
KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml

POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"`

kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be exec -it $POD -- /bin/bash
@@ -0,0 +1,10 @@
#!/bin/bash
set -eu -o pipefail

ENV=$1 # stg or prd
EXEC=$2
KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml

POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"`

kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be exec $POD -- $EXEC
@@ -0,0 +1,4 @@
KUBECONFIG=~/Downloads/stg-video-cluster-kubeconfig.yaml
POD=$(kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name")

kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod
@@ -0,0 +1 @@
open https://video.jamkazam.com/room/anywhere?token=openroom12345
@@ -0,0 +1 @@
gcloud container images list-tags gcr.io/tough-craft-276813/webrtc_be
@@ -0,0 +1,7 @@
#!/bin/bash
set -eu -o pipefail

ENV=$1 # stg or prd
KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml

kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod -o yaml | kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be replace --force -f -
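kubectl replace --force deletes the live pod object and recreates it, so this restarts webrtc-be with a fresh image pull (imagePullPolicy: Always) at the cost of a brief outage. Typical invocation, assuming the script is saved as replace-pod.sh alongside the other helpers:

    ./replace-pod.sh stg   # recreate the webrtc-be pod in staging
    ./replace-pod.sh prd   # same for production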