This commit is contained in:
Seth Call 2025-04-06 17:58:43 -05:00
parent 87ac12ff9d
commit d6a4178f0d
19 changed files with 491 additions and 0 deletions

View File

@ -0,0 +1,16 @@
# Runbook: move coturn-dns from hand-applied manifests to the Helm chart.
# capture current state in prod
kubectl get deployment coturn-dns -o yaml --namespace coturn-dns > existing-production-deployment.yaml
# capture current state in release
kubectl get deployment coturn-dns -o yaml --namespace coturn-dns > existing-staging-deployment.yaml
# NOTE(review): both capture commands are identical; presumably the second is meant
# to run against the staging cluster — confirm which kubeconfig is active first.
# NOTE(review): "stg" looks like shorthand for the staging kubeconfig file path — confirm.
KUBECONFIG=stg
cd k8s/coturn-dns
# Delete the previously hand-applied resources so the Helm release can recreate them.
kubectl delete secret gcr-json-key -n coturn-dns
kubectl delete ClusterRole pods-list --namespace coturn-dns
kubectl delete ClusterRoleBinding pods-list --namespace coturn-dns
kubectl delete deployment coturn-dns -n coturn-dns
# Set ENV to exactly one of "production" or "staging" before running the next line.
ENV=production/staging
helm upgrade --install coturn-dns . --values values-$ENV.yaml --namespace coturn-dns

View File

@ -0,0 +1,88 @@
# Snapshot of the production coturn-dns Deployment (`kubectl get deployment
# coturn-dns -o yaml`), kept for reference during the Helm migration.
# NOTE(review): leading indentation was lost in the committed copy; restored here
# to the standard kubectl output layout.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "4"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"coturn-dns"},"name":"coturn-dns","namespace":"coturn-dns"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"coturn-dns"}},"strategy":{"rollingUpdate":{"maxSurge":0,"maxUnavailable":1},"type":"RollingUpdate"},"template":{"metadata":{"labels":{"app":"coturn-dns"}},"spec":{"containers":[{"env":[{"name":"AWS_ACCESS_KEY_ID","valueFrom":{"secretKeyRef":{"key":"username","name":"aws-user-coturn-dns"}}},{"name":"AWS_SECRET_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"password","name":"aws-user-coturn-dns"}}},{"name":"PYTHONUNBUFFERED","value":"1"},{"name":"HOSTED_ZONE","value":"Z00156242SK162FEXDPVF"},{"name":"COTURN_DOMAIN_NAME","value":"coturn.video.jamkazam.com"}],"image":"gcr.io/tough-craft-276813/coturn-dns:latest","imagePullPolicy":"Always","name":"coturn-dns","resources":{"limits":{"memory":"128Mi"},"requests":{"memory":"128Mi"}}}],"imagePullSecrets":[{"name":"gcr-json-key"}]}}}}
  creationTimestamp: "2021-11-18T15:50:39Z"
  generation: 4
  labels:
    app.kubernetes.io/instance: coturn-dns
  name: coturn-dns
  namespace: coturn-dns
  resourceVersion: "272107840"
  uid: 9c0beb6c-43fb-4d01-8ec3-b54110035037
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: coturn-dns
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2021-11-18T19:57:36Z"
      creationTimestamp: null
      labels:
        app: coturn-dns
    spec:
      containers:
      - env:
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              key: username
              name: aws-user-coturn-dns
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              key: password
              name: aws-user-coturn-dns
        - name: PYTHONUNBUFFERED
          value: "1"
        - name: HOSTED_ZONE
          value: Z00156242SK162FEXDPVF
        - name: COTURN_DOMAIN_NAME
          value: coturn.video.jamkazam.com
        image: gcr.io/tough-craft-276813/coturn-dns:latest
        imagePullPolicy: Always
        name: coturn-dns
        resources:
          limits:
            memory: 128Mi
          requests:
            memory: 128Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      imagePullSecrets:
      - name: gcr-json-key
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  conditions:
  - lastTransitionTime: "2021-11-18T15:50:42Z"
    lastUpdateTime: "2021-11-18T15:50:42Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  - lastTransitionTime: "2021-11-18T15:50:40Z"
    lastUpdateTime: "2021-11-18T19:57:38Z"
    message: ReplicaSet "coturn-dns-df4f7ffc" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  observedGeneration: 4
  replicas: 1
  unavailableReplicas: 1
  updatedReplicas: 1

View File

@ -0,0 +1,88 @@
# Snapshot of the staging coturn-dns Deployment (`kubectl get deployment
# coturn-dns -o yaml`), kept for reference during the Helm migration.
# NOTE(review): leading indentation was lost in the committed copy; restored here
# to the standard kubectl output layout.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "53"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"coturn-dns"},"name":"coturn-dns","namespace":"coturn-dns"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"coturn-dns"}},"strategy":{"rollingUpdate":{"maxSurge":0,"maxUnavailable":1},"type":"RollingUpdate"},"template":{"metadata":{"labels":{"app":"coturn-dns"}},"spec":{"containers":[{"env":[{"name":"AWS_ACCESS_KEY_ID","valueFrom":{"secretKeyRef":{"key":"username","name":"aws-user-coturn-dns"}}},{"name":"AWS_SECRET_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"password","name":"aws-user-coturn-dns"}}},{"name":"PYTHONUNBUFFERED","value":"1"},{"name":"HOSTED_ZONE","value":"Z00156242SK162FEXDPVF"},{"name":"COTURN_DOMAIN_NAME","value":"coturn.staging.video.jamkazam.com"}],"image":"gcr.io/tough-craft-276813/coturn-dns:latest","imagePullPolicy":"Always","name":"coturn-dns","resources":{"limits":{"memory":"128Mi"},"requests":{"memory":"128Mi"}}}],"imagePullSecrets":[{"name":"gcr-json-key"}]}}}}
  creationTimestamp: "2021-11-15T17:02:14Z"
  generation: 56
  labels:
    app.kubernetes.io/instance: coturn-dns
  name: coturn-dns
  namespace: coturn-dns
  resourceVersion: "148191397"
  uid: d3fd02f4-1f28-4cb4-8e4d-294155213368
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: coturn-dns
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2021-11-18T19:56:56Z"
      creationTimestamp: null
      labels:
        app: coturn-dns
    spec:
      containers:
      - env:
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              key: username
              name: aws-user-coturn-dns
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              key: password
              name: aws-user-coturn-dns
        - name: PYTHONUNBUFFERED
          value: "1"
        - name: HOSTED_ZONE
          value: Z00156242SK162FEXDPVF
        - name: COTURN_DOMAIN_NAME
          value: coturn.staging.video.jamkazam.com
        image: gcr.io/tough-craft-276813/coturn-dns:latest
        imagePullPolicy: Always
        name: coturn-dns
        resources:
          limits:
            memory: 128Mi
          requests:
            memory: 128Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      imagePullSecrets:
      - name: gcr-json-key
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  conditions:
  - lastTransitionTime: "2021-11-15T17:02:14Z"
    lastUpdateTime: "2021-11-15T17:02:14Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  - lastTransitionTime: "2021-11-15T19:12:57Z"
    lastUpdateTime: "2021-11-18T19:56:58Z"
    message: ReplicaSet "coturn-dns-686c986557" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  observedGeneration: 56
  replicas: 1
  unavailableReplicas: 1
  updatedReplicas: 1

View File

@ -0,0 +1,15 @@
# Runbook: move webrtc-be from hand-applied manifests to the Helm chart.
# capture current state in prod
kubectl get deployment webrtc-be -o yaml --namespace webrtc-be > existing-production-deployment.yaml
# capture current state in release
# Fix: the staging capture was missing --namespace webrtc-be (unlike the prod
# line above), so it would have queried the default namespace.
kubectl get deployment webrtc-be -o yaml --namespace webrtc-be > existing-staging-deployment.yaml
# NOTE(review): "stg" looks like shorthand for the staging kubeconfig file path — confirm.
KUBECONFIG=stg
cd k8s/webrtc-be
# Delete the previously hand-applied resources so the Helm release can recreate them.
kubectl delete secret gcr-json-key -n webrtc-be
kubectl delete service webrtc-be-service -n webrtc-be
kubectl delete deployment webrtc-be -n webrtc-be
kubectl delete HorizontalPodAutoscaler webrtc-be -n webrtc-be
kubectl delete ingress webrtc-be -n webrtc-be
helm upgrade --install webrtc-be . --values values-staging.yaml --namespace webrtc-be

View File

@ -0,0 +1,89 @@
# Snapshot of the production webrtc-be Deployment (`kubectl get deployment
# webrtc-be -o yaml`), kept for reference during the Helm migration.
# NOTE(review): leading indentation was lost in the committed copy; restored here
# to the standard kubectl output layout.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "5"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"webrtc-be","app.kubernetes.io/instance":"webrtc-be"},"name":"webrtc-be","namespace":"webrtc-be"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"webrtc-be"}},"template":{"metadata":{"labels":{"app":"webrtc-be"}},"spec":{"containers":[{"env":[{"name":"RTC_MIN_PORT","value":"30000"},{"name":"RTC_MAX_PORT","value":"32768"},{"name":"BACKEND_URL","value":"https://www.jamkazam.com"},{"name":"RESTRICT_AUTH","value":"true"}],"image":"gcr.io/tough-craft-276813/webrtc_be:latest","imagePullPolicy":"Always","name":"webrtc-be","ports":[{"containerPort":5001,"name":"websocket-port"}],"resources":{"limits":{"memory":"3800Mi"},"requests":{"cpu":"1200m","memory":"3800Mi"}}}],"hostNetwork":true,"imagePullSecrets":[{"name":"gcr-json-key"}]}}}}
  creationTimestamp: "2021-11-18T18:07:44Z"
  generation: 897
  labels:
    app: webrtc-be
    app.kubernetes.io/instance: webrtc-be
  name: webrtc-be
  namespace: webrtc-be
  resourceVersion: "272003497"
  uid: f82d6e1d-99c5-4b22-a0cf-b8d02bbe3279
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: webrtc-be
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2024-11-18T01:58:53Z"
      creationTimestamp: null
      labels:
        app: webrtc-be
    spec:
      containers:
      - env:
        - name: RTC_MIN_PORT
          value: "30000"
        - name: RTC_MAX_PORT
          value: "32768"
        - name: BACKEND_URL
          value: https://www.jamkazam.com
        - name: RESTRICT_AUTH
          value: "true"
        image: gcr.io/tough-craft-276813/webrtc_be:latest
        imagePullPolicy: Always
        name: webrtc-be
        ports:
        - containerPort: 5001
          hostPort: 5001
          name: websocket-port
          protocol: TCP
        resources:
          limits:
            memory: 3800Mi
          requests:
            cpu: 1200m
            memory: 3800Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      hostNetwork: true
      imagePullSecrets:
      - name: gcr-json-key
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: "2021-11-18T18:07:44Z"
    lastUpdateTime: "2024-11-18T01:58:56Z"
    message: ReplicaSet "webrtc-be-6b649d8cd4" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  - lastTransitionTime: "2024-11-24T15:06:22Z"
    lastUpdateTime: "2024-11-24T15:06:22Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  observedGeneration: 897
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1

View File

@ -0,0 +1,87 @@
# Snapshot of the staging webrtc-be Deployment (`kubectl get deployment
# webrtc-be -o yaml`), kept for reference during the Helm migration.
# NOTE(review): leading indentation was lost in the committed copy; restored here
# to the standard kubectl output layout.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "3"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"webrtc-be","app.kubernetes.io/instance":"webrtc-be"},"name":"webrtc-be","namespace":"webrtc-be"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"webrtc-be"}},"template":{"metadata":{"labels":{"app":"webrtc-be"}},"spec":{"containers":[{"env":[{"name":"RTC_MIN_PORT","value":"30000"},{"name":"RTC_MAX_PORT","value":"32768"},{"name":"BACKEND_URL","value":"https://staging.jamkazam.com"}],"image":"gcr.io/tough-craft-276813/webrtc_be:latest","imagePullPolicy":"Always","name":"webrtc-be","ports":[{"containerPort":5001,"name":"websocket-port"}],"resources":{"limits":{"memory":"3800Mi"},"requests":{"cpu":"1200m","memory":"3800Mi"}}}],"hostNetwork":true,"imagePullSecrets":[{"name":"gcr-json-key"}]}}}}
  creationTimestamp: "2021-11-18T08:57:30Z"
  generation: 3
  labels:
    app: webrtc-be
    app.kubernetes.io/instance: webrtc-be
  name: webrtc-be
  namespace: webrtc-be
  resourceVersion: "147684262"
  uid: 4190b38d-99f6-418a-ae75-8d8e2c6ad040
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: webrtc-be
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2024-11-18T01:57:59Z"
      creationTimestamp: null
      labels:
        app: webrtc-be
    spec:
      containers:
      - env:
        - name: RTC_MIN_PORT
          value: "30000"
        - name: RTC_MAX_PORT
          value: "32768"
        - name: BACKEND_URL
          value: https://staging.jamkazam.com
        image: gcr.io/tough-craft-276813/webrtc_be:latest
        imagePullPolicy: Always
        name: webrtc-be
        ports:
        - containerPort: 5001
          hostPort: 5001
          name: websocket-port
          protocol: TCP
        resources:
          limits:
            memory: 3800Mi
          requests:
            cpu: 1200m
            memory: 3800Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      hostNetwork: true
      imagePullSecrets:
      - name: gcr-json-key
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: "2021-11-18T08:57:30Z"
    lastUpdateTime: "2024-11-18T02:01:07Z"
    message: ReplicaSet "webrtc-be-548f98f65f" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  - lastTransitionTime: "2024-11-21T13:15:14Z"
    lastUpdateTime: "2024-11-21T13:15:14Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  observedGeneration: 3
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1

1
k8s/monitoring/1 Normal file
View File

@ -0,0 +1 @@
# List the available tags for the webrtc_be container image in GCR.
gcloud container images list-tags gcr.io/tough-craft-276813/webrtc_be

View File

@ -0,0 +1,4 @@
# #video-cluster-staging-alerts
https://hooks.slack.com/services/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
# #video-cluster-prd-alerts
https://hooks.slack.com/services/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
# NOTE(review): Slack webhook URLs are live credentials — anyone with the URL can
# post to the channel. These should be rotated and moved to a secret store rather
# than committed to the repository.

7
k8s/webrtc-be/1 Normal file
View File

@ -0,0 +1,7 @@
# Helm values for the webrtc-be chart (staging environment).
# Ingress
domain: "webrtc-be.staging.video.jamkazam.com"
# Webrtc env variables
backendUrl: "https://staging.jamkazam.com"
# Webrtc_be image version
# Quoted so version-like scalars are never re-typed by YAML tooling
# (e.g. a future two-part tag such as 1.20 would parse as the float 1.2).
webrtc_tag: "1.0.131"

36
scripts/bug.txt Normal file
View File

@ -0,0 +1,36 @@
[20241209 12:26:38.613] disconnectPeer 4peH1Pc77rWKD37YAA40 otherPeerId: 4peH1Pc77rWKD37YAA40
[20241209 12:26:38.613] disconnectPeer 4peH1Pc77rWKD37YAA40 room: anywhere deleting room
[20241209 12:26:38.614] disconnectPeer 4peH1Pc77rWKD37YAA40 room: anywhere num-peers: 0 num-rooms: 0
[20241209 12:26:41.344] mediasoup:ERROR:Channel [pid:34] PortManager::Bind() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769]
[20241209 12:26:41.351] mediasoup:ERROR:Channel [pid:34] Worker::HandleRequest() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:41.352] mediasoup:WARN:Channel request failed [method:ROUTER_CREATE_PIPETRANSPORT, id:362050]: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:41.353] mediasoup:ERROR:Router pipeToRouter() | error creating PipeTransport pair:: Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:41.381] This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). The promise rejected with the reason:
[20241209 12:26:41.381] Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
at Channel.processResponse (/app/node_modules/mediasoup/node/lib/Channel.js:251:33)
at Socket.<anonymous> (/app/node_modules/mediasoup/node/lib/Channel.js:76:34)
at Socket.emit (node:events:518:28)
at Socket.emit (node:domain:489:12)
at addChunk (node:internal/streams/readable:561:12)
at readableAddChunkPushByteMode (node:internal/streams/readable:512:3)
at Readable.push (node:internal/streams/readable:392:5)
at Pipe.onStreamRead (node:internal/stream_base_commons:189:23)
[20241209 12:26:49.551] disconnectPeer l1YCvM1T8b_2AjeZAA42
[20241209 12:26:49.552] disconnectPeer l1YCvM1T8b_2AjeZAA42 peer not found
[20241209 12:26:51.182] mediasoup:ERROR:Channel [pid:28] PortManager::Bind() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769]
[20241209 12:26:51.185] mediasoup:ERROR:Channel [pid:28] Worker::HandleRequest() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:51.186] mediasoup:WARN:Channel request failed [method:ROUTER_CREATE_PIPETRANSPORT, id:237454]: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:51.186] mediasoup:ERROR:Router pipeToRouter() | error creating PipeTransport pair:: Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:51.188] This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). The promise rejected with the reason:
[20241209 12:26:51.188] Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
at Channel.processResponse (/app/node_modules/mediasoup/node/lib/Channel.js:251:33)
at Socket.<anonymous> (/app/node_modules/mediasoup/node/lib/Channel.js:76:34)
at Socket.emit (node:events:518:28)
at Socket.emit (node:domain:489:12)
at addChunk (node:internal/streams/readable:561:12)
at readableAddChunkPushByteMode (node:internal/streams/readable:512:3)
at Readable.push (node:internal/streams/readable:392:5)
at Pipe.onStreamRead (node:internal/stream_base_commons:189:23)
[20241209 12:26:51.200] mediasoup:ERROR:Channel [pid:31] PortManager::Bind() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769]
[20241209 12:26:51.201] mediasoup:ERROR:Channel [pid:31] Worker::HandleRequest() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]
[20241209 12:26:51.201] mediasoup:WARN:Channel request failed [method:ROUTER_CREATE_PIPETRANSPORT, id:290384]: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport]

10
scripts/check-udp Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash
# Dump the kernel UDP socket table (/proc/net/udp) from the webrtc-be pod in the
# selected cluster.
# Usage: check-udp <env>   where <env> is "stg" or "prd"
set -eu -o pipefail
ENV=$1 # stg or prd
KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml
# $( ) instead of legacy backticks; assumes a single webrtc-be pod
# (the Deployment runs replicas: 1).
POD=$(kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name")
kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be exec "$POD" -- cat /proc/net/udp

10
scripts/check-udp-prd.sh Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash
# Dump the kernel UDP socket table (/proc/net/udp) from the webrtc-be pod in the
# production cluster. Hard-coded "prd" variant of scripts/check-udp.
set -eu -o pipefail
KUBECONFIG=~/Downloads/prd-video-cluster-kubeconfig.yaml
# $( ) instead of legacy backticks; assumes a single webrtc-be pod
# (the Deployment runs replicas: 1).
POD=$(kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name")
kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be exec "$POD" -- cat /proc/net/udp

8
scripts/dump-webrtc-podname Executable file
View File

@ -0,0 +1,8 @@
#!/bin/bash
# Print the name of the webrtc-be pod in the selected cluster.
# Usage: dump-webrtc-podname <env>   where <env> is "stg" or "prd"
set -eu -o pipefail
ENV=$1 # stg or prd
KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml
# $( ) instead of legacy backticks; quoting preserves the value verbatim.
POD=$(kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name")
echo "$POD"

9
scripts/exec-mode-webrtc Executable file
View File

@ -0,0 +1,9 @@
#!/bin/bash
# Open an interactive bash shell in the webrtc-be pod of the selected cluster.
# Usage: exec-mode-webrtc <env>   where <env> is "stg" or "prd"
set -eu -o pipefail
ENV=$1 # stg or prd
KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml
# $( ) instead of legacy backticks; assumes a single webrtc-be pod.
POD=$(kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name")
kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be exec -it "$POD" -- /bin/bash

10
scripts/exec-webrtc Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash
# Run an arbitrary command in the webrtc-be pod of the selected cluster.
# Usage: exec-webrtc <env> <command>   where <env> is "stg" or "prd"
set -eu -o pipefail
ENV=$1 # stg or prd
EXEC=$2
KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml
# $( ) instead of legacy backticks; assumes a single webrtc-be pod.
POD=$(kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name")
# Fix: use $EXEC (the original hard-coded $2, leaving EXEC assigned but unused).
# Deliberately unquoted so a multi-word command splits into separate arguments,
# matching the original's unquoted-$2 behavior.
kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be exec "$POD" -- $EXEC

View File

@ -0,0 +1,4 @@
# List the webrtc-be pods in the staging cluster.
KUBECONFIG=~/Downloads/stg-video-cluster-kubeconfig.yaml
# Fix: the original assignment was missing $( ), so the shell attempted to run
# "--kubeconfig" as a command (with POD=kubectl in its environment) instead of
# capturing the pod name.
POD=$(kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name")
kubectl --kubeconfig "$KUBECONFIG" --namespace webrtc-be get pod

1
scripts/launch-prd-room Normal file
View File

@ -0,0 +1 @@
# Launch the production "anywhere" test room in the default browser.
# NOTE(review): `open` is the macOS launcher — this script presumably targets macOS.
open https://video.jamkazam.com/room/anywhere?token=openroom12345

1
scripts/list-webrtc-tags Executable file
View File

@ -0,0 +1 @@
# List the available tags for the webrtc_be container image in GCR.
gcloud container images list-tags gcr.io/tough-craft-276813/webrtc_be

7
scripts/replace-webrtc Executable file
View File

@ -0,0 +1,7 @@
#!/bin/bash
# Force-recreate the webrtc-be pod(s) in the selected cluster by piping the live
# pod manifests back through `kubectl replace --force` (delete then recreate).
# Usage: replace-webrtc <env>   where <env> is "stg" or "prd"
set -eu -o pipefail
ENV=$1 # stg or prd
KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml
# Read all pods as YAML and feed the stream straight back into replace via stdin.
kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod -o yaml | kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be replace --force -f -