diff --git a/k8s-notes/coturn-dns/README.md b/k8s-notes/coturn-dns/README.md new file mode 100644 index 0000000..7595cc2 --- /dev/null +++ b/k8s-notes/coturn-dns/README.md @@ -0,0 +1,16 @@ +# capture current state in prod +kubectl get deployment coturn-dns -o yaml --namespace coturn-dns > existing-production-deployment.yaml + +# capture current state in staging +kubectl get deployment coturn-dns -o yaml --namespace coturn-dns > existing-staging-deployment.yaml + + +KUBECONFIG=stg +cd k8s/coturn-dns +kubectl delete secret gcr-json-key -n coturn-dns +kubectl delete ClusterRole pods-list --namespace coturn-dns +kubectl delete ClusterRoleBinding pods-list --namespace coturn-dns +kubectl delete deployment coturn-dns -n coturn-dns + +ENV=production/staging +helm upgrade --install coturn-dns . --values values-$ENV.yaml --namespace coturn-dns diff --git a/k8s-notes/coturn-dns/existing-prd.yaml b/k8s-notes/coturn-dns/existing-prd.yaml new file mode 100644 index 0000000..6ac880a --- /dev/null +++ b/k8s-notes/coturn-dns/existing-prd.yaml @@ -0,0 +1,88 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "4" + kubectl.kubernetes.io/last-applied-configuration: | + 
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"coturn-dns"},"name":"coturn-dns","namespace":"coturn-dns"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"coturn-dns"}},"strategy":{"rollingUpdate":{"maxSurge":0,"maxUnavailable":1},"type":"RollingUpdate"},"template":{"metadata":{"labels":{"app":"coturn-dns"}},"spec":{"containers":[{"env":[{"name":"AWS_ACCESS_KEY_ID","valueFrom":{"secretKeyRef":{"key":"username","name":"aws-user-coturn-dns"}}},{"name":"AWS_SECRET_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"password","name":"aws-user-coturn-dns"}}},{"name":"PYTHONUNBUFFERED","value":"1"},{"name":"HOSTED_ZONE","value":"Z00156242SK162FEXDPVF"},{"name":"COTURN_DOMAIN_NAME","value":"coturn.video.jamkazam.com"}],"image":"gcr.io/tough-craft-276813/coturn-dns:latest","imagePullPolicy":"Always","name":"coturn-dns","resources":{"limits":{"memory":"128Mi"},"requests":{"memory":"128Mi"}}}],"imagePullSecrets":[{"name":"gcr-json-key"}]}}}} + creationTimestamp: "2021-11-18T15:50:39Z" + generation: 4 + labels: + app.kubernetes.io/instance: coturn-dns + name: coturn-dns + namespace: coturn-dns + resourceVersion: "272107840" + uid: 9c0beb6c-43fb-4d01-8ec3-b54110035037 +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: coturn-dns + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/restartedAt: "2021-11-18T19:57:36Z" + creationTimestamp: null + labels: + app: coturn-dns + spec: + containers: + - env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: username + name: aws-user-coturn-dns + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: password + name: aws-user-coturn-dns + - name: PYTHONUNBUFFERED + value: "1" + - name: HOSTED_ZONE + value: Z00156242SK162FEXDPVF + - name: COTURN_DOMAIN_NAME + value: coturn.video.jamkazam.com 
+ image: gcr.io/tough-craft-276813/coturn-dns:latest + imagePullPolicy: Always + name: coturn-dns + resources: + limits: + memory: 128Mi + requests: + memory: 128Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + imagePullSecrets: + - name: gcr-json-key + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + conditions: + - lastTransitionTime: "2021-11-18T15:50:42Z" + lastUpdateTime: "2021-11-18T15:50:42Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2021-11-18T15:50:40Z" + lastUpdateTime: "2021-11-18T19:57:38Z" + message: ReplicaSet "coturn-dns-df4f7ffc" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 4 + replicas: 1 + unavailableReplicas: 1 + updatedReplicas: 1 diff --git a/k8s-notes/coturn-dns/existing-staging.yaml b/k8s-notes/coturn-dns/existing-staging.yaml new file mode 100644 index 0000000..16d6343 --- /dev/null +++ b/k8s-notes/coturn-dns/existing-staging.yaml @@ -0,0 +1,88 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "53" + kubectl.kubernetes.io/last-applied-configuration: | + 
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"coturn-dns"},"name":"coturn-dns","namespace":"coturn-dns"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"coturn-dns"}},"strategy":{"rollingUpdate":{"maxSurge":0,"maxUnavailable":1},"type":"RollingUpdate"},"template":{"metadata":{"labels":{"app":"coturn-dns"}},"spec":{"containers":[{"env":[{"name":"AWS_ACCESS_KEY_ID","valueFrom":{"secretKeyRef":{"key":"username","name":"aws-user-coturn-dns"}}},{"name":"AWS_SECRET_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"password","name":"aws-user-coturn-dns"}}},{"name":"PYTHONUNBUFFERED","value":"1"},{"name":"HOSTED_ZONE","value":"Z00156242SK162FEXDPVF"},{"name":"COTURN_DOMAIN_NAME","value":"coturn.staging.video.jamkazam.com"}],"image":"gcr.io/tough-craft-276813/coturn-dns:latest","imagePullPolicy":"Always","name":"coturn-dns","resources":{"limits":{"memory":"128Mi"},"requests":{"memory":"128Mi"}}}],"imagePullSecrets":[{"name":"gcr-json-key"}]}}}} + creationTimestamp: "2021-11-15T17:02:14Z" + generation: 56 + labels: + app.kubernetes.io/instance: coturn-dns + name: coturn-dns + namespace: coturn-dns + resourceVersion: "148191397" + uid: d3fd02f4-1f28-4cb4-8e4d-294155213368 +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: coturn-dns + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/restartedAt: "2021-11-18T19:56:56Z" + creationTimestamp: null + labels: + app: coturn-dns + spec: + containers: + - env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: username + name: aws-user-coturn-dns + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: password + name: aws-user-coturn-dns + - name: PYTHONUNBUFFERED + value: "1" + - name: HOSTED_ZONE + value: Z00156242SK162FEXDPVF + - name: COTURN_DOMAIN_NAME + value: 
coturn.staging.video.jamkazam.com + image: gcr.io/tough-craft-276813/coturn-dns:latest + imagePullPolicy: Always + name: coturn-dns + resources: + limits: + memory: 128Mi + requests: + memory: 128Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + imagePullSecrets: + - name: gcr-json-key + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + conditions: + - lastTransitionTime: "2021-11-15T17:02:14Z" + lastUpdateTime: "2021-11-15T17:02:14Z" + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2021-11-15T19:12:57Z" + lastUpdateTime: "2021-11-18T19:56:58Z" + message: ReplicaSet "coturn-dns-686c986557" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 56 + replicas: 1 + unavailableReplicas: 1 + updatedReplicas: 1 diff --git a/k8s-notes/webrtc-be/README.md b/k8s-notes/webrtc-be/README.md new file mode 100644 index 0000000..fedcb84 --- /dev/null +++ b/k8s-notes/webrtc-be/README.md @@ -0,0 +1,15 @@ +# capture current state in prod +kubectl get deployment webrtc-be -o yaml --namespace webrtc-be > existing-production-deployment.yaml + +# capture current state in relese +kubectl get deployment webrtc-be -o yaml > existing-staging-deployment.yaml + + +KUBECONFIG=stg +cd k8s/webrtc-be +kubectl delete secret gcr-json-key -n webrtc-be +kubectl delete service webrtc-be-service -n webrtc-be +kubectl delete deployment webrtc-be -n webrtc-be +kubectl delete HorizontalPodAutoscaler webrtc-be -n webrtc-be +kubectl delete ingress webrtc-be -n webrtc-be +helm upgrade --install webrtc-be . 
--values values-staging.yaml --namespace webrtc-be diff --git a/k8s-notes/webrtc-be/existing-production-deployment.yaml b/k8s-notes/webrtc-be/existing-production-deployment.yaml new file mode 100644 index 0000000..bd061af --- /dev/null +++ b/k8s-notes/webrtc-be/existing-production-deployment.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "5" + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"webrtc-be","app.kubernetes.io/instance":"webrtc-be"},"name":"webrtc-be","namespace":"webrtc-be"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"webrtc-be"}},"template":{"metadata":{"labels":{"app":"webrtc-be"}},"spec":{"containers":[{"env":[{"name":"RTC_MIN_PORT","value":"30000"},{"name":"RTC_MAX_PORT","value":"32768"},{"name":"BACKEND_URL","value":"https://www.jamkazam.com"},{"name":"RESTRICT_AUTH","value":"true"}],"image":"gcr.io/tough-craft-276813/webrtc_be:latest","imagePullPolicy":"Always","name":"webrtc-be","ports":[{"containerPort":5001,"name":"websocket-port"}],"resources":{"limits":{"memory":"3800Mi"},"requests":{"cpu":"1200m","memory":"3800Mi"}}}],"hostNetwork":true,"imagePullSecrets":[{"name":"gcr-json-key"}]}}}} + creationTimestamp: "2021-11-18T18:07:44Z" + generation: 897 + labels: + app: webrtc-be + app.kubernetes.io/instance: webrtc-be + name: webrtc-be + namespace: webrtc-be + resourceVersion: "272003497" + uid: f82d6e1d-99c5-4b22-a0cf-b8d02bbe3279 +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: webrtc-be + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/restartedAt: "2024-11-18T01:58:53Z" + creationTimestamp: null + labels: + app: webrtc-be + spec: + containers: + - env: + - name: RTC_MIN_PORT + value: "30000" + - name: 
RTC_MAX_PORT + value: "32768" + - name: BACKEND_URL + value: https://www.jamkazam.com + - name: RESTRICT_AUTH + value: "true" + image: gcr.io/tough-craft-276813/webrtc_be:latest + imagePullPolicy: Always + name: webrtc-be + ports: + - containerPort: 5001 + hostPort: 5001 + name: websocket-port + protocol: TCP + resources: + limits: + memory: 3800Mi + requests: + cpu: 1200m + memory: 3800Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + hostNetwork: true + imagePullSecrets: + - name: gcr-json-key + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2021-11-18T18:07:44Z" + lastUpdateTime: "2024-11-18T01:58:56Z" + message: ReplicaSet "webrtc-be-6b649d8cd4" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2024-11-24T15:06:22Z" + lastUpdateTime: "2024-11-24T15:06:22Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: "True" + type: Available + observedGeneration: 897 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/k8s-notes/webrtc-be/existing-staging-deployment.yaml b/k8s-notes/webrtc-be/existing-staging-deployment.yaml new file mode 100644 index 0000000..0f7efc5 --- /dev/null +++ b/k8s-notes/webrtc-be/existing-staging-deployment.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "3" + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app":"webrtc-be","app.kubernetes.io/instance":"webrtc-be"},"name":"webrtc-be","namespace":"webrtc-be"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"webrtc-be"}},"template":{"metadata":{"labels":{"app":"webrtc-be"}},"spec":{"containers":[{"env":[{"name":"RTC_MIN_PORT","value":"30000"},{"name":"RTC_MAX_PORT","value":"32768"},{"name":"BACKEND_URL","value":"https://staging.jamkazam.com"}],"image":"gcr.io/tough-craft-276813/webrtc_be:latest","imagePullPolicy":"Always","name":"webrtc-be","ports":[{"containerPort":5001,"name":"websocket-port"}],"resources":{"limits":{"memory":"3800Mi"},"requests":{"cpu":"1200m","memory":"3800Mi"}}}],"hostNetwork":true,"imagePullSecrets":[{"name":"gcr-json-key"}]}}}} + creationTimestamp: "2021-11-18T08:57:30Z" + generation: 3 + labels: + app: webrtc-be + app.kubernetes.io/instance: webrtc-be + name: webrtc-be + namespace: webrtc-be + resourceVersion: "147684262" + uid: 4190b38d-99f6-418a-ae75-8d8e2c6ad040 +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: webrtc-be + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/restartedAt: "2024-11-18T01:57:59Z" + creationTimestamp: null + labels: + app: webrtc-be + spec: + containers: + - 
env: + - name: RTC_MIN_PORT + value: "30000" + - name: RTC_MAX_PORT + value: "32768" + - name: BACKEND_URL + value: https://staging.jamkazam.com + image: gcr.io/tough-craft-276813/webrtc_be:latest + imagePullPolicy: Always + name: webrtc-be + ports: + - containerPort: 5001 + hostPort: 5001 + name: websocket-port + protocol: TCP + resources: + limits: + memory: 3800Mi + requests: + cpu: 1200m + memory: 3800Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + hostNetwork: true + imagePullSecrets: + - name: gcr-json-key + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2021-11-18T08:57:30Z" + lastUpdateTime: "2024-11-18T02:01:07Z" + message: ReplicaSet "webrtc-be-548f98f65f" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + - lastTransitionTime: "2024-11-21T13:15:14Z" + lastUpdateTime: "2024-11-21T13:15:14Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: "True" + type: Available + observedGeneration: 3 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 diff --git a/k8s/monitoring/1 b/k8s/monitoring/1 new file mode 100644 index 0000000..0d78769 --- /dev/null +++ b/k8s/monitoring/1 @@ -0,0 +1 @@ +gcloud container images list-tags gcr.io/tough-craft-276813/webrtc_be diff --git a/k8s/monitoring/slack_webhooks.yaml b/k8s/monitoring/slack_webhooks.yaml new file mode 100644 index 0000000..cbb2f9a --- /dev/null +++ b/k8s/monitoring/slack_webhooks.yaml @@ -0,0 +1,4 @@ +# #video-cluster-staging-alerts +https://hooks.slack.com/services/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 +#video-cluster-prd-alerts +https://hooks.slack.com/services/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 diff --git a/k8s/webrtc-be/1 b/k8s/webrtc-be/1 new file mode 100644 index 0000000..8f5327b --- /dev/null +++ b/k8s/webrtc-be/1 @@ -0,0 +1,7 @@ +# Ingress +domain: "webrtc-be.staging.video.jamkazam.com" +# Webrtc env variables +backendUrl: "https://staging.jamkazam.com" +# Webrtc_be image version +webrtc_tag: 1.0.131 + diff --git a/scripts/bug.txt b/scripts/bug.txt new file mode 100644 index 0000000..d13ac25 --- /dev/null +++ b/scripts/bug.txt @@ -0,0 +1,36 @@ +[20241209 12:26:38.613] disconnectPeer 4peH1Pc77rWKD37YAA40 otherPeerId: 4peH1Pc77rWKD37YAA40 +[20241209 12:26:38.613] disconnectPeer 4peH1Pc77rWKD37YAA40 room: anywhere deleting room +[20241209 12:26:38.614] disconnectPeer 4peH1Pc77rWKD37YAA40 room: anywhere num-peers: 0 num-rooms: 0 +[20241209 12:26:41.344] mediasoup:ERROR:Channel [pid:34] PortManager::Bind() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] +[20241209 12:26:41.351] mediasoup:ERROR:Channel [pid:34] Worker::HandleRequest() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] +[20241209 12:26:41.352] mediasoup:WARN:Channel request failed 
[method:ROUTER_CREATE_PIPETRANSPORT, id:362050]: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] +[20241209 12:26:41.353] mediasoup:ERROR:Router pipeToRouter() | error creating PipeTransport pair:: Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] +[20241209 12:26:41.381] This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). The promise rejected with the reason: +[20241209 12:26:41.381] Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] + at Channel.processResponse (/app/node_modules/mediasoup/node/lib/Channel.js:251:33) + at Socket. (/app/node_modules/mediasoup/node/lib/Channel.js:76:34) + at Socket.emit (node:events:518:28) + at Socket.emit (node:domain:489:12) + at addChunk (node:internal/streams/readable:561:12) + at readableAddChunkPushByteMode (node:internal/streams/readable:512:3) + at Readable.push (node:internal/streams/readable:392:5) + at Pipe.onStreamRead (node:internal/stream_base_commons:189:23) +[20241209 12:26:49.551] disconnectPeer l1YCvM1T8b_2AjeZAA42 +[20241209 12:26:49.552] disconnectPeer l1YCvM1T8b_2AjeZAA42 peer not found +[20241209 12:26:51.182] mediasoup:ERROR:Channel [pid:28] PortManager::Bind() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] +[20241209 12:26:51.185] mediasoup:ERROR:Channel [pid:28] Worker::HandleRequest() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] +[20241209 12:26:51.186] mediasoup:WARN:Channel request failed [method:ROUTER_CREATE_PIPETRANSPORT, id:237454]: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] +[20241209 12:26:51.186] 
mediasoup:ERROR:Router pipeToRouter() | error creating PipeTransport pair:: Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] +[20241209 12:26:51.188] This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). The promise rejected with the reason: +[20241209 12:26:51.188] Error: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] + at Channel.processResponse (/app/node_modules/mediasoup/node/lib/Channel.js:251:33) + at Socket. (/app/node_modules/mediasoup/node/lib/Channel.js:76:34) + at Socket.emit (node:events:518:28) + at Socket.emit (node:domain:489:12) + at addChunk (node:internal/streams/readable:561:12) + at readableAddChunkPushByteMode (node:internal/streams/readable:512:3) + at Readable.push (node:internal/streams/readable:392:5) + at Pipe.onStreamRead (node:internal/stream_base_commons:189:23) +[20241209 12:26:51.200] mediasoup:ERROR:Channel [pid:31] PortManager::Bind() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] +[20241209 12:26:51.201] mediasoup:ERROR:Channel [pid:31] Worker::HandleRequest() | throwing MediaSoupError: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] +[20241209 12:26:51.201] mediasoup:WARN:Channel request failed [method:ROUTER_CREATE_PIPETRANSPORT, id:290384]: no more available ports [protocol:udp, ip:'127.0.0.1', numAttempt:2769] [method:router.createPipeTransport] diff --git a/scripts/check-udp b/scripts/check-udp new file mode 100755 index 0000000..756d015 --- /dev/null +++ b/scripts/check-udp @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +ENV=$1 # stg or prd + +KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml + +POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o 
custom-columns=":metadata.name"` + +kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be exec $POD -- cat /proc/net/udp diff --git a/scripts/check-udp-prd.sh b/scripts/check-udp-prd.sh new file mode 100755 index 0000000..4b07ad7 --- /dev/null +++ b/scripts/check-udp-prd.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# + +set -eu -o pipefail + +KUBECONFIG=~/Downloads/prd-video-cluster-kubeconfig.yaml + +POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"` + +kubectl --kubeconfig $KUBECONFIG exec $POD --namespace webrtc-be -- cat /proc/net/udp diff --git a/scripts/dump-webrtc-podname b/scripts/dump-webrtc-podname new file mode 100755 index 0000000..5c92e30 --- /dev/null +++ b/scripts/dump-webrtc-podname @@ -0,0 +1,8 @@ +#!/bin/bash +set -eu -o pipefail + +ENV=$1 # stg or prd +KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml + +POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"` +echo $POD diff --git a/scripts/exec-mode-webrtc b/scripts/exec-mode-webrtc new file mode 100755 index 0000000..78deb9f --- /dev/null +++ b/scripts/exec-mode-webrtc @@ -0,0 +1,9 @@ +#!/bin/bash +set -eu -o pipefail + +ENV=$1 # stg or prd +KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml + +POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"` + +kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be exec -it $POD -- /bin/bash diff --git a/scripts/exec-webrtc b/scripts/exec-webrtc new file mode 100755 index 0000000..5412d7d --- /dev/null +++ b/scripts/exec-webrtc @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +ENV=$1 # stg or prd +EXEC=$2 +KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml + +POD=`kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name"` + +kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be exec $POD -- $2 diff 
--git a/scripts/get-stg-webrtc-pod.sh b/scripts/get-stg-webrtc-pod.sh new file mode 100644 index 0000000..83ecaa2 --- /dev/null +++ b/scripts/get-stg-webrtc-pod.sh @@ -0,0 +1,4 @@ +KUBECONFIG=~/Downloads/stg-video-cluster-kubeconfig.yaml +POD=$(kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod --no-headers -o custom-columns=":metadata.name") + +kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod diff --git a/scripts/launch-prd-room b/scripts/launch-prd-room new file mode 100644 index 0000000..b32ff53 --- /dev/null +++ b/scripts/launch-prd-room @@ -0,0 +1 @@ +open https://video.jamkazam.com/room/anywhere?token=openroom12345 diff --git a/scripts/list-webrtc-tags b/scripts/list-webrtc-tags new file mode 100755 index 0000000..0d78769 --- /dev/null +++ b/scripts/list-webrtc-tags @@ -0,0 +1 @@ +gcloud container images list-tags gcr.io/tough-craft-276813/webrtc_be diff --git a/scripts/replace-webrtc b/scripts/replace-webrtc new file mode 100755 index 0000000..c1618f5 --- /dev/null +++ b/scripts/replace-webrtc @@ -0,0 +1,7 @@ +#!/bin/bash +set -eu -o pipefail + +ENV=$1 # stg or prd +KUBECONFIG=~/Downloads/$ENV-video-cluster-kubeconfig.yaml + +kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be get pod -o yaml | kubectl --kubeconfig $KUBECONFIG --namespace webrtc-be replace --force -f -