this repo has no description
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

De-provision things...

+44 -7
+36
hack/k8s-cilium-exec.sh
#!/usr/bin/env bash
# SPDX-License-Identifier: Apache-2.0
# Copyright Authors of Cilium
#
# Run an arbitrary command inside every Cilium agent pod in parallel and
# print each pod's output under a "==== detail from pod ..." header.
#
# Environment:
#   K8S_NAMESPACE  namespace to search for Cilium pods (default: kube-system)
#   CONTAINER      container to exec into              (default: cilium-agent)
#
# Usage: k8s-cilium-exec.sh <command> [args...]

trap cleanup EXIT

# Send signal $1 (a name such as INT or TERM) to all background jobs
# spawned by this script, if any are still running.
function kill_jobs {
	local pids
	pids=$(jobs -p)
	if [ -n "$pids" ]; then
		# POSIX "kill -s NAME" instead of the obsolescent "kill -NAME" form.
		# shellcheck disable=SC2086  # intentional word splitting: one PID per word
		kill -s "$1" $pids 2> /dev/null
	fi
}

# On exit, ask jobs to stop politely (INT), give them a moment, then TERM
# anything that is still around.
function cleanup {
	kill_jobs INT
	sleep 2   # "2s" suffix is GNU-only; a bare number is POSIX-portable
	kill_jobs TERM
}

# Emit "<pod-name> <node-name>" pairs for all Cilium agent pods in the
# configured namespace (the grep also drops the custom-columns header row).
function get_cilium_pods {
	kubectl -n "${K8S_NAMESPACE}" get pods -l k8s-app=cilium \
		-o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName | \
		grep cilium
}

K8S_NAMESPACE="${K8S_NAMESPACE:-kube-system}"
CONTAINER="${CONTAINER:-cilium-agent}"

# Fan out: one background subshell per pod so the execs run concurrently;
# each subshell buffers its output and prints it as a single chunk.
while read -r podName nodeName ; do
	(
		title="==== detail from pod $podName , on node $nodeName "
		msg=$( kubectl -n "${K8S_NAMESPACE}" exec -c "${CONTAINER}" "${podName}" -- "${@}" 2>&1 )
		echo -e "$title \n$msg\n"
	)&
done <<< "$(get_cilium_pods)"

wait
k8s/nebula/apps/dev/coder/app/externalsecret-store.yaml .archive/nebula/dev/coder/app/externalsecret-store.yaml
k8s/nebula/apps/dev/coder/app/externalsecret.yaml .archive/nebula/dev/coder/app/externalsecret.yaml
k8s/nebula/apps/dev/coder/app/hr.yaml .archive/nebula/dev/coder/app/hr.yaml
k8s/nebula/apps/dev/coder/app/internal-ingress.yaml .archive/nebula/dev/coder/app/internal-ingress.yaml
k8s/nebula/apps/dev/coder/app/kustomization.yaml .archive/nebula/dev/coder/app/kustomization.yaml
k8s/nebula/apps/dev/coder/ks.yaml .archive/nebula/dev/coder/ks.yaml
k8s/nebula/apps/dev/kustomization.yaml .archive/nebula/dev/kustomization.yaml
k8s/nebula/apps/dev/ns.yaml .archive/nebula/dev/ns.yaml
-1
k8s/nebula/apps/kustomization.yaml
··· 11 11 - networking 12 12 - system 13 13 - storage 14 - # - search 15 14 - home 16 15 - databases 17 16 - kube-system
k8s/nebula/apps/search/elk/ks-resources.yaml .archive/nebula/search/elk/ks-resources.yaml
k8s/nebula/apps/search/elk/ks.yaml .archive/nebula/search/elk/ks.yaml
k8s/nebula/apps/search/elk/operator/hr.yaml .archive/nebula/search/elk/operator/hr.yaml
k8s/nebula/apps/search/elk/operator/kustomization.yaml .archive/nebula/search/elk/operator/kustomization.yaml
k8s/nebula/apps/search/elk/resources/es.yaml .archive/nebula/search/elk/resources/es.yaml
k8s/nebula/apps/search/elk/resources/ingress-elk.yaml .archive/nebula/search/elk/resources/ingress-elk.yaml
k8s/nebula/apps/search/elk/resources/kibana.yaml .archive/nebula/search/elk/resources/kibana.yaml
k8s/nebula/apps/search/elk/resources/kustomization.yaml .archive/nebula/search/elk/resources/kustomization.yaml
k8s/nebula/apps/search/kustomization.yaml .archive/nebula/search/kustomization.yaml
k8s/nebula/apps/search/ns.yaml .archive/nebula/search/ns.yaml
+2
provision/ansible/nebula/inventory/group_vars/kubernetes/k3s.yml
··· 27 27 # ...this is set to my kube-vip address 28 28 k3s_registration_address: 10.0.105.34 29 29 30 + node_ula_address: "{{ ansible_all_ipv6_addresses | select('match', '^fd[9a-fA-F]{1}') | select('match', '^fd[0-9a-fA-F]{2}(:[0-9a-fA-F]{0,4}){0,7}$') | reject('match', '^fddf:') | first}}" 31 + 30 32 # 31 33 # /var/lib/rancher/k3s/server/manifests 32 34 #
+4 -4
provision/ansible/nebula/inventory/group_vars/master/k3s.yml
··· 7 7 8 8 # k3s settings for all control-plane nodes 9 9 k3s_server: 10 - node-ip: "{{ ansible_host }},{{ ansible_all_ipv6_addresses | select('match', '^fd[9a-fA-F]{1}') | 11 - select('match', '^fd[0-9a-fA-F]{2}(:[0-9a-fA-F]{0,4}){0,7}$') | reject('match', '^fddf:') | list}}" 12 - node-external-ip: "{{ ansible_default_ipv6.address }}" 10 + node-ip: "{{ ansible_host }},{{ node_ula_address }}" 11 + #node-external-ip: "{{ ansible_default_ipv6.address }}" 13 12 #node-ip: "{{ ansible_default_ipv6.address }},{{ ansible_host }}" 14 13 tls-san: 15 14 - "{{ k3s_registration_address }}" 16 15 - "{{ ansible_default_ipv6.address }}" 16 + - "{{ node_ula_address }}" 17 17 docker: false 18 18 flannel-backend: "none" # This needs to be in quotes 19 19 disable: ··· 30 30 cluster-cidr: "10.244.0.0/16,fddf:f7bc:9670::/48" 31 31 #cluster-cidr: "fddf:f7bc:9670::/48,10.244.0.0/16" 32 32 # Network CIDR to use for service IPs 33 - service-cidr: "10.96.0.0/16,2001:14ba:16fd:961d::1e:0/112" 33 + service-cidr: "10.96.0.0/16,2001:14ba:74ae:3405::1e:0/112" 34 34 #service-cidr: "2001:67c:1104:fdb::/112,10.96.0.0/16" 35 35 kubelet-arg: 36 36 - "feature-gates=GracefulNodeShutdown=true"
+2 -2
provision/ansible/nebula/inventory/hosts.yaml
··· 22 22 ansible_host: 10.0.105.19 23 23 ceph_drives: 24 24 - /dev/disk/by-id/nvme-SAMSUNG_MZ9LQ512HBLU-00BVL_S73XNX0T887767 25 - w-amd-2: 26 - ansible_host: 10.0.105.33 25 + #w-amd-2: 26 + # ansible_host: 10.0.105.33 27 27 w-amd-1: 28 28 ansible_host: 10.0.105.25 29 29 ceph_drives: