[DO-1431] upgrade prod k8s (!6)

DO-1431

Co-authored-by: denis.patrakeev <denis.patrakeev@avroid.tech>
Reviewed-on: https://git.avroid.tech/K8s/k8s-deploy/pulls/6
Denis Patrakeev
2025-01-16 12:31:10 +03:00
parent 057162a0c9
commit 0e922638e5
11 changed files with 40 additions and 41 deletions

.gitmodules

@@ -1,4 +1,4 @@
[submodule "env/avroid_prod/k8s-avroid-office.prod.local/kubespray"]
path = env/avroid_prod/k8s-avroid-office.prod.local/kubespray
url = ssh://git@git.avroid.tech:2222/Mirrors/kubespray.git
branch = v2.26.0
branch = v2.27.0

@@ -43,9 +43,7 @@ upstream_dns_servers:
## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using openstack-client before starting the playbook.
## If set, the only possible value is 'external' after K8s v1.31.
# cloud_provider:
## When cloud_provider is set to 'external', you can set the cloud controller to deploy
@@ -76,8 +74,8 @@ upstream_dns_servers:
# skip_http_proxy_on_os_packages: false
## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
## no_proxy variable, set below to true:
## pods will restart) when adding or removing workers. To override this behaviour by only including control plane nodes
## in the no_proxy variable, set below to true:
no_proxy_exclude_workers: false
## Certificate Management

@@ -81,7 +81,7 @@ containerd_registries_mirrors:
capabilities: [ "pull", "resolve" ]
skip_verify: false
# containerd_max_container_log_line_size: -1
# containerd_max_container_log_line_size: 16384
# containerd_registry_auth:
# - registry: 10.0.0.2:5000

@@ -0,0 +1,3 @@
---
node_labels:
node-role.kubernetes.io/build: ""
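
Editor's note: this new group_vars file labels the build pool with node-role.kubernetes.io/build. A hypothetical consumer (not part of this MR) that pins a workload to those nodes via nodeSelector could look like the following; the pod name and image are placeholders:
---
apiVersion: v1
kind: Pod
metadata:
  name: build-job                              # placeholder name
spec:
  nodeSelector:
    node-role.kubernetes.io/build: ""          # the label applied by the group_vars above
  containers:
    - name: builder
      image: docker.io/library/busybox:1.36    # placeholder image
      command: ["sh", "-c", "echo building && sleep 3600"]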

@@ -104,13 +104,15 @@ gateway_api_enabled: false
ingress_nginx_enabled: true
ingress_nginx_host_network: false
ingress_nginx_service_type: NodePort
# ingress_nginx_service_annotations:
# example.io/loadbalancerIPs: 1.2.3.4
ingress_nginx_service_nodeport_http: 30080
ingress_nginx_service_nodeport_https: 30081
ingress_publish_status_address: ""
ingress_nginx_nodeselector:
node-role.kubernetes.io/ingress-nginx: "true"
ingress_nginx_tolerations:
- key: "node-role.kubernetes.io/control-node"
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
value: ""
effect: "NoSchedule"
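
For reference, the corrected toleration key now matches the taint kubeadm itself places on control plane nodes (standard kubeadm behaviour, shown here for context; not something this diff adds):
# Taint present on control plane nodes by default; the toleration above must use the same key.
spec:
  taints:
    - key: node-role.kubernetes.io/control-plane
      effect: NoSchedule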

@@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.30.4
kube_version: v1.31.4
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -140,11 +140,7 @@ kube_proxy_nodeport_addresses: >-
{%- endif -%}
# If non-empty, will use this string as identification instead of the actual hostname
# kube_override_hostname: >-
# {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
# {%- else -%}
# {{ inventory_hostname }}
# {%- endif -%}
# kube_override_hostname: {{ inventory_hostname }}
## Encrypting Secret Data at Rest
kube_encrypt_secret_data: false
@@ -267,7 +263,7 @@ kubectl_localhost: false
# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service"
# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
# Optionally reserve this space for kube daemons.
# Whether to run kubelet and container-engine daemons in a dedicated cgroup.
kube_reserved: true
## Uncomment to override default values
## The following two items need to be set when kube_reserved is true
@@ -277,7 +273,7 @@ kube_memory_reserved: 256Mi
kube_cpu_reserved: 100m
kube_ephemeral_storage_reserved: 2Gi
# kube_pid_reserved: "1000"
# Reservation for master hosts
# Reservation for control plane hosts
kube_master_memory_reserved: 512Mi
kube_master_cpu_reserved: 200m
kube_master_ephemeral_storage_reserved: 2Gi
@@ -371,11 +367,25 @@ auto_renew_certificates: false
# First Monday of each month
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
# kubeadm patches path
kubeadm_patches:
enabled: false
source_dir: "{{ inventory_dir }}/patches"
dest_dir: "{{ kube_config_dir }}/patches"
kubeadm_patches_dir: "{{ kube_config_dir }}/patches"
kubeadm_patches: []
# See https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#patches
# Correspondence with this link
# patchtype = type
# target = target
# suffix -> managed automatically
# extension -> always "yaml"
# kubeadm_patches:
# - target: kube-apiserver|kube-controller-manager|kube-scheduler|etcd|kubeletconfiguration
# type: strategic(default)|json|merge
# patch:
# metadata:
# annotations:
# example.com/test: "true"
# labels:
# example.com/prod_level: "{{ prod_level }}"
# - ...
# Patches are applied in the order they are specified.
# Set to true to remove the role binding to anonymous users created by kubeadm
remove_anonymous_access: false

@@ -30,12 +30,12 @@ k8s-control-01
k8s-control-02
k8s-control-03
[custom_kube_node_with_ingress]
[custom_kube_node_worker_with_ingress]
k8s-worker-01
k8s-worker-02
k8s-worker-03
[kube_node]
[custom_kube_node_for_only_build]
k8s-build-01
k8s-build-02
k8s-build-03
@@ -45,7 +45,8 @@ k8s-build-06
k8s-build-07
[kube_node:children]
custom_kube_node_with_ingress
custom_kube_node_worker_with_ingress
custom_kube_node_for_only_build
#[calico_rr]
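
The renamed [custom_kube_node_for_only_build] group pairs with the new node_labels group_vars file above. If those build nodes should also repel ordinary workloads, kubespray's node_taints variable could be set in the same group_vars scope — a sketch under that assumption, not something this MR configures; the taint key and value are illustrative:
---
# Hypothetical addition alongside node_labels for the build group.
node_taints:
  - "node-role.kubernetes.io/build=true:NoSchedule"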

@@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: kube-controller-manager
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '10257'

@@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '10259'
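
The two deleted patch files above injected Prometheus scrape annotations into the kube-controller-manager and kube-scheduler static pods via the old kubeadm patches directory. If those annotations are still wanted after the upgrade, they could be re-expressed through the new list-style kubeadm_patches shown in the k8s-cluster.yml hunk — a sketch only, not applied by this MR:
---
# Sketch: recreate the removed scrape annotations with the list-style kubeadm_patches.
kubeadm_patches:
  - target: kube-controller-manager
    type: strategic
    patch:
      metadata:
        annotations:
          prometheus.io/scrape: 'true'
          prometheus.io/port: '10257'
  - target: kube-scheduler
    type: strategic
    patch:
      metadata:
        annotations:
          prometheus.io/scrape: 'true'
          prometheus.io/port: '10259'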