[DO-1431] Final config PROD k8s (!3)
DO-1431
Co-authored-by: denis.patrakeev <denis.patrakeev@avroid.tech>
Reviewed-on: https://git.avroid.tech/K8s/k8s-deploy/pulls/3
env/avroid_prod/k8s-avroid-office.prod.local/inventory/group_vars/all/all.yml (new file, 141 lines)
@@ -0,0 +1,141 @@
---
## Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
# access_ip: 1.1.1.1


## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
#   address: 1.2.3.4
#   port: 1234

## Internal loadbalancers for apiservers
loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
loadbalancer_apiserver_type: nginx

## The local loadbalancer should use this port,
## and it must be set to port 6443
loadbalancer_apiserver_port: 6443

## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081

### OTHER OPTIONAL VARIABLES

## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries.
## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it still uses them to make sure the cluster installs safely in the dns_early stage.
## Use this option with caution; you may need to define your own DNS servers, otherwise outbound queries such as www.google.com may fail.
# disable_host_nameservers: false

## Upstream dns servers
upstream_dns_servers:
  - 10.2.4.10
  - 10.2.4.20
  - 10.3.0.101

## There are some changes specific to the cloud providers,
## for instance we need to encapsulate packets with some network plugins.
## If set, the possible values are 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'.
## When openstack is used, make sure to source in the openstack credentials,
## like you would do when using openstack-client, before starting the playbook.
# cloud_provider:

## When cloud_provider is set to 'external', you can set the cloud controller to deploy.
## Supported cloud controllers are: 'openstack', 'vsphere', 'huaweicloud' and 'hcloud'.
## When openstack or vsphere are used, make sure to source in the required fields.
# external_cloud_provider:

## Set these proxy values in order to update the package manager and docker daemon to use proxies, and a custom CA for https_proxy if needed.
# http_proxy: ""
# https_proxy: ""
# https_proxy_cert_file: ""

## Refer to roles/kubespray-defaults/defaults/main/main.yml before modifying no_proxy
# no_proxy: ""

## Some problems may occur when downloading files over an https proxy due to an ansible bug:
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of the get_url module. Note that kubespray will still perform checksum validation.
# download_validate_certs: False

## If you need to exclude all cluster nodes from the proxy and other resources, add them here.
# additional_no_proxy: ""

## If you need to disable proxying of OS package repositories but are still behind an http_proxy, set
## skip_http_proxy_on_os_packages to true.
## This will cause kubespray not to set the proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu.
## Special note for debian/ubuntu: you have to set the no_proxy variable, then apt packages will install from the source you intend.
# skip_http_proxy_on_os_packages: false

## Since workers are included in the no_proxy variable by default, the docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour, include only master nodes in the
## no_proxy variable by setting the option below to true:
no_proxy_exclude_workers: false

## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Options are "script" and "none".
cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false

## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255

## Set to true to download and cache container images
download_container: true

## Deploy container engine
# Set to false if you want to deploy the container engine manually.
# deploy_container_engine: true

## Red Hat Enterprise Linux subscription registration
## Add either a RHEL subscription Username/Password or Organization ID/Activation Key combination.
## Update the RHEL subscription purpose usage, role and SLA if necessary.
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"

## Check if access_ip responds to ping. Set to false if your firewall blocks ICMP.
# ping_access_ip: true

# sysctl_file_path to add sysctl conf to
# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"

## Variables for webhook token auth: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
# kube_webhook_token_auth_url: https://...
## base64-encoded string of the webhook's CA certificate
# kube_webhook_token_auth_ca_data: "LS0t..."

## NTP Settings
# Start the ntpd or chrony service and enable it at system boot.
ntp_enabled: true
ntp_manage_config: true
ntp_servers:
  - "ntp-01.avroid.tech iburst"
  - "ntp-02.avroid.tech iburst"
  - "ntp-03.avroid.tech iburst"
# Set timezone
ntp_timezone: Europe/Moscow

## Used to control the no_log attribute
unsafe_show_logs: false

## If enabled, it will allow kubespray to attempt setup even if the distribution is not supported. For unsupported distributions this can lead to unexpected failures in some cases.
allow_unsupported_distribution_setup: false
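
These group_vars apply to every host in the inventory tree they live in. For context, a minimal sketch of the hosts.yaml file such an inventory typically pairs with, following Kubespray's standard group layout (node names and addresses below are hypothetical placeholders, not part of this commit):

all:
  hosts:
    k8s-master-01:             # hypothetical node name
      ansible_host: 10.2.4.31  # hypothetical address
      ip: 10.2.4.31
  children:
    kube_control_plane:
      hosts:
        k8s-master-01:
    kube_node:
      hosts:
        k8s-master-01:
    etcd:
      hosts:
        k8s-master-01:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node: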
env/avroid_prod/k8s-avroid-office.prod.local/inventory/group_vars/all/containerd.yml (new file, 89 lines)
@@ -0,0 +1,89 @@
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options

containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0

# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"

# containerd_runc_runtime:
#   name: runc
#   type: "io.containerd.runc.v2"
#   engine: ""
#   root: ""

# containerd_additional_runtimes:
# Example for Kata Containers as additional runtime:
#   - name: kata
#     type: "io.containerd.kata.v2"
#     engine: ""
#     root: ""

# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216

# Containerd debug socket location: unix or tcp format
# containerd_debug_address: ""

# Containerd log level
# containerd_debug_level: "info"

# Containerd logs format, supported values: text, json
# containerd_debug_format: ""

# Containerd debug socket UID
# containerd_debug_uid: 0

# Containerd debug socket GID
# containerd_debug_gid: 0

# containerd_metrics_address: ""

# containerd_metrics_grpc_histogram: false

# Registries defined within containerd.
containerd_registries_mirrors:
  - prefix: docker.io
    mirrors:
      - host: https://mirror-gcr-io-proxy.avroid.tech
        capabilities: ["pull", "resolve"]
        skip_verify: false
      - host: https://eu-central-1-mirror-aliyuncs-com-proxy.avroid.tech
        capabilities: ["pull", "resolve"]
        skip_verify: false
      - host: https://registry-1.docker.io
        capabilities: ["pull", "resolve"]
        skip_verify: false
  - prefix: quay.io
    mirrors:
      - host: https://quay-proxy.avroid.tech
        capabilities: ["pull", "resolve"]
        skip_verify: false
      - host: https://quay.io
        capabilities: ["pull", "resolve"]
        skip_verify: false
  - prefix: ghcr.io
    mirrors:
      - host: https://ghcr-proxy.avroid.tech
        capabilities: ["pull", "resolve"]
        skip_verify: false
      - host: https://ghcr.io
        capabilities: ["pull", "resolve"]
        skip_verify: false
  - prefix: registry.k8s.io
    mirrors:
      - host: https://registry-k8s-io-proxy.avroid.tech
        capabilities: ["pull", "resolve"]
        skip_verify: false
      - host: https://registry.k8s.io
        capabilities: ["pull", "resolve"]
        skip_verify: false

# containerd_max_container_log_line_size: -1

# containerd_registry_auth:
#   - registry: 10.0.0.2:5000
#     username: user
#     password: pass
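
Should a private, authenticated registry be added later, the two variables above compose: containerd_registries_mirrors routes the pulls while containerd_registry_auth supplies credentials. A minimal sketch under that assumption (registry host, username, and vault variable are hypothetical placeholders):

containerd_registries_mirrors:
  - prefix: registry.internal.example   # hypothetical private registry
    mirrors:
      - host: https://registry.internal.example
        capabilities: ["pull", "resolve"]
        skip_verify: false

containerd_registry_auth:
  - registry: registry.internal.example
    username: deploy-bot                       # hypothetical account
    password: "{{ vault_registry_password }}"  # assumed to live in Ansible Vault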
env/avroid_prod/k8s-avroid-office.prod.local/inventory/group_vars/all/etcd.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
## Directory where etcd data is stored
etcd_data_dir: /data/etcd

## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
## Additionally you can set this to kubeadm if you want to install etcd using kubeadm.
## Kubeadm etcd deployment is experimental and only available for new deployments.
## If this is not set, the container manager will be inherited from the Kubespray defaults
## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want.
## This also makes it possible to use a different container manager for the etcd nodes.
# container_manager: containerd

## Settings for etcd deployment type
# Set this to docker if you are using container_manager: docker
etcd_deployment_type: host
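
As the comments above spell out, etcd_deployment_type must match the cluster's container manager; with the containerd setup in this inventory, host is the consistent choice. A sketch of the alternative pairing the comment alludes to, shown for illustration only:

# Only if the cluster ran Docker instead of the containerd configured above:
container_manager: docker
etcd_deployment_type: docker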