Added stuff
hosts | 2 +-
@@ -19,4 +19,4 @@ edge-cluster-2-worker-2 4096 edge-2 192.168.112.102
 site-emulator-1 2048 site 192.168.113.100
 
 # magic router
-magic-router 1024 cloud,edge-1,edge-2,site -
+magic-router 1024 cloud,edge-1,edge-2,site @magic_router_netplan.yaml
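The change attaches a netplan file to the magic-router node where the old topology line had `-` (none). As a rough sketch of what such a file could contain — the interface name and address below are purely illustrative assumptions, not taken from the repository:

network:
  version: 2
  ethernets:
    eth0:
      dhcp4: false
      addresses: [192.168.113.1/24]  # illustrative: an address on the site network (192.168.113.0/24 per the hosts file)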
							
								
								
									
kubespray_inventory/cloud/group_vars/all/all.yml | 118 (new file)
@@ -0,0 +1,118 @@
---
## Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd

## Experimental kubeadm etcd deployment mode. Available only for new deployments
etcd_kubeadm_enabled: false

## Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node, for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
# access_ip: 1.1.1.1


## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
#   address: 1.2.3.4
#   port: 1234

## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx  # valid values "nginx" or "haproxy"

## If cilium is going to be used in strict mode, we can use the
## localhost connection and not use the external LB. If this parameter is
## not specified, the first node to connect to kubeapi will be used.
# use_localhost_as_kubeapi_loadbalancer: true

## The local loadbalancer should use this port,
## and it must be set to port 6443
loadbalancer_apiserver_port: 6443

## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081

### OTHER OPTIONAL VARIABLES

## Upstream dns servers
# upstream_dns_servers:
#   - 8.8.8.8
#   - 8.8.4.4

## There are some changes specific to the cloud providers;
## for instance, we need to encapsulate packets with some network plugins.
## If set, the possible values are 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'.
## When openstack is used, make sure to source in the openstack credentials
## like you would do when using openstack-client before starting the playbook.
# cloud_provider:

## When cloud_provider is set to 'external', you can set the cloud controller to deploy
## Supported cloud controllers are: 'openstack' and 'vsphere'
## When openstack or vsphere are used, make sure to source in the required fields
# external_cloud_provider:

## Set these proxy values in order to update the package manager and docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""

## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""

## Some problems may occur when downloading files over an https proxy due to an ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of the get_url module. Note that kubespray will still perform checksum validation.
# download_validate_certs: False

## If you need to exclude all cluster nodes from proxy and other resources, add the other resources here.
# additional_no_proxy: ""

## If you need to disable proxying of os package repositories but are still behind an http_proxy, set
## skip_http_proxy_on_os_packages to true.
## This will cause kubespray not to set the proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu.
## Special information for debian/ubuntu: you have to set the no_proxy variable, then apt packages will install from your chosen source.
# skip_http_proxy_on_os_packages: false

## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour and only include master nodes in the
## no_proxy variable, set the value below to true:
no_proxy_exclude_workers: false

## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Options are "script" or "none".
# cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false

## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255

## Set true to download and cache container images
# download_container: true

## Deploy container engine
# Set false if you want to deploy the container engine manually.
# deploy_container_engine: true

## Red Hat Enterprise Linux subscription registration
## Add either a RHEL subscription Username/Password or Organization ID/Activation Key combination
## Update RHEL subscription purpose usage, role and SLA if necessary
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"

## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
# ping_access_ip: true
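For instance, fronting the apiservers with the external LB described above amounts to uncommenting the block and filling in real values; a minimal sketch reusing the file's own placeholders:

apiserver_loadbalancer_domain_name: "elb.some.domain"
loadbalancer_apiserver:
  address: 1.2.3.4
  port: 1234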
							
								
								
									
kubespray_inventory/cloud/group_vars/all/aws.yml | 9 (new file)
@@ -0,0 +1,9 @@
## To use the AWS EBS CSI Driver to provision volumes, uncomment the first value
## and configure the parameters below
# aws_ebs_csi_enabled: true
# aws_ebs_csi_enable_volume_scheduling: true
# aws_ebs_csi_enable_volume_snapshot: false
# aws_ebs_csi_enable_volume_resizing: false
# aws_ebs_csi_controller_replicas: 1
# aws_ebs_csi_plugin_image_tag: latest
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"
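Uncommented and enabled, the block above would look like the following sketch (tag values are the file's own placeholders, with the quoting fixed to matching double quotes):

aws_ebs_csi_enabled: true
aws_ebs_csi_enable_volume_scheduling: true
aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"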
							
								
								
									
kubespray_inventory/cloud/group_vars/all/azure.yml | 40 (new file)
@@ -0,0 +1,40 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values

# azure_cloud:
# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
# azure_aad_client_secret:
# azure_resource_group:
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_security_group_resource_group:
# azure_vnet_name:
# azure_vnet_resource_group:
# azure_route_table_name:
# azure_route_table_resource_group:
# supported values are 'standard' or 'vmss'
# azure_vmtype: standard

## Azure Disk CSI credentials and parameters
## see docs/azure-csi.md for details on how to get these values

# azure_csi_tenant_id:
# azure_csi_subscription_id:
# azure_csi_aad_client_id:
# azure_csi_aad_client_secret:
# azure_csi_location:
# azure_csi_resource_group:
# azure_csi_vnet_name:
# azure_csi_vnet_resource_group:
# azure_csi_subnet_name:
# azure_csi_security_group_name:
# azure_csi_use_instance_metadata:
# azure_csi_tags: "Owner=owner,Team=team,Environment=environment"

## To enable Azure Disk CSI, uncomment below
# azure_csi_enabled: true
# azure_csi_controller_replicas: 1
# azure_csi_plugin_image_tag: latest
							
								
								
									
kubespray_inventory/cloud/group_vars/all/containerd.yml | 44 (new file)
@@ -0,0 +1,44 @@
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options

# containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0

# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"

# containerd_runtimes:
#   - name: runc
#     type: "io.containerd.runc.v2"
#     engine: ""
#     root: ""
# Example for Kata Containers as additional runtime:
#   - name: kata
#     type: "io.containerd.kata.v2"
#     engine: ""
#     root: ""

# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216

# containerd_debug_level: "info"

# containerd_metrics_address: ""

# containerd_metrics_grpc_histogram: false

# containerd_registries:
#   "docker.io": "https://registry-1.docker.io"

# containerd_max_container_log_line_size: -1

# containerd_registry_auth:
#   - registry:
#     username: user
#     password: pass

containerd_registries:
  "docker.io":
    - "https://mirror.gcr.io"
    - "https://registry-1.docker.io"
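The commented containerd_registry_auth template above leaves the registry value empty; filled in, an entry would look like this sketch (the registry address and credentials are hypothetical placeholders, not values from the repo):

containerd_registry_auth:
  - registry: registry.example.com:5000  # hypothetical private registry
    username: user
    password: pass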
							
								
								
									
kubespray_inventory/cloud/group_vars/all/coreos.yml | 2 (new file)
@@ -0,0 +1,2 @@
## Whether CoreOS should auto-upgrade; default is true
# coreos_auto_upgrade: true
							
								
								
									
kubespray_inventory/cloud/group_vars/all/cri-o.yml | 6 (new file)
@@ -0,0 +1,6 @@
# crio_insecure_registries:
#   - 10.0.0.2:5000
# crio_registry_auth:
#   - registry: 10.0.0.2:5000
#     username: user
#     password: pass
							
								
								
									
kubespray_inventory/cloud/group_vars/all/docker.yml | 59 (new file)
@@ -0,0 +1,59 @@
---
## Uncomment this if you want to force overlay/overlay2 as the docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2

## Enable docker_container_storage_setup; it will configure the devicemapper driver on CentOS 7 or RedHat 7.
docker_container_storage_setup: false

## A disk path must be defined for docker_container_storage_setup_devs.
## Otherwise docker-storage-setup will be executed incorrectly.
# docker_container_storage_setup_devs: /dev/vdb

## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
## Valid options are systemd or cgroupfs; the default is systemd
# docker_cgroup_driver: systemd

## Only set this if you have more than 3 nameservers:
## If true, Kubespray will only use the first 3, otherwise it will fail
docker_dns_servers_strict: false

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## Used to set docker daemon iptables options to true
docker_iptables_enabled: "false"

# Docker log options
# Rotate container stderr/stdout logs at 50m and keep the last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"

# define docker bin_dir
docker_bin_dir: "/usr/bin"

# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
# kubespray deletes the docker package on each run, so caching the package makes sense
docker_rpm_keepcache: 1

## An obvious use case is allowing insecure-registry access to self-hosted registries.
## Can be an ip address or a domain_name,
## e.g. 172.19.16.11 or mirror.registry.io
# docker_insecure_registries:
#   - mirror.registry.io
#   - 172.19.16.11

## Add other registries, for example a China registry mirror.
# docker_registry_mirrors:
#   - https://registry.docker-cn.com
#   - https://mirror.aliyuncs.com

## If non-empty, will override the default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which controls whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for the system default.
# docker_mount_flags:

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
# docker_options: ""
							
								
								
									
kubespray_inventory/cloud/group_vars/all/gcp.yml | 10 (new file)
@@ -0,0 +1,10 @@
## GCP compute Persistent Disk CSI Driver credentials and parameters
## See docs/gcp-pd-csi.md for information about the implementation

## Specify the path to the file containing the service account credentials
# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"

## To enable the GCP Persistent Disk CSI driver, uncomment below
# gcp_pd_csi_enabled: true
# gcp_pd_csi_controller_replicas: 1
# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"
							
								
								
									
kubespray_inventory/cloud/group_vars/all/oci.yml | 28 (new file)
@@ -0,0 +1,28 @@
## When Oracle Cloud Infrastructure is used, set these variables
# oci_private_key:
# oci_region_id:
# oci_tenancy_id:
# oci_user_id:
# oci_user_fingerprint:
# oci_compartment_id:
# oci_vnc_id:
# oci_subnet1_id:
# oci_subnet2_id:
## Override these default/optional behaviors if you wish
# oci_security_list_management: All
## If you would like the controller to manage specific lists per subnet, provide a mapping of subnet OCIDs to security list OCIDs. Below are examples.
# oci_security_lists:
#   ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
#   ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
# oci_use_instance_principals: false
# oci_cloud_controller_version: 0.6.0
## If you would like to control OCI query rate limits for the controller
# oci_rate_limit:
#   rate_limit_qps_read:
#   rate_limit_qps_write:
#   rate_limit_bucket_read:
#   rate_limit_bucket_write:
## Other optional variables
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
							
								
								
									
kubespray_inventory/cloud/group_vars/all/offline.yml | 84 (new file)
@@ -0,0 +1,84 @@
---
## Global Offline settings
### Private Container Image Registry
# registry_host: "myprivateregistry.com"
# files_repo: "http://myprivatehttpd"
### If using CentOS, RedHat, AlmaLinux or Fedora
# yum_repo: "http://myinternalyumrepo"
### If using Debian
# debian_repo: "http://myinternaldebianrepo"
### If using Ubuntu
# ubuntu_repo: "http://myinternalubunturepo"

## Container Registry overrides
# kube_image_repo: "{{ registry_host }}"
# gcr_image_repo: "{{ registry_host }}"
# github_image_repo: "{{ registry_host }}"
# docker_image_repo: "{{ registry_host }}"
# quay_image_repo: "{{ registry_host }}"

## Kubernetes components
# kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
# kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"

## CNI Plugins
# cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"

## cri-tools
# crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"

## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
# etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"

# [Optional] Calico: if using the Calico network plugin
# calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# [Optional] Calico with kdd: if using the Calico network plugin with the kdd datastore
# calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"

# [Optional] helm: only if you set helm_enabled: true
# helm_download_url: "{{ files_repo }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"

# [Optional] crun: only if you set crun_enabled: true
# crun_download_url: "{{ files_repo }}/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"

# [Optional] kata: only if you set kata_containers_enabled: true
# kata_containers_download_url: "{{ files_repo }}/kata-containers/runtime/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"

# [Optional] cri-o: only if you set container_manager: crio
# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"

## CentOS/Redhat/AlmaLinux
### For EL7, the base and extras repos must be available; for EL8, baseos and appstream
### By default we enable those repos automatically
# rhel_enable_repos: false
### Docker / Containerd
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"

## Fedora
### Docker
# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
### Containerd
# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"

## Debian
### Docker
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
### Containerd
# containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_debian_repo_repokey: 'YOURREPOKEY'

## Ubuntu
### Docker
# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
### Containerd
# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
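As a worked example of how these templates expand: with files_repo: "http://myprivatehttpd" as above and kube_version: v1.21.6 (set in k8s-cluster.yml below), the kubeadm URL resolves to:

kubeadm_download_url: "http://myprivatehttpd/kubernetes/v1.21.6/kubeadm"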
							
								
								
									
kubespray_inventory/cloud/group_vars/all/openstack.yml | 49 (new file)
@@ -0,0 +1,49 @@
## When OpenStack is used, the Cinder version can be explicitly specified if autodetection fails (fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
# openstack_blockstorage_ignore_volume_az: yes
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
# openstack_lbaas_enabled: True
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
# openstack_lbaas_create_monitor: "yes"
# openstack_lbaas_monitor_delay: "1m"
# openstack_lbaas_monitor_timeout: "30s"
# openstack_lbaas_monitor_max_retries: "3"

## Values for the external OpenStack Cloud Controller
# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
# external_openstack_lbaas_method: "ROUND_ROBIN"
# external_openstack_lbaas_provider: "octavia"
# external_openstack_lbaas_create_monitor: false
# external_openstack_lbaas_monitor_delay: "1m"
# external_openstack_lbaas_monitor_timeout: "30s"
# external_openstack_lbaas_monitor_max_retries: "3"
# external_openstack_lbaas_manage_security_groups: false
# external_openstack_lbaas_internal_lb: false
# external_openstack_network_ipv6_disabled: false
# external_openstack_network_internal_networks: []
# external_openstack_network_public_networks: []
# external_openstack_metadata_search_order: "configDrive,metadataService"

## Application credentials to authenticate against the Keystone API
## These settings will take precedence over the username and password that might be set in your environment
## All of them are required
# external_openstack_application_credential_name:
# external_openstack_application_credential_id:
# external_openstack_application_credential_secret:

## The tag of the external OpenStack Cloud Controller image
# external_openstack_cloud_controller_image_tag: "latest"

## To use the Cinder CSI plugin to provision volumes, set this value to true
## Make sure to source in the openstack credentials
# cinder_csi_enabled: true
# cinder_csi_controller_replicas: 1
							
								
								
									
kubespray_inventory/cloud/group_vars/all/vsphere.yml | 32 (new file)
@@ -0,0 +1,32 @@
## Values for the external vSphere Cloud Provider
# external_vsphere_vcenter_ip: "myvcenter.domain.com"
# external_vsphere_vcenter_port: "443"
# external_vsphere_insecure: "true"
# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable
# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable
# external_vsphere_datacenter: "DATACENTER_name"
# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"

## vSphere version where the VMs are located
# external_vsphere_version: "6.7u3"

## Tags for the external vSphere Cloud Provider images
## gcr.io/cloud-provider-vsphere/cpi/release/manager
# external_vsphere_cloud_controller_image_tag: "latest"
## gcr.io/cloud-provider-vsphere/csi/release/syncer
# vsphere_syncer_image_tag: "v2.2.1"
## quay.io/k8scsi/csi-attacher
# vsphere_csi_attacher_image_tag: "v3.1.0"
## gcr.io/cloud-provider-vsphere/csi/release/driver
# vsphere_csi_controller: "v2.2.1"
## quay.io/k8scsi/livenessprobe
# vsphere_csi_liveness_probe_image_tag: "v2.2.0"
## quay.io/k8scsi/csi-provisioner
# vsphere_csi_provisioner_image_tag: "v2.1.0"
## quay.io/k8scsi/csi-resizer
## makes sense only for vSphere version >= 7.0
# vsphere_csi_resizer_tag: "v1.1.0"

## To use the vSphere CSI plugin to provision volumes, set this value to true
# vsphere_csi_enabled: true
# vsphere_csi_controller_replicas: 1
							
								
								
									
kubespray_inventory/cloud/group_vars/etcd.yml | 22 (new file)
@@ -0,0 +1,22 @@
---
## Etcd auto compaction retention for the mvcc key value store, in hours
# etcd_compaction_retention: 0

## Set the level of detail for etcd exported metrics; specify 'extensive' to include histogram metrics.
# etcd_metrics: basic

## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
# etcd_memory_limit: "512M"

## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
## the etcd documentation for more information.
# etcd_quota_backend_bytes: "2147483648"

### ETCD: disable peer client cert authentication.
# This affects the ETCD_PEER_CLIENT_CERT_AUTH variable
# etcd_peer_client_auth: true

## Settings for etcd deployment type
etcd_deployment_type: host
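Per the warning above, etcd_memory_limit should not be set below etcd_quota_backend_bytes; a sketch that raises the memory limit while keeping the default 2 GiB quota (values are illustrative, not from the repo):

etcd_memory_limit: "4G"
etcd_quota_backend_bytes: "2147483648"  # 2 GiB, safely below the 4G memory limit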
							
								
								
									
kubespray_inventory/cloud/group_vars/k8s_cluster/addons.yml | 187 (new file)
@@ -0,0 +1,187 @@
---
# Kubernetes dashboard
# RBAC required. See docs/getting-started.md for access details.
# dashboard_enabled: false

# Helm deployment
helm_enabled: false

# Registry deployment
registry_enabled: false
# registry_namespace: kube-system
# registry_storage_class: ""
# registry_disk_size: "10Gi"

# Metrics Server deployment
metrics_server_enabled: false
# metrics_server_resizer: false
# metrics_server_kubelet_insecure_tls: true
# metrics_server_metric_resolution: 15s
# metrics_server_kubelet_preferred_address_types: "InternalIP"

# Rancher Local Path Provisioner
local_path_provisioner_enabled: false
# local_path_provisioner_namespace: "local-path-storage"
# local_path_provisioner_storage_class: "local-path"
# local_path_provisioner_reclaim_policy: Delete
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
# local_path_provisioner_debug: false
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
# local_path_provisioner_image_tag: "v0.0.19"
# local_path_provisioner_helper_image_repo: "busybox"
# local_path_provisioner_helper_image_tag: "latest"

# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: kube-system
# local_volume_provisioner_nodelabels:
#   - kubernetes.io/hostname
#   - topology.kubernetes.io/region
#   - topology.kubernetes.io/zone
# local_volume_provisioner_storage_classes:
#   local-storage:
#     host_dir: /mnt/disks
#     mount_dir: /mnt/disks
#     volume_mode: Filesystem
#     fs_type: ext4
#   fast-disks:
#     host_dir: /mnt/fast-disks
#     mount_dir: /mnt/fast-disks
#     block_cleaner_command:
#       - "/scripts/shred.sh"
#       - "2"
#     volume_mode: Filesystem
#     fs_type: ext4

# CSI Volume Snapshot Controller deployment; set this to true if your CSI is able to manage snapshots
# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller
# Longhorn is an external CSI that would also require setting this to true, but it is not included in kubespray
# csi_snapshot_controller_enabled: false

# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true

# RBD provisioner deployment
rbd_provisioner_enabled: false
# rbd_provisioner_namespace: rbd-provisioner
# rbd_provisioner_replicas: 2
# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# rbd_provisioner_pool: kube
# rbd_provisioner_admin_id: admin
# rbd_provisioner_secret_name: ceph-secret-admin
# rbd_provisioner_secret: ceph-key-admin
# rbd_provisioner_user_id: kube
# rbd_provisioner_user_secret_name: ceph-secret-user
# rbd_provisioner_user_secret: ceph-key-user
# rbd_provisioner_user_secret_namespace: rbd-provisioner
# rbd_provisioner_fs_type: ext4
# rbd_provisioner_image_format: "2"
# rbd_provisioner_image_features: layering
# rbd_provisioner_storage_class: rbd
# rbd_provisioner_reclaim_policy: Delete

# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
#   kubernetes.io/os: "linux"
# ingress_nginx_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
#   map-hash-bucket-size: "128"
#   ssl-protocols: "TLSv1.2 TLSv1.3"
# ingress_nginx_configmap_tcp_services:
#   9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
#   53: "kube-system/coredns:53"
# ingress_nginx_extra_args:
#   - --default-ssl-certificate=default/foo-tls
# ingress_nginx_class: nginx

# Ambassador ingress controller deployment
ingress_ambassador_enabled: false
# ingress_ambassador_namespace: "ambassador"
# ingress_ambassador_version: "*"
# ingress_ambassador_multi_namespaces: false

# ALB ingress controller deployment
ingress_alb_enabled: false
# alb_ingress_aws_region: "us-east-1"
# alb_ingress_restrict_scheme: "false"
# Enables logging on all outbound requests sent to the AWS API.
# If logging is desired, set to true.
# alb_ingress_aws_debug: "false"

# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"

# MetalLB deployment
metallb_enabled: false
metallb_speaker_enabled: true
# metallb_ip_range:
#   - "10.5.0.50-10.5.0.99"
# metallb_speaker_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_controller_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_speaker_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_controller_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_version: v0.10.2
# metallb_protocol: "layer2"
# metallb_port: "7472"
# metallb_memberlist_port: "7946"
# metallb_additional_address_pools:
#   kube_service_pool:
#     ip_range:
#       - "10.5.1.50-10.5.1.99"
#     protocol: "layer2"
#     auto_assign: false
# metallb_protocol: "bgp"
# metallb_peers:
#   - peer_address: 192.0.2.1
#     peer_asn: 64512
#     my_asn: 4200000000
#   - peer_address: 192.0.2.2
#     peer_asn: 64513
#     my_asn: 4200000000

# The plugin manager for kubectl
krew_enabled: false
krew_root_dir: "/usr/local/krew"
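As an example, a minimal layer-2 MetalLB setup would flip the switches above (the address range below is the file's own placeholder); note that MetalLB also requires kube_proxy_strict_arp: true, which lives in k8s-cluster.yml further down:

metallb_enabled: true
metallb_speaker_enabled: true
metallb_ip_range:
  - "10.5.0.50-10.5.0.99"
# plus, in k8s-cluster.yml:
# kube_proxy_strict_arp: true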
							
								
								
									
kubespray_inventory/cloud/group_vars/k8s_cluster/k8s-cluster.yml | 318 (new file)
@@ -0,0 +1,318 @@
---
# Kubernetes configuration dirs and system namespace.
# These are where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in the same location and namespace.
# Editing these values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"

kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.21.6

# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Directory where credentials will be stored
credentials_dir: "{{ inventory_dir }}/credentials"

## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
# kube_oidc_auth: false
# kube_token_auth: false


## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...)

# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: 'oidc:'
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: 'oidc:'

## Variables to control webhook authn/authz
# kube_webhook_token_auth: false
# kube_webhook_token_auth_url: https://...
# kube_webhook_token_auth_url_skip_tls_verify: false

## For webhook authorization, authorization_modes must include Webhook
# kube_webhook_authorization: false
# kube_webhook_authorization_url: https://...
# kube_webhook_authorization_url_skip_tls_verify: false

# Choose network plugin (cilium, calico, weave or flannel; use cni for a generic cni plugin)
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: weave

# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
kube_network_plugin_multus: false

# Kubernetes internal network for services; an unused block of space.
kube_service_addresses: 10.233.0.0/18

# Internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# internal network node size allocation (optional). This is the size allocated
# to each node for pod IP address allocation. Note that the number of pods per node is
# also limited by the kubelet_max_pods variable which defaults to 110.
#
# Example:
# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
#  - kube_pods_subnet: 10.233.64.0/18
#  - kube_network_node_prefix: 24
#  - kubelet_max_pods: 110
#
# Example:
# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
#  - kube_pods_subnet: 10.233.64.0/18
#  - kube_network_node_prefix: 25
#  - kubelet_max_pods: 110
kube_network_node_prefix: 24

# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false

# Kubernetes internal network for IPv6 services; an unused block of space.
# This is only used if enable_dual_stack_networks is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116

# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if enable_dual_stack_networks is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112

# IPv6 subnet size allocated to each node for pods.
# This is only used if enable_dual_stack_networks is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443  # (https)
# kube_apiserver_insecure_port: 8080  # (http)
# Set to 0 to disable the insecure port - requires RBAC in authorization_modes and kube_api_anonymous_auth: true
kube_apiserver_insecure_port: 0  # (disabled)
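As a worked example of the ipaddr filter chain used for kube_apiserver_ip: with kube_service_addresses: 10.233.0.0/18, taking the network, its address at index 1, and then just the address part yields the first usable service IP:

"10.233.0.0/18" | ipaddr('net') | ipaddr(1) | ipaddr('address')  # => 10.233.0.1

The skydns_server definitions further down use indexes 3 and 4 the same way, yielding 10.233.0.3 and 10.233.0.4.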
 | 
			
		||||
# Kube-proxy proxyMode configuration.
 | 
			
		||||
# Can be ipvs, iptables
 | 
			
		||||
kube_proxy_mode: ipvs
 | 
			
		||||
 | 
			
		||||
# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
 | 
			
		||||
# must be set to true for MetalLB to work
 | 
			
		||||
kube_proxy_strict_arp: false
 | 
			
		||||
 | 
			
		||||
# A string slice of values which specify the addresses to use for NodePorts.
 | 
			
		||||
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
 | 
			
		||||
# The default empty string slice ([]) means to use all local addresses.
 | 
			
		||||
# kube_proxy_nodeport_addresses_cidr is retained for legacy config
 | 
			
		||||
kube_proxy_nodeport_addresses: >-
 | 
			
		||||
  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
 | 
			
		||||
  [{{ kube_proxy_nodeport_addresses_cidr }}]
 | 
			
		||||
  {%- else -%}
 | 
			
		||||
  []
 | 
			
		||||
  {%- endif -%}
 | 
			
		||||
 | 
			
		||||
# If non-empty, will use this string as identification instead of the actual hostname
 | 
			
		||||
# kube_override_hostname: >-
 | 
			
		||||
#   {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
 | 
			
		||||
#   {%- else -%}
 | 
			
		||||
#   {{ inventory_hostname }}
 | 
			
		||||
#   {%- endif -%}
 | 
			
		||||
 | 
			
		||||
## Encrypting Secret Data at Rest (experimental)
 | 
			
		||||
kube_encrypt_secret_data: false
 | 
			
		||||
 | 
			
		||||
# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
 | 
			
		||||
# kubelet_shutdown_grace_period: 60s
 | 
			
		||||
# kubelet_shutdown_grace_period_critical_pods: 20s
 | 
			
		||||
 | 
			
		||||
# DNS configuration.
 | 
			
		||||
# Kubernetes cluster name, also will be used as DNS domain
 | 
			
		||||
cluster_name: cloud.local
 | 
			
		||||
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
 | 
			
		||||
ndots: 2
 | 
			
		||||
# Can be coredns, coredns_dual, manual or none
 | 
			
		||||
dns_mode: coredns
 | 
			
		||||
# Set manual server if using a custom cluster DNS server
 | 
			
		||||
# manual_dns_server: 10.x.x.x
 | 
			
		||||
# Enable nodelocal dns cache
 | 
			
		||||
enable_nodelocaldns: true
 | 
			
		||||
nodelocaldns_ip: 169.254.25.10
 | 
			
		||||
nodelocaldns_health_port: 9254
 | 
			
		||||
nodelocaldns_bind_metrics_host_ip: false
 | 
			
		||||
# nodelocaldns_external_zones:
 | 
			
		||||
# - zones:
 | 
			
		||||
#   - example.com
 | 
			
		||||
#   - example.io:1053
 | 
			
		||||
#   nameservers:
 | 
			
		||||
#   - 1.1.1.1
 | 
			
		||||
#   - 2.2.2.2
 | 
			
		||||
#   cache: 5
 | 
			
		||||
# - zones:
 | 
			
		||||
#   - https://mycompany.local:4453
 | 
			
		||||
#   nameservers:
 | 
			
		||||
#   - 192.168.0.53
 | 
			
		||||
#   cache: 0
 | 
			
		||||
# Enable k8s_external plugin for CoreDNS
 | 
			
		||||
enable_coredns_k8s_external: false
 | 
			
		||||
coredns_k8s_external_zone: k8s_external.local
 | 
			
		||||
# Enable endpoint_pod_names option for kubernetes plugin
 | 
			
		||||
enable_coredns_k8s_endpoint_pod_names: false
 | 
			
		||||
 | 
			
		||||
# Can be docker_dns, host_resolvconf or none
 | 
			
		||||
resolvconf_mode: docker_dns
 | 
			
		||||
# Deploy netchecker app to verify DNS resolve as an HTTP service
 | 
			
		||||
deploy_netchecker: false
 | 
			
		||||
# Ip address of the kubernetes skydns service
 | 
			
		||||
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
 | 
			
		||||
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
 | 
			
		||||
dns_domain: "{{ cluster_name }}"
 | 
			
		||||
 | 
			
		||||
## Container runtime
 | 
			
		||||
## docker for docker, crio for cri-o and containerd for containerd.
 | 
			
		||||
container_manager: containerd
 | 
			
		||||
 | 
			
		||||
# Additional container runtimes
 | 
			
		||||
kata_containers_enabled: false
 | 
			
		||||
 | 
			
		||||
kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"

# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# audit log for kubernetes
kubernetes_audit: false

# dynamic kubelet configuration
# Note: the DynamicKubeletConfig feature is deprecated in 1.22 and will not move to GA.
# It is planned to be removed from Kubernetes in version 1.23.
# Please use alternative ways to update kubelet configuration.
dynamic_kubelet_configuration: false

# define kubelet config dir for dynamic kubelet
# kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"

# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false

# Custom PodSecurityPolicySpec for restricted policy
# podsecuritypolicy_restricted_spec: {}

# Custom PodSecurityPolicySpec for privileged policy
# podsecuritypolicy_privileged_spec: {}

# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
# kubectl_localhost: false

# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods

## Optionally reserve resources for OS system daemons.
# system_reserved: true
## Uncomment to override default values
# system_memory_reserved: 512Mi
# system_cpu_reserved: 500m
## Reservation for master hosts
# system_master_memory_reserved: 256Mi
# system_master_cpu_reserved: 250m

# An alternative flexvolume plugin directory
# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec

## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful, for example, to set up a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]

## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to the NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-sigs/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
persistent_volumes_enabled: false

## Container Engine Acceleration
## Enable container acceleration feature, for example GPU acceleration in containers
# nvidia_accelerator_enabled: true
## NVIDIA GPU driver install. The install will be done by an (init) pod running as a daemonset.
## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
## Array with nvidia_gpu_nodes; leave empty or commented out if you don't want to install drivers.
## Labels and taints won't be set on nodes that are not in the array.
# nvidia_gpu_nodes:
#   - kube-gpu-001
# nvidia_driver_version: "384.111"
## flavor can be tesla or gtx
# nvidia_gpu_flavor: gtx
## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
## NVIDIA GPU device plugin image.
# nvidia_gpu_device_plugin_container: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"

## Minimum supported TLS version. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""

## Supported TLS cipher suites.
# tls_cipher_suites: {}
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
#   - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_RSA_WITH_RC4_128_SHA
#   - TLS_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_RSA_WITH_AES_256_CBC_SHA
#   - TLS_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_RSA_WITH_RC4_128_SHA

## Amount of time to retain events. (default 1h0m0s)
event_ttl_duration: "1h0m0s"

## Automatically renew K8S control plane certificates on the first Monday of each month
auto_renew_certificates: false
# First Monday of each month
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
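# What the calendar expression above renders to (illustrative; assumes a
# two-node control plane listed as [cp-0, cp-1] in the inventory):
#   on cp-0 (index 0): "Mon *-*-1,2,3,4,5,6,7 03:00:00"
#   on cp-1 (index 1): "Mon *-*-1,2,3,4,5,6,7 03:10:00"
# i.e. each control-plane node renews 10 minutes apart, on the first Monday of
# the month (the only Monday that can fall on days 1-7).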

@@ -0,0 +1,109 @@
# see roles/network_plugin/calico/defaults/main.yml

## With calico it is possible to distribute routes with the border routers of the datacenter.
## Warning: enabling router peering will disable calico's default behavior ('node mesh').
## The subnet of each node will be distributed by the datacenter router
# peer_with_router: false

# Enables Internet connectivity from containers
# nat_outgoing: true

# Enables Calico CNI "host-local" IPAM plugin
# calico_ipam_host_local: true

# add default ippool name
# calico_pool_name: "default-pool"

# add default ippool blockSize (defaults to kube_network_node_prefix)
# calico_pool_blocksize: 24

# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
# calico_pool_cidr: 1.2.3.4/5

# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112

# Global as_num (/calico/bgp/v1/global/as_num)
# global_as_num: "64512"

# If peering with node-assigned AS numbers where the global AS number does not match your nodes,
# you want this to be true. In all other cases, false.
# calico_no_global_as_num: false

# You can set MTU value here. If left undefined or empty, it will
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500

# Configure the MTU to use for workload interfaces and tunnels.
# - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440)
# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
# calico_veth_mtu: 1440
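# A minimal sketch for a VXLAN-only cluster on a standard 1500-MTU network
# (assumed values, not defaults taken from this inventory):
# calico_network_backend: vxlan
# calico_vxlan_mode: 'Always'
# calico_ipip_mode: 'Never'
# calico_veth_mtu: 1450  # 1500 minus 50 bytes of VXLAN overhead, per the rule above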

# Advertise Cluster IPs
# calico_advertise_cluster_ips: true

# Advertise Service External IPs
# calico_advertise_service_external_ips:
# - x.x.x.x/24
# - y.y.y.y/32

# Advertise Service LoadBalancer IPs
# calico_advertise_service_loadbalancer_ips:
# - x.x.x.x/24
# - y.y.y.y/16

# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
# calico_datastore: "kdd"

# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
# calico_iptables_backend: "Legacy"

# Use typha (only with kdd)
# typha_enabled: false

# Generate TLS certs for secure typha<->calico-node communication
# typha_secure: false

# Scaling typha: 1 replica per 100 nodes is adequate
# Number of typha replicas
# typha_replicas: 1

# Set max typha connections
# typha_max_connections_lower_limit: 300

# Set calico network backend: "bird", "vxlan" or "none"
# bird enables BGP routing, which is required for ipip mode.
# calico_network_backend: bird

# IP in IP and VXLAN are mutually exclusive modes.
# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_ipip_mode: 'Always'

# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_vxlan_mode: 'Never'

# set VXLAN port and VNI
# calico_vxlan_vni: 4096
# calico_vxlan_port: 4789

# If you want to use a non-default IP_AUTODETECTION_METHOD for calico node, set this option to one of:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX
# see https://docs.projectcalico.org/reference/node/configuration
# calico_ip_auto_method: "interface=eth.*"
# Choose the iptables insert mode for Calico: "Insert" or "Append".
# calico_felix_chaininsertmode: Insert

# If you want to use the default route interface when you have multiple interfaces with dynamic routes (iproute2)
# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
# calico_use_default_route_src_ipaddr: false

# Enable calico traffic encryption with wireguard
# calico_wireguard_enabled: false

# Under certain situations liveness and readiness probes may need tuning
# calico_node_livenessprobe_timeout: 10
# calico_node_readinessprobe_timeout: 10

@@ -0,0 +1,10 @@
# see roles/network_plugin/canal/defaults/main.yml

# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
# canal_iface: ""

# Whether or not to masquerade traffic to destinations not within
# the pod network.
# canal_masquerade: "true"

@@ -0,0 +1 @@
# see roles/network_plugin/cilium/defaults/main.yml

@@ -0,0 +1,18 @@
# see roles/network_plugin/flannel/defaults/main.yml

## interface that should be used for flannel operations
## This is actually an inventory cluster-level item
# flannel_interface:

## Select the interface that should be used for flannel operations by a regexp on Name or IP
## This is actually an inventory cluster-level item
## example: select interface with ip from net 10.0.0.0/23
## single quote and escape backslashes
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'

# You can choose what type of flannel backend to use: 'vxlan' or 'host-gw';
# for experimental backends, please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md
# flannel_backend_type: "vxlan"
# flannel_vxlan_vni: 1
# flannel_vxlan_port: 8472
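# The trade-off in one sketch (assumed value, not set by this inventory):
# vxlan encapsulates pod traffic in UDP and therefore works across routed (L3)
# segments, while host-gw programs plain host routes with no encapsulation
# overhead but requires all nodes to share an L2 segment.
# flannel_backend_type: "host-gw"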

@@ -0,0 +1,61 @@
# See roles/network_plugin/kube-router/defaults/main.yml

# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
# kube_router_run_router: true

# Enables Network Policy -- sets up iptables to provide ingress firewall for pods
# kube_router_run_firewall: true

# Enables Service Proxy -- sets up IPVS for Kubernetes Services
# see docs/kube-router.md "Caveats" section
# kube_router_run_service_proxy: false

# Add the Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_cluster_ip: false

# Add the External IP of the service to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_external_ip: false

# Add the LoadBalancer IP of the service status, as set by the LB provider, to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_loadbalancer_ip: false

# Adjust manifest of kube-router daemonset template with DSR needed changes
# kube_router_enable_dsr: false

# Array of arbitrary extra arguments to kube-router, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
# kube_router_extra_args: []

# ASN numbers of the BGP peer to which cluster nodes will advertise the cluster ip and each node's pod cidr.
# kube_router_peer_router_asns: ~

# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidrs.
# kube_router_peer_router_ips: ~

# The remote port of the external BGP peer. If not set, the default BGP port (179) will be used.
# kube_router_peer_router_ports: ~

# Sets up node CNI to allow hairpin mode; requires node reboots, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
# kube_router_support_hairpin_mode: false

# Select DNS Policy: ClusterFirstWithHostNet, ClusterFirst, etc.
# kube_router_dns_policy: ClusterFirstWithHostNet

# Array of annotations for master
# kube_router_annotations_master: []

# Array of annotations for every node
# kube_router_annotations_node: []

# Array of common annotations for every node
# kube_router_annotations_all: []

# Enables scraping kube-router metrics with Prometheus
# kube_router_enable_metrics: false

# Path to serve Prometheus metrics on
# kube_router_metrics_path: /metrics

# Prometheus metrics port to use
# kube_router_metrics_port: 9255

@@ -0,0 +1,6 @@
---
# private interface, on an l2-network
macvlan_interface: "eth1"

# Enable nat on the default gateway network interface
enable_nat_default_gateway: true

@@ -0,0 +1,61 @@
# see roles/network_plugin/weave/defaults/main.yml

# Weave's network password for encryption; if null then no network encryption.
# weave_password: ~

# If set to 1, disable checking for new Weave Net versions (default is blank,
# i.e. check is enabled)
# weave_checkpoint_disable: false

# Soft limit on the number of connections between peers. Defaults to 100.
# weave_conn_limit: 100

# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
# for containers attached. If you need to disable hairpin, e.g. your kernel is
# one of those that can panic if hairpin is enabled, then you can disable it by
# setting `HAIRPIN_MODE=false`.
# weave_hairpin_mode: true

# The range of IP addresses used by Weave Net and the subnet they are placed in
# (CIDR format; default 10.32.0.0/12)
# weave_ipalloc_range: "{{ kube_pods_subnet }}"

# Set to 0 to disable Network Policy Controller (default is on)
# weave_expect_npc: "{{ enable_network_policy }}"

# List of addresses of peers in the Kubernetes cluster (default is to fetch the
# list from the api-server)
# weave_kube_peers: ~

# Set the initialization mode of the IP Address Manager (defaults to consensus
# amongst the KUBE_PEERS)
# weave_ipalloc_init: ~

# Set the IP address used as a gateway from the Weave network to the host
# network - this is useful if you are configuring the addon as a static pod.
# weave_expose_ip: ~

# Address and port that the Weave Net daemon will serve Prometheus-style
# metrics on (defaults to 0.0.0.0:6782)
# weave_metrics_addr: ~

# Address and port that the Weave Net daemon will serve status requests on
# (defaults to disabled)
# weave_status_addr: ~

# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
# underlying network has a tighter limit, or set a larger size for better
# performance if your network supports jumbo frames (e.g. 8916)
# weave_mtu: 1376

# Set to 1 to preserve the client source IP address when accessing Services
# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
# only with Weave IPAM (default).
# weave_no_masq_local: true

# set to nft to use the nftables backend for iptables (default is iptables)
# weave_iptables_backend: iptables

# Extra variables passed to launch.sh, useful for enabling seed mode; see
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
# weave_extra_args: ~

40 kubespray_inventory/cloud/hosts.yaml Normal file
@@ -0,0 +1,40 @@
all:
  vars:
    ansible_user: ubuntu
    ansible_become: true
  hosts:
    cloud-cluster-1-master-1:
      ansible_host: cloud-cluster-1-master-1
      ip: 192.168.110.100
      access_ip: 192.168.110.100

    cloud-cluster-1-worker-1:
      ansible_host: cloud-cluster-1-worker-1
      ip: 192.168.110.101
      access_ip: 192.168.110.101

    cloud-cluster-1-worker-2:
      ansible_host: cloud-cluster-1-worker-2
      ip: 192.168.110.102
      access_ip: 192.168.110.102

  children:
    kube_control_plane:
      hosts:
        cloud-cluster-1-master-1:
    kube_node:
      hosts:
        cloud-cluster-1-master-1:
        cloud-cluster-1-worker-1:
        cloud-cluster-1-worker-2:
    etcd:
      hosts:
        cloud-cluster-1-master-1:
        cloud-cluster-1-worker-1:
        cloud-cluster-1-worker-2:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}
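# With this inventory a typical kubespray run looks like the following
# (a sketch; the playbook path assumes a kubespray checkout alongside this
# repository):
#   ansible-playbook -i kubespray_inventory/cloud/hosts.yaml \
#     --become --become-user=root kubespray/cluster.yml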

118 kubespray_inventory/edge-1/group_vars/all/all.yml Normal file
@@ -0,0 +1,118 @@
---
## Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd

## Experimental kubeadm etcd deployment mode. Available only for new deployments
etcd_kubeadm_enabled: false

## Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node.  This is used in flannel to allow other flannel nodes to see
## this node, for example.  The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
# access_ip: 1.1.1.1


## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
#   address: 1.2.3.4
#   port: 1234

## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx  # valid values "nginx" or "haproxy"

## If cilium is going to be used in strict mode, we can use the
## localhost connection and not use the external LB. If this parameter is
## not specified, the first node to connect to kubeapi will be used.
# use_localhost_as_kubeapi_loadbalancer: true

## The local loadbalancer should use this port,
## and it must be set to port 6443
loadbalancer_apiserver_port: 6443

## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081

### OTHER OPTIONAL VARIABLES

## Upstream dns servers
# upstream_dns_servers:
#   - 8.8.8.8
#   - 8.8.4.4

## There are some changes specific to the cloud providers;
## for instance we need to encapsulate packets with some network plugins
## If set, the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used, make sure to source in the openstack credentials
## like you would do when using openstack-client before starting the playbook.
# cloud_provider:

## When cloud_provider is set to 'external', you can set the cloud controller to deploy
## Supported cloud controllers are: 'openstack' and 'vsphere'
## When openstack or vsphere are used, make sure to source in the required fields
# external_cloud_provider:

## Set these proxy values in order to update the package manager and docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""

## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""

## Some problems may occur when downloading files over an https proxy due to an ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of the get_url module. Note that kubespray will still be performing checksum validation.
# download_validate_certs: False

## If you need to exclude all cluster nodes from the proxy and other resources, add those resources here.
# additional_no_proxy: ""

## If you need to disable proxying of os package repositories but are still behind an http_proxy, set
## skip_http_proxy_on_os_packages to true
## This will cause kubespray not to set the proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will install from the repository of your choice
# skip_http_proxy_on_os_packages: false

## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers.  To override this behaviour by only including master nodes in the
## no_proxy variable, set below to true:
no_proxy_exclude_workers: false

## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Options are "script" or "none"
# cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false

## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255

## Set true to download and cache container images
# download_container: true

## Deploy container engine
# Set false if you want to deploy the container engine manually.
# deploy_container_engine: true

## Red Hat Enterprise Linux subscription registration
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
## Update RHEL subscription purpose usage, role and SLA if necessary
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"

## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
# ping_access_ip: true

9 kubespray_inventory/edge-1/group_vars/all/aws.yml Normal file
@@ -0,0 +1,9 @@
## To use the AWS EBS CSI Driver to provision volumes, uncomment the first value
## and configure the parameters below
# aws_ebs_csi_enabled: true
# aws_ebs_csi_enable_volume_scheduling: true
# aws_ebs_csi_enable_volume_snapshot: false
# aws_ebs_csi_enable_volume_resizing: false
# aws_ebs_csi_controller_replicas: 1
# aws_ebs_csi_plugin_image_tag: latest
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"

40 kubespray_inventory/edge-1/group_vars/all/azure.yml Normal file
@@ -0,0 +1,40 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values

# azure_cloud:
# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
# azure_aad_client_secret:
# azure_resource_group:
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_security_group_resource_group:
# azure_vnet_name:
# azure_vnet_resource_group:
# azure_route_table_name:
# azure_route_table_resource_group:
# supported values are 'standard' or 'vmss'
# azure_vmtype: standard

## Azure Disk CSI credentials and parameters
## see docs/azure-csi.md for details on how to get these values

# azure_csi_tenant_id:
# azure_csi_subscription_id:
# azure_csi_aad_client_id:
# azure_csi_aad_client_secret:
# azure_csi_location:
# azure_csi_resource_group:
# azure_csi_vnet_name:
# azure_csi_vnet_resource_group:
# azure_csi_subnet_name:
# azure_csi_security_group_name:
# azure_csi_use_instance_metadata:
# azure_csi_tags: "Owner=owner,Team=team,Environment=environment"

## To enable Azure Disk CSI, uncomment below
# azure_csi_enabled: true
# azure_csi_controller_replicas: 1
# azure_csi_plugin_image_tag: latest

44 kubespray_inventory/edge-1/group_vars/all/containerd.yml Normal file
@@ -0,0 +1,44 @@
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options

# containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0

# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"

# containerd_runtimes:
#   - name: runc
#     type: "io.containerd.runc.v2"
#     engine: ""
#     root: ""
# Example for Kata Containers as an additional runtime:
#   - name: kata
#     type: "io.containerd.kata.v2"
#     engine: ""
#     root: ""

# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216

# containerd_debug_level: "info"

# containerd_metrics_address: ""

# containerd_metrics_grpc_histogram: false

# containerd_registries:
#   "docker.io": "https://registry-1.docker.io"

# containerd_max_container_log_line_size: -1

# containerd_registry_auth:
#   - registry:
#     username: user
#     password: pass

containerd_registries:
  "docker.io":
    - "https://mirror.gcr.io"
    - "https://registry-1.docker.io"

2 kubespray_inventory/edge-1/group_vars/all/coreos.yml Normal file
@@ -0,0 +1,2 @@
## Whether coreos needs auto upgrade; default is true
# coreos_auto_upgrade: true

6 kubespray_inventory/edge-1/group_vars/all/cri-o.yml Normal file
@@ -0,0 +1,6 @@
# crio_insecure_registries:
#   - 10.0.0.2:5000
# crio_registry_auth:
#   - registry: 10.0.0.2:5000
#     username: user
#     password: pass

59 kubespray_inventory/edge-1/group_vars/all/docker.yml Normal file
@@ -0,0 +1,59 @@
---
## Uncomment this if you want to force overlay/overlay2 as the docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2

## Enable docker_container_storage_setup; it will configure the devicemapper driver on Centos7 or RedHat7.
docker_container_storage_setup: false

## A disk path must be defined for docker_container_storage_setup_devs,
## otherwise docker-storage-setup will be executed incorrectly.
# docker_container_storage_setup_devs: /dev/vdb

## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
## Valid options are systemd or cgroupfs, default is systemd
# docker_cgroup_driver: systemd

## Only set this if you have more than 3 nameservers:
## If true, Kubespray will only use the first 3, otherwise it will fail
docker_dns_servers_strict: false

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## Used to set docker daemon iptables options to true
docker_iptables_enabled: "false"

# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"

# define docker bin_dir
docker_bin_dir: "/usr/bin"

# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
# kubespray deletes the docker package on each run, so caching the package makes sense
docker_rpm_keepcache: 1

## An obvious use case is allowing insecure-registry access to self hosted registries.
## Entries can be an IP address or a domain name,
## e.g. 172.19.16.11 or mirror.registry.io
# docker_insecure_registries:
#   - mirror.registry.io
#   - 172.19.16.11

## Add other registries, for example a China registry mirror.
# docker_registry_mirrors:
#   - https://registry.docker-cn.com
#   - https://mirror.aliyuncs.com

## If non-empty will override default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which control whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
# docker_mount_flags:

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
# docker_options: ""

10 kubespray_inventory/edge-1/group_vars/all/gcp.yml Normal file
@@ -0,0 +1,10 @@
## GCP compute Persistent Disk CSI Driver credentials and parameters
## See docs/gcp-pd-csi.md for information about the implementation

## Specify the path to the file containing the service account credentials
# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"

## To enable the GCP Persistent Disk CSI driver, uncomment below
# gcp_pd_csi_enabled: true
# gcp_pd_csi_controller_replicas: 1
# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"

28 kubespray_inventory/edge-1/group_vars/all/oci.yml Normal file
@@ -0,0 +1,28 @@
## When Oracle Cloud Infrastructure is used, set these variables
# oci_private_key:
# oci_region_id:
# oci_tenancy_id:
# oci_user_id:
# oci_user_fingerprint:
# oci_compartment_id:
# oci_vnc_id:
# oci_subnet1_id:
# oci_subnet2_id:
## Override these default/optional behaviors if you wish
# oci_security_list_management: All
## If you would like the controller to manage specific lists per subnet, this is a mapping of subnet OCIDs to security list OCIDs. Below are examples.
# oci_security_lists:
#   ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
#   ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
# oci_use_instance_principals: false
# oci_cloud_controller_version: 0.6.0
## If you would like to control OCI query rate limits for the controller
# oci_rate_limit:
#   rate_limit_qps_read:
#   rate_limit_qps_write:
#   rate_limit_bucket_read:
#   rate_limit_bucket_write:
## Other optional variables
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)

84 kubespray_inventory/edge-1/group_vars/all/offline.yml Normal file
@@ -0,0 +1,84 @@
---
## Global Offline settings
### Private Container Image Registry
# registry_host: "myprivateregistry.com"
# files_repo: "http://myprivatehttpd"
### If using CentOS, RedHat, AlmaLinux or Fedora
# yum_repo: "http://myinternalyumrepo"
### If using Debian
# debian_repo: "http://myinternaldebianrepo"
### If using Ubuntu
# ubuntu_repo: "http://myinternalubunturepo"

## Container Registry overrides
# kube_image_repo: "{{ registry_host }}"
# gcr_image_repo: "{{ registry_host }}"
# github_image_repo: "{{ registry_host }}"
# docker_image_repo: "{{ registry_host }}"
# quay_image_repo: "{{ registry_host }}"

## Kubernetes components
# kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
# kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"

## CNI Plugins
# cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"

## cri-tools
# crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"

## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
# etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"

# [Optional] Calico: If using the Calico network plugin
# calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# [Optional] Calico with kdd: If using the Calico network plugin with the kdd datastore
# calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"

# [Optional] helm: only if you set helm_enabled: true
# helm_download_url: "{{ files_repo }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"

# [Optional] crun: only if you set crun_enabled: true
# crun_download_url: "{{ files_repo }}/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"

# [Optional] kata: only if you set kata_containers_enabled: true
# kata_containers_download_url: "{{ files_repo }}/kata-containers/runtime/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"

# [Optional] cri-o: only if you set container_manager: crio
# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"

## CentOS/Redhat/AlmaLinux
### For EL7, base and extras repos must be available; for EL8, baseos and appstream
### By default we enable those repos automatically
# rhel_enable_repos: false
### Docker / Containerd
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"

## Fedora
### Docker
# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
### Containerd
# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"

## Debian
### Docker
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
### Containerd
# containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_debian_repo_repokey: 'YOURREPOKEY'

## Ubuntu
### Docker
# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
### Containerd
# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
 | 
			
		||||
							
								
								
									
										49
									
								
								kubespray_inventory/edge-1/group_vars/all/openstack.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										49
									
								
								kubespray_inventory/edge-1/group_vars/all/openstack.yml
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,49 @@
 | 
			
		||||
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
 | 
			
		||||
# openstack_blockstorage_version: "v1/v2/auto (default)"
 | 
			
		||||
# openstack_blockstorage_ignore_volume_az: yes
 | 
			
		||||
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
 | 
			
		||||
# openstack_lbaas_enabled: True
 | 
			
		||||
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
 | 
			
		||||
## To enable automatic floating ip provisioning, specify a subnet.
 | 
			
		||||
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
 | 
			
		||||
## Override default LBaaS behavior
 | 
			
		||||
# openstack_lbaas_use_octavia: False
 | 
			
		||||
# openstack_lbaas_method: "ROUND_ROBIN"
 | 
			
		||||
# openstack_lbaas_provider: "haproxy"
 | 
			
		||||
# openstack_lbaas_create_monitor: "yes"
 | 
			
		||||
# openstack_lbaas_monitor_delay: "1m"
 | 
			
		||||
# openstack_lbaas_monitor_timeout: "30s"
 | 
			
		||||
# openstack_lbaas_monitor_max_retries: "3"
 | 
			
		||||
 | 
			
		||||
## Values for the external OpenStack Cloud Controller
 | 
			
		||||
# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
 | 
			
		||||
# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
 | 
			
		||||
# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
 | 
			
		||||
# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
 | 
			
		||||
# external_openstack_lbaas_method: "ROUND_ROBIN"
 | 
			
		||||
# external_openstack_lbaas_provider: "octavia"
 | 
			
		||||
# external_openstack_lbaas_create_monitor: false
 | 
			
		||||
# external_openstack_lbaas_monitor_delay: "1m"
 | 
			
		||||
# external_openstack_lbaas_monitor_timeout: "30s"
 | 
			
		||||
# external_openstack_lbaas_monitor_max_retries: "3"
 | 
			
		||||
# external_openstack_lbaas_manage_security_groups: false
 | 
			
		||||
# external_openstack_lbaas_internal_lb: false
 | 
			
		||||
# external_openstack_network_ipv6_disabled: false
 | 
			
		||||
# external_openstack_network_internal_networks: []
 | 
			
		||||
# external_openstack_network_public_networks: []
 | 
			
		||||
# external_openstack_metadata_search_order: "configDrive,metadataService"
 | 
			
		||||
 | 
			
		||||
## Application credentials to authenticate against Keystone API
 | 
			
		||||
## Those settings will take precedence over username and password that might be set your environment
 | 
			
		||||
## All of them are required
 | 
			
		||||
# external_openstack_application_credential_name:
 | 
			
		||||
# external_openstack_application_credential_id:
 | 
			
		||||
# external_openstack_application_credential_secret:
 | 
			
		||||
 | 
			
		||||
## The tag of the external OpenStack Cloud Controller image
 | 
			
		||||
# external_openstack_cloud_controller_image_tag: "latest"
 | 
			
		||||
 | 
			
		||||
## To use Cinder CSI plugin to provision volumes set this value to true
 | 
			
		||||
## Make sure to source in the openstack credentials
 | 
			
		||||
# cinder_csi_enabled: true
 | 
			
		||||
# cinder_csi_controller_replicas: 1
 | 
			
		||||
							
								
								
									
										32
									
								
								kubespray_inventory/edge-1/group_vars/all/vsphere.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										32
									
								
								kubespray_inventory/edge-1/group_vars/all/vsphere.yml
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,32 @@
 | 
			
		||||
## Values for the external vSphere Cloud Provider
 | 
			
		||||
# external_vsphere_vcenter_ip: "myvcenter.domain.com"
 | 
			
		||||
# external_vsphere_vcenter_port: "443"
 | 
			
		||||
# external_vsphere_insecure: "true"
 | 
			
		||||
# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable
 | 
			
		||||
# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable
 | 
			
		||||
# external_vsphere_datacenter: "DATACENTER_name"
 | 
			
		||||
# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
 | 
			
		||||
 | 
			
		||||
## Vsphere version where located VMs
 | 
			
		||||
# external_vsphere_version: "6.7u3"
 | 
			
		||||
 | 
			
		||||
## Tags for the external vSphere Cloud Provider images
 | 
			
		||||
## gcr.io/cloud-provider-vsphere/cpi/release/manager
 | 
			
		||||
# external_vsphere_cloud_controller_image_tag: "latest"
 | 
			
		||||
## gcr.io/cloud-provider-vsphere/csi/release/syncer
 | 
			
		||||
# vsphere_syncer_image_tag: "v2.2.1"
 | 
			
		||||
## quay.io/k8scsi/csi-attacher
 | 
			
		||||
# vsphere_csi_attacher_image_tag: "v3.1.0"
 | 
			
		||||
## gcr.io/cloud-provider-vsphere/csi/release/driver
 | 
			
		||||
# vsphere_csi_controller: "v2.2.1"
 | 
			
		||||
## quay.io/k8scsi/livenessprobe
 | 
			
		||||
# vsphere_csi_liveness_probe_image_tag: "v2.2.0"
 | 
			
		||||
## quay.io/k8scsi/csi-provisioner
 | 
			
		||||
# vsphere_csi_provisioner_image_tag: "v2.1.0"
 | 
			
		||||
## quay.io/k8scsi/csi-resizer
 | 
			
		||||
## makes sense only for vSphere version >=7.0
 | 
			
		||||
# vsphere_csi_resizer_tag: "v1.1.0"
 | 
			
		||||
 | 
			
		||||
## To use vSphere CSI plugin to provision volumes set this value to true
 | 
			
		||||
# vsphere_csi_enabled: true
 | 
			
		||||
# vsphere_csi_controller_replicas: 1
 | 
			
		||||
							
								
								
									
										22
									
								
								kubespray_inventory/edge-1/group_vars/etcd.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								kubespray_inventory/edge-1/group_vars/etcd.yml
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,22 @@
 | 
			
		||||
---
 | 
			
		||||
## Etcd auto compaction retention for mvcc key value store in hour
 | 
			
		||||
# etcd_compaction_retention: 0
 | 
			
		||||
 | 
			
		||||
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
 | 
			
		||||
# etcd_metrics: basic
 | 
			
		||||
 | 
			
		||||
## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
 | 
			
		||||
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
 | 
			
		||||
# etcd_memory_limit: "512M"
 | 
			
		||||
 | 
			
		||||
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
 | 
			
		||||
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
 | 
			
		||||
## etcd documentation for more information.
 | 
			
		||||
# etcd_quota_backend_bytes: "2147483648"
 | 
			
		||||
 | 
			
		||||
### ETCD: disable peer client cert authentication.
 | 
			
		||||
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
 | 
			
		||||
# etcd_peer_client_auth: true
 | 
			
		||||
 | 
			
		||||
## Settings for etcd deployment type
 | 
			
		||||
etcd_deployment_type: host

187 kubespray_inventory/edge-1/group_vars/k8s_cluster/addons.yml Normal file
@@ -0,0 +1,187 @@
---
# Kubernetes dashboard
# RBAC required. See docs/getting-started.md for access details.
# dashboard_enabled: false

# Helm deployment
helm_enabled: false

# Registry deployment
registry_enabled: false
# registry_namespace: kube-system
# registry_storage_class: ""
# registry_disk_size: "10Gi"

# Metrics Server deployment
metrics_server_enabled: false
# metrics_server_resizer: false
# metrics_server_kubelet_insecure_tls: true
# metrics_server_metric_resolution: 15s
# metrics_server_kubelet_preferred_address_types: "InternalIP"

# Rancher Local Path Provisioner
local_path_provisioner_enabled: false
# local_path_provisioner_namespace: "local-path-storage"
# local_path_provisioner_storage_class: "local-path"
# local_path_provisioner_reclaim_policy: Delete
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
# local_path_provisioner_debug: false
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
# local_path_provisioner_image_tag: "v0.0.19"
# local_path_provisioner_helper_image_repo: "busybox"
# local_path_provisioner_helper_image_tag: "latest"

# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: kube-system
# local_volume_provisioner_nodelabels:
#   - kubernetes.io/hostname
#   - topology.kubernetes.io/region
#   - topology.kubernetes.io/zone
# local_volume_provisioner_storage_classes:
#   local-storage:
#     host_dir: /mnt/disks
#     mount_dir: /mnt/disks
#     volume_mode: Filesystem
#     fs_type: ext4
#   fast-disks:
#     host_dir: /mnt/fast-disks
#     mount_dir: /mnt/fast-disks
#     block_cleaner_command:
#       - "/scripts/shred.sh"
#       - "2"
#     volume_mode: Filesystem
#     fs_type: ext4

# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots
# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller
# Longhorn is an external CSI that would also require setting this to true, but it is not included in kubespray
# csi_snapshot_controller_enabled: false

# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true

# RBD provisioner deployment
rbd_provisioner_enabled: false
# rbd_provisioner_namespace: rbd-provisioner
# rbd_provisioner_replicas: 2
# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# rbd_provisioner_pool: kube
# rbd_provisioner_admin_id: admin
# rbd_provisioner_secret_name: ceph-secret-admin
# rbd_provisioner_secret: ceph-key-admin
# rbd_provisioner_user_id: kube
# rbd_provisioner_user_secret_name: ceph-secret-user
# rbd_provisioner_user_secret: ceph-key-user
# rbd_provisioner_user_secret_namespace: rbd-provisioner
# rbd_provisioner_fs_type: ext4
# rbd_provisioner_image_format: "2"
# rbd_provisioner_image_features: layering
# rbd_provisioner_storage_class: rbd
# rbd_provisioner_reclaim_policy: Delete

# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
#   kubernetes.io/os: "linux"
# ingress_nginx_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
#   map-hash-bucket-size: "128"
#   ssl-protocols: "TLSv1.2 TLSv1.3"
# ingress_nginx_configmap_tcp_services:
#   9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
#   53: "kube-system/coredns:53"
# ingress_nginx_extra_args:
#   - --default-ssl-certificate=default/foo-tls
# ingress_nginx_class: nginx
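
On bare-metal edge clusters without a cloud load balancer, a common pattern is to run the controller on the host network so ports 80/443 bind directly on the nodes. A sketch using only the variables above (the settings are illustrative, not part of this inventory):

ingress_nginx_enabled: true
ingress_nginx_host_network: true      # expose 80/443 straight on the node interfaces
ingress_nginx_nodeselector:
  kubernetes.io/os: "linux"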

# Ambassador ingress controller deployment
ingress_ambassador_enabled: false
# ingress_ambassador_namespace: "ambassador"
# ingress_ambassador_version: "*"
# ingress_ambassador_multi_namespaces: false

# ALB ingress controller deployment
ingress_alb_enabled: false
# alb_ingress_aws_region: "us-east-1"
# alb_ingress_restrict_scheme: "false"
# Enables logging on all outbound requests sent to the AWS API.
# If logging is desired, set to true.
# alb_ingress_aws_debug: "false"

# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"

# MetalLB deployment
metallb_enabled: false
metallb_speaker_enabled: true
# metallb_ip_range:
#   - "10.5.0.50-10.5.0.99"
# metallb_speaker_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_controller_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_speaker_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_controller_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_version: v0.10.2
# metallb_protocol: "layer2"
# metallb_port: "7472"
# metallb_memberlist_port: "7946"
# metallb_additional_address_pools:
#   kube_service_pool:
#     ip_range:
#       - "10.5.1.50-10.5.1.99"
#     protocol: "layer2"
#     auto_assign: false
# metallb_protocol: "bgp"
# metallb_peers:
#   - peer_address: 192.0.2.1
#     peer_asn: 64512
#     my_asn: 4200000000
#   - peer_address: 192.0.2.2
#     peer_asn: 64513
#     my_asn: 4200000000
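
A minimal layer2 setup is sketched below; the address range is a placeholder that would have to be an unused slice of the lab network. Note that k8s-cluster.yml further down also requires kube_proxy_strict_arp: true for MetalLB to work with the IPVS proxy configured in this inventory:

metallb_enabled: true
metallb_speaker_enabled: true
metallb_ip_range:
  - "192.168.111.200-192.168.111.220"   # hypothetical free range on the edge-1 subnet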

# The plugin manager for kubectl
krew_enabled: false
krew_root_dir: "/usr/local/krew"

318 kubespray_inventory/edge-1/group_vars/k8s_cluster/k8s-cluster.yml Normal file
@@ -0,0 +1,318 @@
---
# Kubernetes configuration dirs and system namespace.
# These are where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in the same location and namespace.
# Editing these values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"

kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.21.6

# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Directory where credentials will be stored
credentials_dir: "{{ inventory_dir }}/credentials"

## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
# kube_oidc_auth: false
# kube_token_auth: false


## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you have to deploy an additional OpenID Provider (e.g. Dex, Keycloak, ...)

# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: 'oidc:'
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: 'oidc:'
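
For illustration, a filled-in OIDC block against a hypothetical Keycloak realm could look like the sketch below; the issuer URL and claim choices are placeholders, not values from this deployment:

kube_oidc_auth: true
kube_oidc_url: https://keycloak.example.local/auth/realms/k8s   # hypothetical issuer
kube_oidc_client_id: kubernetes
kube_oidc_username_claim: preferred_username
kube_oidc_groups_claim: groups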

## Variables to control webhook authn/authz
# kube_webhook_token_auth: false
# kube_webhook_token_auth_url: https://...
# kube_webhook_token_auth_url_skip_tls_verify: false

## For webhook authorization, authorization_modes must include Webhook
# kube_webhook_authorization: false
# kube_webhook_authorization_url: https://...
# kube_webhook_authorization_url_skip_tls_verify: false

# Choose network plugin (cilium, calico, weave or flannel. Use cni for generic cni plugin)
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: weave

# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
kube_network_plugin_multus: false

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# Internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# Internal network node size allocation (optional). This is the size allocated
# to each node for pod IP address allocation. Note that the number of pods per node is
# also limited by the kubelet_max_pods variable which defaults to 110.
#
# Example:
# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
#  - kube_pods_subnet: 10.233.64.0/18
#  - kube_network_node_prefix: 24
#  - kubelet_max_pods: 110
#
# Example:
# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
#  - kube_pods_subnet: 10.233.64.0/18
#  - kube_network_node_prefix: 25
#  - kubelet_max_pods: 110
kube_network_node_prefix: 24
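
The arithmetic behind those two examples, spelled out: a /18 pods subnet split into per-node /24 blocks yields 2^(24-18) = 64 node blocks, each with 2^(32-24) - 2 = 254 usable pod IPs, so the effective per-node cap is min(254, kubelet_max_pods) = 110 here. With a /25 node prefix the same /18 splits into 2^(25-18) = 128 blocks of 2^(32-25) - 2 = 126 IPs, again capped at min(126, 110) = 110 pods per node.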

# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false

# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if enable_dual_stack_networks is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116

# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if enable_dual_stack_networks is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112

# IPv6 subnet size allocated to each node for pods.
# This is only used if enable_dual_stack_networks is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120
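
Turning dual stack on is then a single switch with the ULA defaults above left in place, sketched below. The caveat (an assumption based on CNI capabilities, not stated in this inventory) is that the chosen plugin must support dual stack; calico does, while weave, the plugin selected above, does not, so this would also imply changing kube_network_plugin:

enable_dual_stack_networks: true   # assumes a dual-stack-capable CNI such as calico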

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443  # (https)
# kube_apiserver_insecure_port: 8080  # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
kube_apiserver_insecure_port: 0  # (disabled)

# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
kube_proxy_mode: ipvs

# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
# must be set to true for MetalLB to work
kube_proxy_strict_arp: false

# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses.
# kube_proxy_nodeport_addresses_cidr is retained for legacy config
kube_proxy_nodeport_addresses: >-
  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
  [{{ kube_proxy_nodeport_addresses_cidr }}]
  {%- else -%}
  []
  {%- endif -%}
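
So the Jinja template above just wraps a single legacy variable into a list; restricting NodePorts to the node subnet would be a one-liner (the CIDR here is illustrative, matching this lab's edge-1 network):

kube_proxy_nodeport_addresses_cidr: 192.168.111.0/24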

# If non-empty, will use this string as identification instead of the actual hostname
# kube_override_hostname: >-
#   {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
#   {%- else -%}
#   {{ inventory_hostname }}
#   {%- endif -%}

## Encrypting Secret Data at Rest (experimental)
kube_encrypt_secret_data: false

# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
# kubelet_shutdown_grace_period: 60s
# kubelet_shutdown_grace_period_critical_pods: 20s

# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cloud.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be coredns, coredns_dual, manual or none
dns_mode: coredns
# Set manual server if using a custom cluster DNS server
# manual_dns_server: 10.x.x.x
# Enable nodelocal dns cache
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
nodelocaldns_bind_metrics_host_ip: false
# nodelocaldns_external_zones:
# - zones:
#   - example.com
#   - example.io:1053
#   nameservers:
#   - 1.1.1.1
#   - 2.2.2.2
#   cache: 5
# - zones:
#   - https://mycompany.local:4453
#   nameservers:
#   - 192.168.0.53
#   cache: 0
# Enable k8s_external plugin for CoreDNS
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local
# Enable endpoint_pod_names option for kubernetes plugin
enable_coredns_k8s_endpoint_pod_names: false

# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
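
One caveat worth flagging: docker_dns only takes effect when the container manager is docker, and this inventory sets container_manager: containerd below, so a containerd-appropriate mode would be expected here. A hedged correction, assuming the intent is to keep containerd:

resolvconf_mode: host_resolvconf   # docker_dns is not applied without the docker engine
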
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# IP address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"

## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
container_manager: containerd

# Additional container runtimes
kata_containers_enabled: false

kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"

# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# audit log for kubernetes
kubernetes_audit: false

# dynamic kubelet configuration
# Note: Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA.
# It is planned to be removed from Kubernetes in version 1.23.
# Please use alternative ways to update kubelet configuration.
dynamic_kubelet_configuration: false

# define kubelet config dir for dynamic kubelet
# kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"

# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false

# Custom PodSecurityPolicySpec for restricted policy
# podsecuritypolicy_restricted_spec: {}

# Custom PodSecurityPolicySpec for privileged policy
# podsecuritypolicy_privileged_spec: {}

# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
# kubectl_localhost: false

# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods

## Optionally reserve resources for OS system daemons.
# system_reserved: true
## Uncomment to override default values
# system_memory_reserved: 512Mi
# system_cpu_reserved: 500m
## Reservation for master hosts
# system_master_memory_reserved: 256Mi
# system_master_cpu_reserved: 250m

# An alternative flexvolume plugin directory
# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec

## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]

## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-sigs/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
persistent_volumes_enabled: false

## Container Engine Acceleration
## Enable container acceleration feature, for example use gpu acceleration in containers
# nvidia_accelerator_enabled: true
## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset.
## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
## Array with nvidia_gpu_nodes; leave empty or comment if you don't want to install drivers.
## Labels and taints won't be set on nodes if they are not in the array.
# nvidia_gpu_nodes:
#   - kube-gpu-001
# nvidia_driver_version: "384.111"
## flavor can be tesla or gtx
# nvidia_gpu_flavor: gtx
## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
## NVIDIA GPU device plugin image.
# nvidia_gpu_device_plugin_container: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"

## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""

## Support tls cipher suites.
# tls_cipher_suites: {}
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
#   - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_RSA_WITH_RC4_128_SHA
#   - TLS_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_RSA_WITH_AES_256_CBC_SHA
#   - TLS_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_RSA_WITH_RC4_128_SHA

## Amount of time to retain events. (default 1h0m0s)
event_ttl_duration: "1h0m0s"

## Automatically renew K8S control plane certificates on first Monday of each month
auto_renew_certificates: false
# First Monday of each month
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"

109 kubespray_inventory/edge-1/group_vars/k8s_cluster/k8s-net-calico.yml Normal file
@@ -0,0 +1,109 @@
# see roles/network_plugin/calico/defaults/main.yml

## With calico it is possible to distribute routes with border routers of the datacenter.
## Warning: enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router
# peer_with_router: false

# Enables Internet connectivity from containers
# nat_outgoing: true

# Enables Calico CNI "host-local" IPAM plugin
# calico_ipam_host_local: true

# add default ippool name
# calico_pool_name: "default-pool"

# add default ippool blockSize (defaults to kube_network_node_prefix)
# calico_pool_blocksize: 24

# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
# calico_pool_cidr: 1.2.3.4/5

# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112

# Global as_num (/calico/bgp/v1/global/as_num)
# global_as_num: "64512"

# If doing peering with node-assigned asn where the global AS does not match your nodes, you want this
# to be true.  All other cases, false.
# calico_no_global_as_num: false
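
Sketching what router peering could look like if this lab ever moved off weave to calico, using only the two knobs shown above (the AS number is a placeholder):

peer_with_router: true        # disables the default full node mesh
global_as_num: "64512"        # hypothetical AS shared with the datacenter border router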

# You can set MTU value here. If left undefined or empty, it will
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500

# Configure the MTU to use for workload interfaces and tunnels.
# - If Wireguard is enabled, subtract 60 from your network MTU (i.e. 1500-60=1440)
# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
# calico_veth_mtu: 1440
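
For example, enabling WireGuard encryption on a standard 1500-byte network pairs with the 1440 figure above (a sketch, not part of this inventory):

calico_wireguard_enabled: true
calico_veth_mtu: 1440   # 1500 - 60 bytes of WireGuard overhead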

# Advertise Cluster IPs
# calico_advertise_cluster_ips: true

# Advertise Service External IPs
# calico_advertise_service_external_ips:
# - x.x.x.x/24
# - y.y.y.y/32

# Advertise Service LoadBalancer IPs
# calico_advertise_service_loadbalancer_ips:
# - x.x.x.x/24
# - y.y.y.y/16

# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
# calico_datastore: "kdd"

# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
# calico_iptables_backend: "Legacy"

# Use typha (only with kdd)
# typha_enabled: false

# Generate TLS certs for secure typha<->calico-node communication
# typha_secure: false

# Scaling typha: 1 replica per 100 nodes is adequate
# Number of typha replicas
# typha_replicas: 1

# Set max typha connections
# typha_max_connections_lower_limit: 300

# Set calico network backend: "bird", "vxlan" or "none"
# bird enables BGP routing, required for ipip mode.
# calico_network_backend: bird

# IP in IP and VXLAN are mutually exclusive modes.
# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_ipip_mode: 'Always'

# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_vxlan_mode: 'Never'

# set VXLAN port and VNI
# calico_vxlan_vni: 4096
# calico_vxlan_port: 4789

# If you want to use a non-default IP_AUTODETECTION_METHOD for calico node, set this option to one of:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX
# see https://docs.projectcalico.org/reference/node/configuration
# calico_ip_auto_method: "interface=eth.*"
# Choose the iptables insert mode for Calico: "Insert" or "Append".
# calico_felix_chaininsertmode: Insert

# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2)
# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
# calico_use_default_route_src_ipaddr: false

# Enable calico traffic encryption with wireguard
# calico_wireguard_enabled: false

# Under certain situations liveness and readiness probes may need tuning
# calico_node_livenessprobe_timeout: 10
# calico_node_readinessprobe_timeout: 10

10 kubespray_inventory/edge-1/group_vars/k8s_cluster/k8s-net-canal.yml Normal file
@@ -0,0 +1,10 @@
# see roles/network_plugin/canal/defaults/main.yml

# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
# canal_iface: ""

# Whether or not to masquerade traffic to destinations not within
# the pod network.
# canal_masquerade: "true"

1 kubespray_inventory/edge-1/group_vars/k8s_cluster/k8s-net-cilium.yml Normal file
@@ -0,0 +1 @@
# see roles/network_plugin/cilium/defaults/main.yml

18 kubespray_inventory/edge-1/group_vars/k8s_cluster/k8s-net-flannel.yml Normal file
@@ -0,0 +1,18 @@
# see roles/network_plugin/flannel/defaults/main.yml

## interface that should be used for flannel operations
## This is actually an inventory cluster-level item
# flannel_interface:

## Select interface that should be used for flannel operations by regexp on Name or IP
## This is actually an inventory cluster-level item
## example: select interface with ip from net 10.0.0.0/23
## single quote and escape backslashes
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'

# You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'
# For an experimental backend, please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md
# flannel_backend_type: "vxlan"
# flannel_vxlan_vni: 1
# flannel_vxlan_port: 8472

61 kubespray_inventory/edge-1/group_vars/k8s_cluster/k8s-net-kube-router.yml Normal file
@@ -0,0 +1,61 @@
# See roles/network_plugin/kube-router/defaults/main.yml

# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
# kube_router_run_router: true

# Enables Network Policy -- sets up iptables to provide ingress firewall for pods
# kube_router_run_firewall: true

# Enables Service Proxy -- sets up IPVS for Kubernetes Services
# see docs/kube-router.md "Caveats" section
# kube_router_run_service_proxy: false

# Add Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_cluster_ip: false

# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_external_ip: false

# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_loadbalancer_ip: false

# Adjust manifest of kube-router daemonset template with DSR needed changes
# kube_router_enable_dsr: false

# Array of arbitrary extra arguments to kube-router, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
# kube_router_extra_args: []

# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr.
# kube_router_peer_router_asns: ~

# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's.
# kube_router_peer_router_ips: ~

# The remote port of the external BGP to which all nodes will peer. If not set, the default BGP port (179) will be used.
# kube_router_peer_router_ports: ~

# Sets up node CNI to allow hairpin mode, requires node reboots, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
# kube_router_support_hairpin_mode: false

# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
# kube_router_dns_policy: ClusterFirstWithHostNet

# Array of annotations for master
# kube_router_annotations_master: []

# Array of annotations for every node
# kube_router_annotations_node: []

# Array of common annotations for every node
# kube_router_annotations_all: []

# Enables scraping kube-router metrics with Prometheus
# kube_router_enable_metrics: false

# Path to serve Prometheus metrics on
# kube_router_metrics_path: /metrics

# Prometheus metrics port to use
# kube_router_metrics_port: 9255

6 kubespray_inventory/edge-1/group_vars/k8s_cluster/k8s-net-macvlan.yml Normal file
@@ -0,0 +1,6 @@
---
# private interface, on an L2 network
macvlan_interface: "eth1"

# Enable nat in default gateway network interface
enable_nat_default_gateway: true

61 kubespray_inventory/edge-1/group_vars/k8s_cluster/k8s-net-weave.yml Normal file
@@ -0,0 +1,61 @@
# see roles/network_plugin/weave/defaults/main.yml

# Weave's network password for encryption; if null then no network encryption.
# weave_password: ~
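
Since weave is the active plugin in this inventory, this is the one knob that directly enables data-plane encryption. A sketch using an Ansible Vault-protected variable (the name vault_weave_password is hypothetical):

weave_password: "{{ vault_weave_password }}"   # keep the actual secret in a vaulted file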

# If set to 1, disable checking for new Weave Net versions (default is blank,
# i.e. check is enabled)
# weave_checkpoint_disable: false

# Soft limit on the number of connections between peers. Defaults to 100.
# weave_conn_limit: 100

# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
# for containers attached. If you need to disable hairpin, e.g. your kernel is
# one of those that can panic if hairpin is enabled, then you can disable it by
# setting `HAIRPIN_MODE=false`.
# weave_hairpin_mode: true

# The range of IP addresses used by Weave Net and the subnet they are placed in
# (CIDR format; default 10.32.0.0/12)
# weave_ipalloc_range: "{{ kube_pods_subnet }}"

# Set to 0 to disable Network Policy Controller (default is on)
# weave_expect_npc: "{{ enable_network_policy }}"

# List of addresses of peers in the Kubernetes cluster (default is to fetch the
# list from the api-server)
# weave_kube_peers: ~

# Set the initialization mode of the IP Address Manager (defaults to consensus
# amongst the KUBE_PEERS)
# weave_ipalloc_init: ~

# Set the IP address used as a gateway from the Weave network to the host
# network - this is useful if you are configuring the addon as a static pod.
# weave_expose_ip: ~

# Address and port that the Weave Net daemon will serve Prometheus-style
# metrics on (defaults to 0.0.0.0:6782)
# weave_metrics_addr: ~

# Address and port that the Weave Net daemon will serve status requests on
# (defaults to disabled)
# weave_status_addr: ~

# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
# underlying network has a tighter limit, or set a larger size for better
# performance if your network supports jumbo frames (e.g. 8916)
# weave_mtu: 1376

# Set to 1 to preserve the client source IP address when accessing Services
# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
# only with Weave IPAM (default).
# weave_no_masq_local: true

# set to nft to use nftables backend for iptables (default is iptables)
# weave_iptables_backend: iptables

# Extra variables that are passed to launch.sh, useful for enabling seed mode, see
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
# weave_extra_args: ~
							
								
								
									
40 kubespray_inventory/edge-1/hosts.yaml Normal file
@@ -0,0 +1,40 @@
all:
  vars:
    ansible_user: ubuntu
    ansible_become: true
  hosts:
    edge-cluster-1-master-1:
      ansible_host: edge-cluster-1-master-1
      ip: 192.168.111.100
      access_ip: 192.168.111.100

    edge-cluster-1-worker-1:
      ansible_host: edge-cluster-1-worker-1
      ip: 192.168.111.101
      access_ip: 192.168.111.101

    edge-cluster-1-worker-2:
      ansible_host: edge-cluster-1-worker-2
      ip: 192.168.111.102
      access_ip: 192.168.111.102

  children:
    kube_control_plane:
      hosts:
        edge-cluster-1-master-1:
    kube_node:
      hosts:
        edge-cluster-1-master-1:
        edge-cluster-1-worker-1:
        edge-cluster-1-worker-2:
    etcd:
      hosts:
        edge-cluster-1-master-1:
        edge-cluster-1-worker-1:
        edge-cluster-1-worker-2:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}

118 kubespray_inventory/edge-2/group_vars/all/all.yml Normal file
@@ -0,0 +1,118 @@
---
## Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd

## Experimental kubeadm etcd deployment mode. Available only for new deployments
etcd_kubeadm_enabled: false

## Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node.  This is used in flannel to allow other flannel nodes to see
## this node, for example.  The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
# access_ip: 1.1.1.1


## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
#   address: 1.2.3.4
#   port: 1234

## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx  # valid values "nginx" or "haproxy"

## If cilium is going to be used in strict mode, we can use the
## localhost connection and not use the external LB. If this parameter is
## not specified, the first node to connect to kubeapi will be used.
# use_localhost_as_kubeapi_loadbalancer: true

## Local loadbalancer should use this port
## and must be set to port 6443
loadbalancer_apiserver_port: 6443

## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081

### OTHER OPTIONAL VARIABLES

## Upstream dns servers
# upstream_dns_servers:
#   - 8.8.8.8
#   - 8.8.4.4

## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set, the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using openstack-client before starting the playbook.
# cloud_provider:

## When cloud_provider is set to 'external', you can set the cloud controller to deploy
## Supported cloud controllers are: 'openstack' and 'vsphere'
## When openstack or vsphere are used make sure to source in the required fields
# external_cloud_provider:

## Set these proxy values in order to update package manager and docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""

## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""

## Some problems may occur when downloading files over https proxy due to ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
# download_validate_certs: False

## If you need to exclude all cluster nodes from proxy and other resources, add other resources here.
# additional_no_proxy: ""

## If you need to disable proxying of os package repositories but are still behind an http_proxy set
## skip_http_proxy_on_os_packages to true
## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will be installed from your preferred source
# skip_http_proxy_on_os_packages: false

## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers.  To override this behaviour by only including master nodes in the
## no_proxy variable, set below to true:
no_proxy_exclude_workers: false

## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Options are "script" or "none"
# cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false

## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255

## Set true to download and cache container images
# download_container: true

## Deploy container engine
# Set false if you want to deploy container engine manually.
# deploy_container_engine: true

## Red Hat Enterprise Linux subscription registration
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
## Update RHEL subscription purpose usage, role and SLA if necessary
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"

## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
# ping_access_ip: true

9 kubespray_inventory/edge-2/group_vars/all/aws.yml Normal file
@@ -0,0 +1,9 @@
## To use AWS EBS CSI Driver to provision volumes, uncomment the first value
## and configure the parameters below
# aws_ebs_csi_enabled: true
# aws_ebs_csi_enable_volume_scheduling: true
# aws_ebs_csi_enable_volume_snapshot: false
# aws_ebs_csi_enable_volume_resizing: false
# aws_ebs_csi_controller_replicas: 1
# aws_ebs_csi_plugin_image_tag: latest
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"
40 kubespray_inventory/edge-2/group_vars/all/azure.yml Normal file
@@ -0,0 +1,40 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values

# azure_cloud:
# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
# azure_aad_client_secret:
# azure_resource_group:
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_security_group_resource_group:
# azure_vnet_name:
# azure_vnet_resource_group:
# azure_route_table_name:
# azure_route_table_resource_group:
# supported values are 'standard' or 'vmss'
# azure_vmtype: standard

## Azure Disk CSI credentials and parameters
## see docs/azure-csi.md for details on how to get these values

# azure_csi_tenant_id:
# azure_csi_subscription_id:
# azure_csi_aad_client_id:
# azure_csi_aad_client_secret:
# azure_csi_location:
# azure_csi_resource_group:
# azure_csi_vnet_name:
# azure_csi_vnet_resource_group:
# azure_csi_subnet_name:
# azure_csi_security_group_name:
# azure_csi_use_instance_metadata:
# azure_csi_tags: "Owner=owner,Team=team,Environment=environment"

## To enable Azure Disk CSI, uncomment below
# azure_csi_enabled: true
# azure_csi_controller_replicas: 1
# azure_csi_plugin_image_tag: latest
44 kubespray_inventory/edge-2/group_vars/all/containerd.yml Normal file
@@ -0,0 +1,44 @@
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options

# containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0

# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"

# containerd_runtimes:
#   - name: runc
#     type: "io.containerd.runc.v2"
#     engine: ""
#     root: ""
# Example for Kata Containers as additional runtime:
#   - name: kata
#     type: "io.containerd.kata.v2"
#     engine: ""
#     root: ""

# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216

# containerd_debug_level: "info"

# containerd_metrics_address: ""

# containerd_metrics_grpc_histogram: false

# containerd_registries:
#   "docker.io": "https://registry-1.docker.io"

# containerd_max_container_log_line_size: -1

# containerd_registry_auth:
#   - registry:
#     username: user
#     password: pass

containerd_registries:
  "docker.io":
    - "https://mirror.gcr.io"
    - "https://registry-1.docker.io"
			
		||||
							
								
								
									
										2
									
								
								kubespray_inventory/edge-2/group_vars/all/coreos.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										2
									
								
								kubespray_inventory/edge-2/group_vars/all/coreos.yml
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,2 @@
## Whether coreos should auto-upgrade; default is true
# coreos_auto_upgrade: true
6 kubespray_inventory/edge-2/group_vars/all/cri-o.yml Normal file
@@ -0,0 +1,6 @@
# crio_insecure_registries:
#   - 10.0.0.2:5000
# crio_registry_auth:
#   - registry: 10.0.0.2:5000
#     username: user
#     password: pass
59 kubespray_inventory/edge-2/group_vars/all/docker.yml Normal file
@@ -0,0 +1,59 @@
---
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2

## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
docker_container_storage_setup: false

## A disk path must be defined for docker_container_storage_setup_devs,
## otherwise docker-storage-setup will be executed incorrectly.
# docker_container_storage_setup_devs: /dev/vdb

## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
## Valid options are systemd or cgroupfs, default is systemd
# docker_cgroup_driver: systemd

## Only set this if you have more than 3 nameservers:
## If true Kubespray will only use the first 3, otherwise it will fail
docker_dns_servers_strict: false

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## Used to set docker daemon iptables options to true
docker_iptables_enabled: "false"

# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"

# define docker bin_dir
docker_bin_dir: "/usr/bin"

# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
# kubespray deletes the docker package on each run, so caching the package makes sense
docker_rpm_keepcache: 1

## An obvious use case is allowing insecure-registry access to self hosted registries.
## Can be ipaddress and domain_name.
## example define 172.19.16.11 or mirror.registry.io
# docker_insecure_registries:
#   - mirror.registry.io
#   - 172.19.16.11

## Add other registries, for example a China registry mirror.
# docker_registry_mirrors:
#   - https://registry.docker-cn.com
#   - https://mirror.aliyuncs.com

## If non-empty will override default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which control whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
# docker_mount_flags:

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
# docker_options: ""
10 kubespray_inventory/edge-2/group_vars/all/gcp.yml Normal file
@@ -0,0 +1,10 @@
## GCP compute Persistent Disk CSI Driver credentials and parameters
## See docs/gcp-pd-csi.md for information about the implementation

## Specify the path to the file containing the service account credentials
# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"

## To enable GCP Persistent Disk CSI driver, uncomment below
# gcp_pd_csi_enabled: true
# gcp_pd_csi_controller_replicas: 1
# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"
28 kubespray_inventory/edge-2/group_vars/all/oci.yml Normal file
@@ -0,0 +1,28 @@
## When Oracle Cloud Infrastructure is used, set these variables
# oci_private_key:
# oci_region_id:
# oci_tenancy_id:
# oci_user_id:
# oci_user_fingerprint:
# oci_compartment_id:
# oci_vnc_id:
# oci_subnet1_id:
# oci_subnet2_id:
## Override these default/optional behaviors if you wish
# oci_security_list_management: All
## If you would like the controller to manage specific lists per subnet, this is a mapping of subnet OCIDs to security list OCIDs. Below are examples.
# oci_security_lists:
#   ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
#   ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
# oci_use_instance_principals: false
# oci_cloud_controller_version: 0.6.0
## If you would like to control OCI query rate limits for the controller
# oci_rate_limit:
#   rate_limit_qps_read:
#   rate_limit_qps_write:
#   rate_limit_bucket_read:
#   rate_limit_bucket_write:
## Other optional variables
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
84 kubespray_inventory/edge-2/group_vars/all/offline.yml Normal file
@@ -0,0 +1,84 @@
---
## Global Offline settings
### Private Container Image Registry
# registry_host: "myprivateregistry.com"
# files_repo: "http://myprivatehttpd"
### If using CentOS, RedHat, AlmaLinux or Fedora
# yum_repo: "http://myinternalyumrepo"
### If using Debian
# debian_repo: "http://myinternaldebianrepo"
### If using Ubuntu
# ubuntu_repo: "http://myinternalubunturepo"

## Container Registry overrides
# kube_image_repo: "{{ registry_host }}"
# gcr_image_repo: "{{ registry_host }}"
# github_image_repo: "{{ registry_host }}"
# docker_image_repo: "{{ registry_host }}"
# quay_image_repo: "{{ registry_host }}"
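
## For example (hypothetical registry): with registry_host set to
## "registry.internal:5000" and the overrides above uncommented, every image
## normally pulled from k8s.gcr.io, gcr.io, docker.io or quay.io would be
## pulled from registry.internal:5000 instead.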

## Kubernetes components
# kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
# kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"

## CNI Plugins
# cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"

## cri-tools
# crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"

## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
# etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"

# [Optional] Calico: If using Calico network plugin
# calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
# calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"

# [Optional] helm: only if you set helm_enabled: true
# helm_download_url: "{{ files_repo }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"

# [Optional] crun: only if you set crun_enabled: true
# crun_download_url: "{{ files_repo }}/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"

# [Optional] kata: only if you set kata_containers_enabled: true
# kata_containers_download_url: "{{ files_repo }}/kata-containers/runtime/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"

# [Optional] cri-o: only if you set container_manager: crio
# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"

## CentOS/Redhat/AlmaLinux
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
### By default we enable those repo automatically
# rhel_enable_repos: false
### Docker / Containerd
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"

## Fedora
### Docker
# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
### Containerd
# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"

## Debian
### Docker
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
### Containerd
# containerd_debian_repo_base_url: "{{ debian_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ debian_repo }}/containerd/gpg"
# containerd_debian_repo_repokey: 'YOURREPOKEY'

## Ubuntu
### Docker
# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
### Containerd
# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
49 kubespray_inventory/edge-2/group_vars/all/openstack.yml Normal file
@@ -0,0 +1,49 @@
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
# openstack_blockstorage_ignore_volume_az: yes
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
# openstack_lbaas_enabled: True
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
# openstack_lbaas_create_monitor: "yes"
# openstack_lbaas_monitor_delay: "1m"
# openstack_lbaas_monitor_timeout: "30s"
# openstack_lbaas_monitor_max_retries: "3"

## Values for the external OpenStack Cloud Controller
# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
# external_openstack_lbaas_method: "ROUND_ROBIN"
# external_openstack_lbaas_provider: "octavia"
# external_openstack_lbaas_create_monitor: false
# external_openstack_lbaas_monitor_delay: "1m"
# external_openstack_lbaas_monitor_timeout: "30s"
# external_openstack_lbaas_monitor_max_retries: "3"
# external_openstack_lbaas_manage_security_groups: false
# external_openstack_lbaas_internal_lb: false
# external_openstack_network_ipv6_disabled: false
# external_openstack_network_internal_networks: []
# external_openstack_network_public_networks: []
# external_openstack_metadata_search_order: "configDrive,metadataService"

## Application credentials to authenticate against Keystone API
## Those settings will take precedence over username and password that might be set in your environment
## All of them are required
# external_openstack_application_credential_name:
# external_openstack_application_credential_id:
# external_openstack_application_credential_secret:

## The tag of the external OpenStack Cloud Controller image
# external_openstack_cloud_controller_image_tag: "latest"

## To use Cinder CSI plugin to provision volumes set this value to true
## Make sure to source in the openstack credentials
# cinder_csi_enabled: true
# cinder_csi_controller_replicas: 1
32 kubespray_inventory/edge-2/group_vars/all/vsphere.yml Normal file
@@ -0,0 +1,32 @@
## Values for the external vSphere Cloud Provider
# external_vsphere_vcenter_ip: "myvcenter.domain.com"
# external_vsphere_vcenter_port: "443"
# external_vsphere_insecure: "true"
# external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable
# external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable
# external_vsphere_datacenter: "DATACENTER_name"
# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"

## vSphere version where the VMs are located
# external_vsphere_version: "6.7u3"

## Tags for the external vSphere Cloud Provider images
## gcr.io/cloud-provider-vsphere/cpi/release/manager
# external_vsphere_cloud_controller_image_tag: "latest"
## gcr.io/cloud-provider-vsphere/csi/release/syncer
# vsphere_syncer_image_tag: "v2.2.1"
## quay.io/k8scsi/csi-attacher
# vsphere_csi_attacher_image_tag: "v3.1.0"
## gcr.io/cloud-provider-vsphere/csi/release/driver
# vsphere_csi_controller: "v2.2.1"
## quay.io/k8scsi/livenessprobe
# vsphere_csi_liveness_probe_image_tag: "v2.2.0"
## quay.io/k8scsi/csi-provisioner
# vsphere_csi_provisioner_image_tag: "v2.1.0"
## quay.io/k8scsi/csi-resizer
## makes sense only for vSphere version >=7.0
# vsphere_csi_resizer_tag: "v1.1.0"

## To use vSphere CSI plugin to provision volumes set this value to true
# vsphere_csi_enabled: true
# vsphere_csi_controller_replicas: 1
22 kubespray_inventory/edge-2/group_vars/etcd.yml Normal file
@@ -0,0 +1,22 @@
---
## Etcd auto compaction retention for mvcc key value store, in hours
# etcd_compaction_retention: 0

## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
# etcd_metrics: basic

## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
# etcd_memory_limit: "512M"

## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
## etcd documentation for more information.
# etcd_quota_backend_bytes: "2147483648"
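
## A worked pairing of the two limits above (values are illustrative, not
## defaults): to honour the rule that the memory limit should not fall below
## the space quota, the default 2G quota calls for at least a 2G memory limit:
# etcd_memory_limit: "2G"
# etcd_quota_backend_bytes: "2147483648"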

### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
# etcd_peer_client_auth: true

## Settings for etcd deployment type
etcd_deployment_type: host
187 kubespray_inventory/edge-2/group_vars/k8s_cluster/addons.yml Normal file
@@ -0,0 +1,187 @@
---
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
# dashboard_enabled: false

# Helm deployment
helm_enabled: false

# Registry deployment
registry_enabled: false
# registry_namespace: kube-system
# registry_storage_class: ""
# registry_disk_size: "10Gi"

# Metrics Server deployment
metrics_server_enabled: false
# metrics_server_resizer: false
# metrics_server_kubelet_insecure_tls: true
# metrics_server_metric_resolution: 15s
# metrics_server_kubelet_preferred_address_types: "InternalIP"

# Rancher Local Path Provisioner
local_path_provisioner_enabled: false
# local_path_provisioner_namespace: "local-path-storage"
# local_path_provisioner_storage_class: "local-path"
# local_path_provisioner_reclaim_policy: Delete
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
# local_path_provisioner_debug: false
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
# local_path_provisioner_image_tag: "v0.0.19"
# local_path_provisioner_helper_image_repo: "busybox"
# local_path_provisioner_helper_image_tag: "latest"

# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: kube-system
# local_volume_provisioner_nodelabels:
#   - kubernetes.io/hostname
#   - topology.kubernetes.io/region
#   - topology.kubernetes.io/zone
# local_volume_provisioner_storage_classes:
#   local-storage:
#     host_dir: /mnt/disks
#     mount_dir: /mnt/disks
#     volume_mode: Filesystem
#     fs_type: ext4
#   fast-disks:
#     host_dir: /mnt/fast-disks
#     mount_dir: /mnt/fast-disks
#     block_cleaner_command:
#       - "/scripts/shred.sh"
#       - "2"
#     volume_mode: Filesystem
#     fs_type: ext4

# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots
# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller
# Longhorn is an external CSI that would also require setting this to true but it is not included in kubespray
# csi_snapshot_controller_enabled: false

# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true

# RBD provisioner deployment
rbd_provisioner_enabled: false
# rbd_provisioner_namespace: rbd-provisioner
# rbd_provisioner_replicas: 2
# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# rbd_provisioner_pool: kube
# rbd_provisioner_admin_id: admin
# rbd_provisioner_secret_name: ceph-secret-admin
# rbd_provisioner_secret: ceph-key-admin
# rbd_provisioner_user_id: kube
# rbd_provisioner_user_secret_name: ceph-secret-user
# rbd_provisioner_user_secret: ceph-key-user
# rbd_provisioner_user_secret_namespace: rbd-provisioner
# rbd_provisioner_fs_type: ext4
# rbd_provisioner_image_format: "2"
# rbd_provisioner_image_features: layering
# rbd_provisioner_storage_class: rbd
# rbd_provisioner_reclaim_policy: Delete

# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
#   kubernetes.io/os: "linux"
# ingress_nginx_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
#   map-hash-bucket-size: "128"
#   ssl-protocols: "TLSv1.2 TLSv1.3"
# ingress_nginx_configmap_tcp_services:
#   9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
#   53: "kube-system/coredns:53"
# ingress_nginx_extra_args:
#   - --default-ssl-certificate=default/foo-tls
# ingress_nginx_class: nginx

# ambassador ingress controller deployment
ingress_ambassador_enabled: false
# ingress_ambassador_namespace: "ambassador"
# ingress_ambassador_version: "*"
# ingress_ambassador_multi_namespaces: false

# ALB ingress controller deployment
ingress_alb_enabled: false
# alb_ingress_aws_region: "us-east-1"
# alb_ingress_restrict_scheme: "false"
# Enables logging on all outbound requests sent to the AWS API.
# If logging is desired, set to true.
# alb_ingress_aws_debug: "false"

# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"

# MetalLB deployment
metallb_enabled: false
metallb_speaker_enabled: true
# metallb_ip_range:
#   - "10.5.0.50-10.5.0.99"
# metallb_speaker_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_controller_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_speaker_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_controller_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_version: v0.10.2
# metallb_protocol: "layer2"
# metallb_port: "7472"
# metallb_memberlist_port: "7946"
# metallb_additional_address_pools:
#   kube_service_pool:
#     ip_range:
#       - "10.5.1.50-10.5.1.99"
#     protocol: "layer2"
#     auto_assign: false
# metallb_protocol: "bgp"
# metallb_peers:
#   - peer_address: 192.0.2.1
#     peer_asn: 64512
#     my_asn: 4200000000
#   - peer_address: 192.0.2.2
#     peer_asn: 64513
#     my_asn: 4200000000
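
## A minimal layer2 enablement sketch (the address range is hypothetical and
## must be unallocated in your network); note that MetalLB also requires
## kube_proxy_strict_arp: true in k8s-cluster.yml:
# metallb_enabled: true
# metallb_speaker_enabled: true
# metallb_ip_range:
#   - "192.168.113.200-192.168.113.220"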

# The plugin manager for kubectl
krew_enabled: false
krew_root_dir: "/usr/local/krew"
318 kubespray_inventory/edge-2/group_vars/k8s_cluster/k8s-cluster.yml Normal file
@@ -0,0 +1,318 @@
---
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# the kubernetes normally puts in /srv/kubernetes.
# This puts them in the same location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"

kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.21.6

# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Directory where credentials will be stored
credentials_dir: "{{ inventory_dir }}/credentials"

## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
# kube_oidc_auth: false
# kube_token_auth: false


## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...)

# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: 'oidc:'
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: 'oidc:'
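
## A minimal end-to-end OIDC sketch (the issuer URL and claim choices are
## hypothetical, not defaults):
# kube_oidc_auth: true
# kube_oidc_url: https://dex.example.com/dex
# kube_oidc_client_id: kubernetes
# kube_oidc_username_claim: email
# kube_oidc_groups_claim: groups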

## Variables to control webhook authn/authz
# kube_webhook_token_auth: false
# kube_webhook_token_auth_url: https://...
# kube_webhook_token_auth_url_skip_tls_verify: false

## For webhook authorization, authorization_modes must include Webhook
# kube_webhook_authorization: false
# kube_webhook_authorization_url: https://...
# kube_webhook_authorization_url_skip_tls_verify: false

# Choose network plugin (cilium, calico, weave or flannel. Use cni for generic cni plugin)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: weave

# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
kube_network_plugin_multus: false

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# internal network node size allocation (optional). This is the size allocated
# to each node for pod IP address allocation. Note that the number of pods per node is
# also limited by the kubelet_max_pods variable which defaults to 110.
#
# Example:
# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
#  - kube_pods_subnet: 10.233.64.0/18
#  - kube_network_node_prefix: 24
#  - kubelet_max_pods: 110
#
# Example:
# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
#  - kube_pods_subnet: 10.233.64.0/18
#  - kube_network_node_prefix: 25
#  - kubelet_max_pods: 110
kube_network_node_prefix: 24

# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false

# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if enable_dual_stack_networks is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116

# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if enable_dual_stack_networks is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112

# IPv6 subnet size allocated to each node for pods.
# This is only used if enable_dual_stack_networks is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443  # (https)
# kube_apiserver_insecure_port: 8080  # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
kube_apiserver_insecure_port: 0  # (disabled)

# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
kube_proxy_mode: ipvs

# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
# must be set to true for MetalLB to work
kube_proxy_strict_arp: false

# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses.
# kube_proxy_nodeport_addresses_cidr is retained for legacy config
kube_proxy_nodeport_addresses: >-
  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
  [{{ kube_proxy_nodeport_addresses_cidr }}]
  {%- else -%}
  []
  {%- endif -%}
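
## For example (hypothetical value), restricting NodePorts to a single block
## via the legacy variable renders the slice above as "[10.0.0.0/8]":
# kube_proxy_nodeport_addresses_cidr: 10.0.0.0/8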

# If non-empty, will use this string as identification instead of the actual hostname
# kube_override_hostname: >-
#   {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
#   {%- else -%}
#   {{ inventory_hostname }}
#   {%- endif -%}

## Encrypting Secret Data at Rest (experimental)
kube_encrypt_secret_data: false

# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
# kubelet_shutdown_grace_period: 60s
# kubelet_shutdown_grace_period_critical_pods: 20s

# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cloud.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be coredns, coredns_dual, manual or none
dns_mode: coredns
# Set manual server if using a custom cluster DNS server
# manual_dns_server: 10.x.x.x
# Enable nodelocal dns cache
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
nodelocaldns_bind_metrics_host_ip: false
# nodelocaldns_external_zones:
# - zones:
#   - example.com
#   - example.io:1053
#   nameservers:
#   - 1.1.1.1
#   - 2.2.2.2
#   cache: 5
# - zones:
#   - https://mycompany.local:4453
#   nameservers:
#   - 192.168.0.53
#   cache: 0
# Enable k8s_external plugin for CoreDNS
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local
# Enable endpoint_pod_names option for kubernetes plugin
enable_coredns_k8s_endpoint_pod_names: false

# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolution as an HTTP service
deploy_netchecker: false
# IP address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
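
## A worked example of the ipaddr() expressions used here and for
## kube_apiserver_ip above: with the default kube_service_addresses of
## 10.233.0.0/18, kube_apiserver_ip resolves to 10.233.0.1, skydns_server to
## 10.233.0.3 and skydns_server_secondary to 10.233.0.4.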

## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
container_manager: containerd

# Additional container runtimes
kata_containers_enabled: false

kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"

# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent

# audit log for kubernetes
kubernetes_audit: false

# dynamic kubelet configuration
# Note: Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA.
# It is planned to be removed from Kubernetes in version 1.23.
# Please use alternative ways to update kubelet configuration.
dynamic_kubelet_configuration: false

# define kubelet config dir for dynamic kubelet
# kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"

# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false

# Custom PodSecurityPolicySpec for restricted policy
# podsecuritypolicy_restricted_spec: {}

# Custom PodSecurityPolicySpec for privileged policy
# podsecuritypolicy_privileged_spec: {}

# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
# kubectl_localhost: false

# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods

## Optionally reserve resources for OS system daemons.
# system_reserved: true
## Uncomment to override default values
# system_memory_reserved: 512Mi
# system_cpu_reserved: 500m
## Reservation for master hosts
# system_master_memory_reserved: 256Mi
# system_master_cpu_reserved: 250m

# An alternative flexvolume plugin directory
# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec

## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]

## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-sigs/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
persistent_volumes_enabled: false

## Container Engine Acceleration
## Enable container acceleration feature, for example use gpu acceleration in containers
# nvidia_accelerator_enabled: true
## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset.
## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers.
## Labels and taints won't be set on nodes if they are not in the array.
# nvidia_gpu_nodes:
#   - kube-gpu-001
# nvidia_driver_version: "384.111"
## flavor can be tesla or gtx
# nvidia_gpu_flavor: gtx
## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
## NVIDIA GPU device plugin image.
# nvidia_gpu_device_plugin_container: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"

## Supported tls min version. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""

## Support tls cipher suites.
# tls_cipher_suites: {}
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
#   - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
#   - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
#   - TLS_ECDHE_RSA_WITH_RC4_128_SHA
#   - TLS_RSA_WITH_3DES_EDE_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA
#   - TLS_RSA_WITH_AES_128_CBC_SHA256
#   - TLS_RSA_WITH_AES_128_GCM_SHA256
#   - TLS_RSA_WITH_AES_256_CBC_SHA
#   - TLS_RSA_WITH_AES_256_GCM_SHA384
#   - TLS_RSA_WITH_RC4_128_SHA

## Amount of time to retain events. (default 1h0m0s)
event_ttl_duration: "1h0m0s"

## Automatically renew K8S control plane certificates on first Monday of each month
auto_renew_certificates: false
# First Monday of each month
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
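
## A worked example of the calendar template above: the first host in the
## kube_control_plane group (index 0) renders "Mon *-*-1,2,3,4,5,6,7 03:00:00",
## the second (index 1) "Mon *-*-1,2,3,4,5,6,7 03:10:00", staggering renewals
## by ten minutes per control-plane node.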
109 kubespray_inventory/edge-2/group_vars/k8s_cluster/k8s-net-calico.yml Normal file
@@ -0,0 +1,109 @@
# see roles/network_plugin/calico/defaults/main.yml

## With calico it is possible to distribute routes with border routers of the datacenter.
## Warning : enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router
# peer_with_router: false

# Enables Internet connectivity from containers
# nat_outgoing: true

# Enables Calico CNI "host-local" IPAM plugin
# calico_ipam_host_local: true

# add default ippool name
# calico_pool_name: "default-pool"

# add default ippool blockSize (defaults kube_network_node_prefix)
# calico_pool_blocksize: 24

# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
# calico_pool_cidr: 1.2.3.4/5

# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112

# Global as_num (/calico/bgp/v1/global/as_num)
# global_as_num: "64512"

# If doing peering with node-assigned ASNs where the global ASN does not match your nodes, you want this
# to be true.  All other cases, false.
# calico_no_global_as_num: false

# You can set MTU value here. If left undefined or empty, it will
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500

# Configure the MTU to use for workload interfaces and tunnels.
# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440)
# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
# calico_veth_mtu: 1440

# Advertise Cluster IPs
# calico_advertise_cluster_ips: true

# Advertise Service External IPs
# calico_advertise_service_external_ips:
# - x.x.x.x/24
# - y.y.y.y/32

# Advertise Service LoadBalancer IPs
# calico_advertise_service_loadbalancer_ips:
# - x.x.x.x/24
# - y.y.y.y/16

# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
# calico_datastore: "kdd"

# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
# calico_iptables_backend: "Legacy"

# Use typha (only with kdd)
# typha_enabled: false

# Generate TLS certs for secure typha<->calico-node communication
# typha_secure: false

# Scaling typha: 1 replica per 100 nodes is adequate
# Number of typha replicas
# typha_replicas: 1

# Set max typha connections
# typha_max_connections_lower_limit: 300

# Set calico network backend: "bird", "vxlan" or "none"
# bird enables BGP routing, required for ipip mode.
# calico_network_backend: bird

# IP in IP and VXLAN are mutually exclusive modes.
# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_ipip_mode: 'Always'

# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_vxlan_mode: 'Never'

# set VXLAN port and VNI
# calico_vxlan_vni: 4096
# calico_vxlan_port: 4789

# If you want to use a non default IP_AUTODETECTION_METHOD for calico node set this option to one of:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX
# see https://docs.projectcalico.org/reference/node/configuration
# calico_ip_auto_method: "interface=eth.*"
# Choose the iptables insert mode for Calico: "Insert" or "Append".
# calico_felix_chaininsertmode: Insert

# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2)
# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
# calico_use_default_route_src_ipaddr: false

# Enable calico traffic encryption with wireguard
# calico_wireguard_enabled: false

# Under certain situations liveness and readiness probes may need tuning
# calico_node_livenessprobe_timeout: 10
# calico_node_readinessprobe_timeout: 10
10 kubespray_inventory/edge-2/group_vars/k8s_cluster/k8s-net-canal.yml Normal file
@@ -0,0 +1,10 @@
# see roles/network_plugin/canal/defaults/main.yml

# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
# canal_iface: ""

# Whether or not to masquerade traffic to destinations not within
# the pod network.
# canal_masquerade: "true"
1 kubespray_inventory/edge-2/group_vars/k8s_cluster/k8s-net-cilium.yml Normal file
@@ -0,0 +1 @@
# see roles/network_plugin/cilium/defaults/main.yml
18 kubespray_inventory/edge-2/group_vars/k8s_cluster/k8s-net-flannel.yml Normal file
@@ -0,0 +1,18 @@
# see roles/network_plugin/flannel/defaults/main.yml

## interface that should be used for flannel operations
## This is actually an inventory cluster-level item
# flannel_interface:

## Select interface that should be used for flannel operations by regexp on Name or IP
## This is actually an inventory cluster-level item
## example: select interface with ip from net 10.0.0.0/23
## single quote and escape backslashes
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'

# You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'
# for experimental backend
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
# flannel_backend_type: "vxlan"
# flannel_vxlan_vni: 1
# flannel_vxlan_port: 8472
61 kubespray_inventory/edge-2/group_vars/k8s_cluster/k8s-net-kube-router.yml Normal file
@@ -0,0 +1,61 @@
# See roles/network_plugin/kube-router//defaults/main.yml
 | 
			
		||||
 | 
			
		||||
# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
 | 
			
		||||
# kube_router_run_router: true
 | 
			
		||||
 | 
			
		||||
# Enables Network Policy -- sets up iptables to provide ingress firewall for pods
 | 
			
		||||
# kube_router_run_firewall: true
 | 
			
		||||
 | 
			
		||||
# Enables Service Proxy -- sets up IPVS for Kubernetes Services
 | 
			
		||||
# see docs/kube-router.md "Caveats" section
 | 
			
		||||
# kube_router_run_service_proxy: false
 | 
			
		||||
 | 
			
		||||
# Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers.
 | 
			
		||||
# kube_router_advertise_cluster_ip: false
 | 
			
		||||
 | 
			
		||||
# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
 | 
			
		||||
# kube_router_advertise_external_ip: false
 | 
			
		||||
 | 
			
		||||
# Add LoadbBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
 | 
			
		||||
# kube_router_advertise_loadbalancer_ip: false
 | 
			
		||||
 | 
			
		||||
# Adjust manifest of kube-router daemonset template with DSR needed changes
 | 
			
		||||
# kube_router_enable_dsr: false
 | 
			
		||||
 | 
			
		||||
# Array of arbitrary extra arguments to kube-router, see
 | 
			
		||||
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
 | 
			
		||||
# kube_router_extra_args: []
 | 
			
		||||
 | 
			
		||||
# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr.
 | 
			
		||||
# kube_router_peer_router_asns: ~
 | 
			
		||||
 | 
			
		||||
# The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidr's.
 | 
			
		||||
# kube_router_peer_router_ips: ~
 | 
			
		||||
 | 
			
		||||
# The remote port of the external BGP to which all nodes will peer. If not set, default BGP port (179) will be used.
 | 
			
		||||
# kube_router_peer_router_ports: ~
 | 
			
		||||
 | 
			
		||||
# Setups node CNI to allow hairpin mode, requires node reboots, see
 | 
			
		||||
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
 | 
			
		||||
# kube_router_support_hairpin_mode: false
 | 
			
		||||
 | 
			
		||||
# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
 | 
			
		||||
# kube_router_dns_policy: ClusterFirstWithHostNet
 | 
			
		||||
 | 
			
		||||
# Array of annotations for master
 | 
			
		||||
# kube_router_annotations_master: []
 | 
			
		||||
 | 
			
		||||
# Array of annotations for every node
 | 
			
		||||
# kube_router_annotations_node: []
 | 
			
		||||
 | 
			
		||||
# Array of common annotations for every node
 | 
			
		||||
# kube_router_annotations_all: []
 | 
			
		||||
 | 
			
		||||
# Enables scraping kube-router metrics with Prometheus
 | 
			
		||||
# kube_router_enable_metrics: false
 | 
			
		||||
 | 
			
		||||
# Path to serve Prometheus metrics on
 | 
			
		||||
# kube_router_metrics_path: /metrics
 | 
			
		||||
 | 
			
		||||
# Prometheus metrics port to use
 | 
			
		||||
# kube_router_metrics_port: 9255
 | 
			
		||||
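For example, peering every node with a single upstream BGP router would set the three peering variables together, roughly like this (a hypothetical sketch; the ASN and address are illustrative, not part of this commit):

# hypothetical BGP peering; ASN and IP are illustrative
kube_router_peer_router_asns: "64512"
kube_router_peer_router_ips: "192.0.2.1"
kube_router_peer_router_ports: "179"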
@@ -0,0 +1,6 @@
---
# private interface, on an L2 network
macvlan_interface: "eth1"

# Enable NAT on the default gateway network interface
enable_nat_default_gateway: true
@@ -0,0 +1,61 @@
# see roles/network_plugin/weave/defaults/main.yml

# Weave's network password for encryption; if null, no network encryption.
# weave_password: ~

# If set to 1, disable checking for new Weave Net versions (default is blank,
# i.e. the check is enabled)
# weave_checkpoint_disable: false

# Soft limit on the number of connections between peers. Defaults to 100.
# weave_conn_limit: 100

# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
# for attached containers. If you need to disable hairpin, e.g. your kernel is
# one of those that can panic if hairpin is enabled, then you can disable it by
# setting `HAIRPIN_MODE=false`.
# weave_hairpin_mode: true

# The range of IP addresses used by Weave Net and the subnet they are placed in
# (CIDR format; default 10.32.0.0/12)
# weave_ipalloc_range: "{{ kube_pods_subnet }}"

# Set to 0 to disable the Network Policy Controller (default is on)
# weave_expect_npc: "{{ enable_network_policy }}"

# List of addresses of peers in the Kubernetes cluster (default is to fetch the
# list from the api-server)
# weave_kube_peers: ~

# Set the initialization mode of the IP Address Manager (defaults to consensus
# amongst the KUBE_PEERS)
# weave_ipalloc_init: ~

# Set the IP address used as a gateway from the Weave network to the host
# network - this is useful if you are configuring the addon as a static pod.
# weave_expose_ip: ~

# Address and port that the Weave Net daemon will serve Prometheus-style
# metrics on (defaults to 0.0.0.0:6782)
# weave_metrics_addr: ~

# Address and port that the Weave Net daemon will serve status requests on
# (defaults to disabled)
# weave_status_addr: ~

# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
# underlying network has a tighter limit, or set a larger size for better
# performance if your network supports jumbo frames (e.g. 8916)
# weave_mtu: 1376

# Set to 1 to preserve the client source IP address when accessing Services
# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
# only with Weave IPAM (the default).
# weave_no_masq_local: true

# Set to nft to use the nftables backend for iptables (default is iptables)
# weave_iptables_backend: iptables

# Extra variables passed to launch.sh, useful for enabling seed mode, see
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
# weave_extra_args: ~
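For example, enabling network encryption together with a jumbo-frame MTU would look like this (a hypothetical sketch; the secret and MTU are illustrative, not part of this commit):

# hypothetical overrides; values are illustrative
weave_password: "a-long-shared-secret"
weave_mtu: 8916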
40  kubespray_inventory/edge-2/hosts.yaml  Normal file
@@ -0,0 +1,40 @@
all:
  vars:
    ansible_user: ubuntu
    ansible_become: true
  hosts:
    edge-cluster-2-master-1:
      ansible_host: edge-cluster-2-master-1
      ip: 192.168.112.100
      access_ip: 192.168.112.100

    edge-cluster-2-worker-1:
      ansible_host: edge-cluster-2-worker-1
      ip: 192.168.112.101
      access_ip: 192.168.112.101

    edge-cluster-2-worker-2:
      ansible_host: edge-cluster-2-worker-2
      ip: 192.168.112.102
      access_ip: 192.168.112.102

  children:
    kube_control_plane:
      hosts:
        edge-cluster-2-master-1:
    kube_node:
      hosts:
        edge-cluster-2-master-1:
        edge-cluster-2-worker-1:
        edge-cluster-2-worker-2:
    etcd:
      hosts:
        edge-cluster-2-master-1:
        edge-cluster-2-worker-1:
        edge-cluster-2-worker-2:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}
@@ -216,10 +216,20 @@ def run_xml(outdir:str, hostname:str, ram:int, net:str, ipaddr:str):

  interfacecfg = ""

  for n in net.split(','):
  interfaces = list(net.split(','))

  for i, n in enumerate(interfaces):

    multi = ""
    if i == 0 and len(interfaces) > 1:
      multi = 'multifunction="on"'

    # Pinning the PCI address like this fixes the first interface name to enp1s0,
    # and consecutive interfaces will be enp1s0f{n}
    interfacecfg += f"""    <interface type="network">
        <source network="{n}"/>
        <model type="virtio"/>
        <address type="pci" domain="0x0000" bus="0x01" slot="0x00" function="0x{i}" {multi} />
      </interface>"""

@@ -206,22 +206,33 @@ def run_iso(hostname:str, ipaddr:str):
      cloudinit.format(hostname=hostname)
    )

  netplan_fname = None
  netplan_generated = False
  if ipaddr:
    gateway = '192.168.' + ipaddr.split('.')[2] + '.1'  # Kristóf had a good laugh at this
    netplan_fhandle, netplan_fname = tempfile.mkstemp(suffix='.yaml')
    with open(netplan_fhandle, "wt") as f:
      f.write(
        netplan.format(
          addr=ipaddr,
          gateway=gateway,
    if not ipaddr.startswith('@'):
      gateway = '192.168.' + ipaddr.split('.')[2] + '.1'  # Kristóf had a good laugh at this
      netplan_fhandle, netplan_fname = tempfile.mkstemp(suffix='.yaml')
      with open(netplan_fhandle, "wt") as f:
        f.write(
          netplan.format(
            addr=ipaddr,
            gateway=gateway,
          )
        )
      )
    os.system(f'cloud-localds -v {isoout}/cloudinit-{hostname}.iso --network-config={netplan_fname} {cloudinit_fname}')
    os.unlink(netplan_fname)
  else:
    os.system(f'cloud-localds -v {isoout}/cloudinit-{hostname}.iso {cloudinit_fname}')
      netplan_generated = True
    else:
      fname = ipaddr[1:]
      netplan_fname = fname

    if netplan_fname:
      os.system(f'cloud-localds -v {isoout}/cloudinit-{hostname}.iso --network-config={netplan_fname} {cloudinit_fname}')
    else:
      os.system(f'cloud-localds -v {isoout}/cloudinit-{hostname}.iso {cloudinit_fname}')

  os.unlink(cloudinit_fname)
  if netplan_generated:
    os.unlink(netplan_fname)


def run_img(hostname:str, img_src:str):
@@ -239,11 +250,21 @@ def run_xml(outdir:str, hostname:str, ram:int, net:str):
  os.makedirs(xmlout, exist_ok=True)

  interfacecfg = ""

  interfaces = list(net.split(','))

  for n in net.split(','):
  for i, n in enumerate(interfaces):

    multi = ""
    if i == 0 and len(interfaces) > 1:
      multi = 'multifunction="on"'

    # Pinning the PCI address like this fixes the first interface name to enp1s0,
    # and consecutive interfaces will be enp1s0f{n}
    interfacecfg += f"""    <interface type="network">
        <source network="{n}"/>
        <model type="virtio"/>
        <address type="pci" domain="0x0000" bus="0x01" slot="0x00" function="0x{i}" {multi} />
      </interface>"""

@@ -262,7 +283,7 @@ def run_xml(outdir:str, hostname:str, ram:int, net:str):
def run_ssh(outdir:str, hostname:str, ipaddr:str):
  if not ipaddr:
    return

  sshout_file = os.path.join(outdir, 'ssh_config')
  with open(sshout_file, 'at') as f:
    f.write("\n".join([
32  vm_generator_luna/magic_router_netplan.yaml  Normal file
@@ -0,0 +1,32 @@
network:
  version: 2
  ethernets:

    # cloud
    enp1s0f0:
      dhcp4: false
      dhcp6: false
      gateway4: 192.168.110.1
      addresses:
        - "192.168.110.10/24"

    # edge-1
    enp1s0f1:
      dhcp4: false
      dhcp6: false
      addresses:
        - "192.168.111.10/24"

    # edge-2
    enp1s0f2:
      dhcp4: false
      dhcp6: false
      addresses:
        - "192.168.112.10/24"

    # site
    enp1s0f3:
      dhcp4: false
      dhcp6: false
      addresses:
        - "192.168.113.10/24"
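Note that `gateway4` is deprecated in recent netplan releases in favor of `routes`; an equivalent form for the cloud interface would look roughly like this (a sketch, not part of this commit):

    # hypothetical routes-based equivalent of the gateway4 line above
    enp1s0f0:
      dhcp4: false
      dhcp6: false
      addresses:
        - "192.168.110.10/24"
      routes:
        - to: default
          via: 192.168.110.1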