diff --git a/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/argocd-apps-prometheus-app.yaml b/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/argocd-apps-prometheus-app.yaml new file mode 100644 index 0000000..3f544bd --- /dev/null +++ b/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/argocd-apps-prometheus-app.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: cloud-prometheus + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: cloud-prometheus + destination: + server: https://kubernetes.default.svc + namespace: cloud-prod + sources: + - repoURL: https://git.avroid.tech/K8s/k8s-configs.git + targetRevision: master + ref: values + - repoURL: https://nexus.avroid.tech/repository/devops-helm-proxy-helm/ + chart: "prometheus-community/prometheus" + targetRevision: 27.5.1 + helm: + valueFiles: + - $values/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/values-override.yaml + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - ApplyOutOfSyncOnly=true + - CreateNamespace=true +--- +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: cloud-prometheus + namespace: argocd + # Finalizer that ensures that project is not deleted until it is not referenced by any application + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + sourceRepos: + - https://git.avroid.tech/K8s/k8s-configs.git + - https://nexus.avroid.tech/repository/devops-helm-proxy-helm/ + # Only permit applications to deploy to the guestbook namespace in the same cluster + destinations: + - namespace: cloud-prod + server: https://kubernetes.default.svc + # Deny all cluster-scoped resources from being created, except for Namespace + clusterResourceWhitelist: + - group: '' + kind: Namespace diff --git 
a/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/argocd-apps-prometheus-network-policy.yaml b/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/argocd-apps-prometheus-network-policy.yaml new file mode 100644 index 0000000..70032ea --- /dev/null +++ b/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/argocd-apps-prometheus-network-policy.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: cloud-prometheus-in + namespace: cloud-prod + labels: + app.kubernetes.io/managed-by: argocd +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: cloud-prometheus-out + namespace: cloud-prod + labels: + app.kubernetes.io/managed-by: argocd +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + policyTypes: + - Egress + egress: + - ports: + - port: 443 + protocol: TCP + - port: 80 + protocol: TCP + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus-redis-exporter + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus-postgres-exporter diff --git a/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/argocd-apps-prometheus-secret.yaml b/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/argocd-apps-prometheus-secret.yaml new file mode 100644 index 0000000..2c0b609 --- /dev/null +++ b/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/argocd-apps-prometheus-secret.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/managed-by: argocd + name: prometheus-secret + namespace: cloud-prod + annotations: + 
vault.security.banzaicloud.io/vault-addr: "https://vault.avroid.tech" + vault.security.banzaicloud.io/vault-role: "cloud-prod" + vault.security.banzaicloud.io/vault-skip-verify: "false" + vault.security.banzaicloud.io/vault-path: "avroid-office" +type: Opaque +data: + consul.secret: dmF1bHQ6dGVhbS1kZXZvcHMvZGF0YS9zZXJ2aWNlcy9tb25pdG9yaW5nL2s4cy9jbG91ZC1wcm9kL3Byb21ldGhldXMjY29uc3VsLnNlY3JldA== diff --git a/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/values-override.yaml b/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/values-override.yaml new file mode 100644 index 0000000..2f0bf87 --- /dev/null +++ b/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/prometheus/values-override.yaml @@ -0,0 +1,275 @@ +# yaml-language-server: $schema=values.schema.json +# Default values for prometheus. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +rbac: + create: false + +## Define serviceAccount names for components. Defaults to component's fully qualified name. +## +serviceAccounts: + server: + create: false + name: "vault" + + ## Opt out of automounting Kubernetes API credentials. + ## It will be overriden by server.automountServiceAccountToken value, if set. + automountServiceAccountToken: true + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader +## +configmapReload: + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: false + +server: + ## Prometheus server container name + ## + name: server + + ## Opt out of automounting Kubernetes API credentials. + ## If set it will override serviceAccounts.server.automountServiceAccountToken value for ServiceAccount. 
+ automountServiceAccountToken: true + + ## Prometheus server container image + ## + image: + repository: harbor.avroid.tech/quay-proxy/prometheus/prometheus + + ## External URL which can access prometheus + ## Maybe same with Ingress host name + baseURL: "https://cloud-k8s-prometheus.avroid.tech" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: + - name: CONSUL_SECRET + valueFrom: + secretKeyRef: + name: prometheus-secret + key: consul.secret + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + ingressClassName: nginx + + ## Prometheus server Ingress annotations + ## + annotations: + kubernetes.io/ingress.class: nginx + + ## Prometheus server Ingress hostnames with optional path (passed through tpl) + ## Must be provided if Ingress is enabled + ## + hosts: + - cloud-k8s-prometheus.avroid.tech + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: + node-role.kubernetes.io/worker: "" + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: false + + emptyDir: + ## Prometheus server emptyDir volume size limit + ## + sizeLimit: "1Gi" + + ## 
Annotations to be added to Prometheus server pods + ## + podAnnotations: + vault.security.banzaicloud.io/vault-addr: "https://vault.avroid.tech" + vault.security.banzaicloud.io/vault-role: "cloud-prod" + vault.security.banzaicloud.io/vault-skip-verify: "false" + vault.security.banzaicloud.io/vault-path: "avroid-office" + vault.security.banzaicloud.io/run-as-non-root: "true" + vault.security.banzaicloud.io/run-as-user: "65534" + vault.security.banzaicloud.io/run-as-group: "65534" + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: + vault.security.banzaicloud.io/vault-addr: "https://vault.avroid.tech" + vault.security.banzaicloud.io/vault-role: "cloud-prod" + vault.security.banzaicloud.io/vault-skip-verify: "false" + vault.security.banzaicloud.io/vault-path: "avroid-office" + vault.security.banzaicloud.io/run-as-non-root: "true" + vault.security.banzaicloud.io/run-as-user: "65534" + vault.security.banzaicloud.io/run-as-group: "65534" + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + requests: + cpu: 500m + memory: 512Mi + limits: + cpu: 750m + memory: 768Mi + + ## Prometheus' data retention size. Supported units: B, KB, MB, GB, TB, PB, EB. 
+ ## + retentionSize: "512MB" + +## Prometheus server ConfigMap entries +## +serverFiles: + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + +# adds additional scrape configs to prometheus.yml +# must be a string so you have to add a | after extraScrapeConfigs: +# example adds prometheus-blackbox-exporter scrape config +extraScrapeConfigs: | + - job_name: 'postgres_exporter' + consul_sd_configs: + - server: consul.avroid.tech + scheme: https + datacenter: "avroid-office" + tags: [ k8s_postgres_exporter ] + services: [ monitoring_k8s_postgres_exporter ] + authorization: + credentials: "${CONSUL_SECRET}" + relabel_configs: + - source_labels: [ __meta_consul_service_metadata_metrics_path ] + target_label: __metrics_path__ + - source_labels: [ __meta_consul_service_metadata_job_name ] + target_label: job + - source_labels: [ __meta_consul_service_metadata_auth_module ] + target_label: __param_auth_module + - source_labels: [ __meta_consul_service_metadata_ssl_mode ] + target_label: __param_sslmode + - source_labels: [ __address__,__meta_consul_service_metadata_db_name ] + separator: "/" + target_label: __param_target + - source_labels: [ __meta_consul_node ] + target_label: instance + regex: "([^:]+).*" + replacement: '${1}' + - target_label: __address__ + replacement: cloud-postgres-exporter-prometheus-postgres-exporter:9187 + - job_name: 'redis_exporter' + consul_sd_configs: + - server: consul.avroid.tech + scheme: https + datacenter: "avroid-office" + tags: [ k8s_redis_exporter ] + services: [ monitoring_k8s_redis_exporter ] + authorization: + credentials: "${CONSUL_SECRET}" + relabel_configs: + - source_labels: [ __meta_consul_service_metadata_metrics_path ] + target_label: __metrics_path__ + - 
source_labels: [ __meta_consul_service_metadata_job_name ] + target_label: job + - source_labels: [ __address__ ] + target_label: __param_target + - source_labels: [ __meta_consul_node ] + target_label: instance + regex: "([^:]+).*" + replacement: '${1}' + - target_label: __address__ + replacement: cloud-redis-exporter-prometheus-redis-exporter:9121 + - job_name: 'patroni_exporter' + consul_sd_configs: + - server: consul.avroid.tech + scheme: https + datacenter: "avroid-office" + tags: [ k8s_patroni_exporter ] + services: [ monitoring_k8s_patroni_exporter ] + authorization: + credentials: "${CONSUL_SECRET}" + relabel_configs: + - source_labels: [ __meta_consul_service_metadata_metrics_path ] + target_label: __metrics_path__ + - source_labels: [ __meta_consul_service_metadata_job_name ] + target_label: job + - source_labels: [ __address__ ] + target_label: __param_target + - source_labels: [ __meta_consul_node ] + target_label: instance + regex: "([^:]+).*" + replacement: '${1}' + +# Configuration of subcharts defined in Chart.yaml + +## alertmanager sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager +## +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: false + +## kube-state-metrics sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics +## +kube-state-metrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## + enabled: false + +## prometheus-node-exporter sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter +## +prometheus-node-exporter: + ## If false, node-exporter will not be installed + ## + enabled: false + +## prometheus-pushgateway sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway +## 
+prometheus-pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: false