diff --git a/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/.rbac/argocd-apps-vault-service-account.yaml b/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/.rbac/argocd-apps-vault-service-account.yaml
new file mode 100644
index 0000000..cd959d1
--- /dev/null
+++ b/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/.rbac/argocd-apps-vault-service-account.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/name: serviceaccount
+    app.kubernetes.io/instance: vault-sa
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: vault-operator
+    app.kubernetes.io/part-of: vault-operator
+    app.kubernetes.io/managed-by: argocd
+  name: vault
+  namespace: kube-prometheus-stack
diff --git a/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/argocd-apps-kube-prometheus-stack.yaml b/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/argocd-apps-kube-prometheus-stack.yaml
new file mode 100644
index 0000000..27a58ef
--- /dev/null
+++ b/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/argocd-apps-kube-prometheus-stack.yaml
@@ -0,0 +1,11 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: kube-prometheus-stack
+  labels:
+    name: kube-prometheus-stack
+    app.kubernetes.io/managed-by: argocd
+  annotations:
+    argocd.argoproj.io/sync-wave: "-1"
+    scheduler.alpha.kubernetes.io/node-selector: node-role.kubernetes.io/worker=
diff --git a/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/kube-prometheus-stack/argocd-apps-kube-prometheus-stack-app.yaml b/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/kube-prometheus-stack/argocd-apps-kube-prometheus-stack-app.yaml
new file mode 100644
index 0000000..365d2ae
--- /dev/null
+++ b/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/kube-prometheus-stack/argocd-apps-kube-prometheus-stack-app.yaml
@@ -0,0 +1,61 @@
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: kube-prometheus-stack
+  namespace: argocd
+  finalizers:
+    - resources-finalizer.argocd.argoproj.io
+spec:
+  project: kube-prometheus-stack
+  destination:
+    server: https://kubernetes.default.svc
+    namespace: kube-prometheus-stack
+  sources:
+    - repoURL: https://git.avroid.tech/K8s/k8s-configs.git
+      targetRevision: master
+      ref: values
+    - repoURL: https://nexus.avroid.tech/repository/devops-helm-proxy-helm/
+      chart: "prometheus-community/kube-prometheus-stack"
+      targetRevision: 69.7.3
+      helm:
+        valueFiles:
+          - $values/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/kube-prometheus-stack/values-override.yaml
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+    syncOptions:
+      - ApplyOutOfSyncOnly=true
+      - CreateNamespace=true
+---
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+  name: kube-prometheus-stack
+  namespace: argocd
+  # Finalizer that ensures the project is not deleted while it is still referenced by an application
+  finalizers:
+    - resources-finalizer.argocd.argoproj.io
+spec:
+  sourceRepos:
+    - https://git.avroid.tech/K8s/k8s-configs.git
+    - https://nexus.avroid.tech/repository/devops-helm-proxy-helm/
+  # Only permit applications to deploy to the kube-prometheus-stack namespace in the same cluster
+  destinations:
+    - namespace: kube-prometheus-stack
+      server: https://kubernetes.default.svc
+  # Cluster-scoped resource kinds that applications in this project are allowed to create
+  clusterResourceWhitelist:
+    - group: ''
+      kind: Namespace
+    - group: '*'
+      kind: Role
+    - group: '*'
+      kind: RoleBinding
+    - group: '*'
+      kind: ClusterRole
+    - group: '*'
+      kind: ClusterRoleBinding
+    - group: '*'
+      kind: CustomResourceDefinition
diff --git a/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/kube-prometheus-stack/values-override.yaml b/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/kube-prometheus-stack/values-override.yaml
new file mode 100644
index 0000000..b92406f
--- /dev/null
+++ b/clusters/k8s-avroid-office.prod.local/namespaces/kube-prometheus-stack/kube-prometheus-stack/values-override.yaml
@@ -0,0 +1,82 @@
+# Default values for kube-prometheus-stack.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+## Create default rules for monitoring the cluster
+##
+defaultRules:
+  create: false
+
+windowsMonitoring:
+  ## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter')
+  enabled: false
+
+## Configuration for alertmanager
+## ref: https://prometheus.io/docs/alerting/alertmanager/
+##
+alertmanager:
+
+  ## Deploy alertmanager
+  ##
+  enabled: false
+
+## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
+##
+grafana:
+  enabled: false
+
+## Deploy a Prometheus instance
+##
+prometheus:
+  ingress:
+    enabled: true
+
+    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+    ingressClassName: nginx
+
+    annotations:
+      kubernetes.io/ingress.class: nginx
+
+    ## Hostnames.
+    ## Must be provided if Ingress is enabled.
+    ##
+    # hosts:
+    #   - prometheus.domain.com
+    hosts:
+      - k8s-kube-prometheus-stack.avroid.tech
+
+    ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
+    ##
+    paths:
+      - /
+
+    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
+    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
+    pathType: ImplementationSpecific
+
+  ## Settings affecting prometheusSpec
+  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheusspec
+  ##
+  prometheusSpec:
+    ## Image of Prometheus.
+    ##
+    image:
+      registry: harbor.avroid.tech/quay-proxy
+
+    ## Define which Nodes the Pods are scheduled on.
+    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+    ##
+    nodeSelector:
+      node-role.kubernetes.io/worker: ""
+
+    ## Maximum size of metrics
+    ##
+    retentionSize: "512MB"
+
+    ## Prometheus StorageSpec for persistent data
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
+    ##
+    storageSpec:
+      emptyDir:
+        sizeLimit: "1Gi"
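
Note: prometheusSpec.storageSpec in the values override above is backed by an emptyDir, so collected metrics are lost whenever the Prometheus pod is rescheduled. If persistence is wanted later, the prometheus-operator storage document referenced in the values file describes a PVC-based storageSpec; a minimal sketch is shown below, where the storage class name and requested size are assumptions to be adjusted for the cluster:

    storageSpec:
      volumeClaimTemplate:
        spec:
          # assumption: replace with a storage class that actually exists in the cluster
          storageClassName: local-path
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              # assumption: keep this comfortably above retentionSize ("512MB" above)
              storage: 5Gi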