[hotfix] Fix deploy postgres_exporter to manually deploy (!107)

Co-authored-by: denis.patrakeev <denis.patrakeev@avroid.tech>
Reviewed-on: https://git.avroid.tech/K8s/k8s-configs/pulls/107
This commit is contained in:
Denis Patrakeev
2025-03-05 13:12:00 +03:00
parent 2f9d791047
commit d48e97cf25
8 changed files with 100 additions and 128 deletions

View File

@@ -0,0 +1,55 @@
# Install [Prometheus Postgres Exporter](https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus-postgres-exporter/README.md)
## Created from version
| Project | Version |
|------------------------------|----------|
| Prometheus Postgres Exporter | 6.9.0 |
## Install
Настраиваем для работы файл конфигурации kubectl для подключения к кластеру Kubernetes
Готовим служебную УЗ для vault (ЭТО АВТОМАТИЗИРОВАНО через ArgoCD):
```bash
kubectl apply -f .rbac/vault-service-account.yaml
kubectl apply -f .rbac/harbor-registry-secret.yaml
```
Получаем из Vault секреты с паролями/токенами/сертификатами и выгружаем в окружение оболочки:
```bash
./prometheus_postgres_exporter_values_secrets_init.sh
```
Применяем секрет и сетевые политики:
```bash
kubectl -n cloud-prod apply -f .secrets/argocd-secret-path.yaml
kubectl -n cloud-prod apply -f postgres-exporter-network-policy.yaml
```
И производим непосредственную установку:
```bash
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
helm install -n cloud-prod prometheus-postgres-exporter prometheus-community/prometheus-postgres-exporter -f values-override.yaml
```
## Upgrade/Changes
Обновление настроек и чарта делаем так:
В начале сравниваем содержимое `values-override.yaml` с исходным файлом `values.yaml`
(ссылка на исходную версию зафиксирована в заголовке `values-override.yaml`) через diff (IDE лучше).
Или с новой версией `values.yaml` в Helm-чарте.
Вносим необходимые правки, новые опции или добавляем секреты.
Получаем из Vault секреты с паролями/токенами/сертификатами и выгружаем в окружение оболочки:
```bash
./prometheus_postgres_exporter_values_secrets_init.sh
kubectl -n cloud-prod apply -f .secrets/argocd-secret-path.yaml
```
Затем применяем:
```bash
helm upgrade -n cloud-prod prometheus-postgres-exporter prometheus-community/prometheus-postgres-exporter -f values-override.yaml
```

View File

@@ -1,51 +0,0 @@
---
# ArgoCD Application: deploys the prometheus-postgres-exporter Helm chart
# into the cloud-prod namespace, pulling values-override.yaml from the
# k8s-configs git repo (multi-source app: git ref "values" + Helm repo).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: cloud-postgres-exporter
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: cloud-postgres-exporter
  destination:
    server: https://kubernetes.default.svc
    namespace: cloud-prod
  sources:
    - repoURL: https://git.avroid.tech/K8s/k8s-configs.git
      targetRevision: master
      ref: values
    - repoURL: https://nexus.avroid.tech/repository/devops-helm-proxy-helm/
      chart: "prometheus-community/prometheus-postgres-exporter"
      # Quoted so the chart version is always read as a string.
      targetRevision: "6.9.0"
      helm:
        valueFiles:
          - $values/clusters/k8s-avroid-office.prod.local/namespaces/cloud-prod/monitoring/postgres-exporter/values-override.yaml
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - ApplyOutOfSyncOnly=true
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: cloud-postgres-exporter
  namespace: argocd
  # Finalizer that ensures that project is not deleted until it is not referenced by any application
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  sourceRepos:
    - https://git.avroid.tech/K8s/k8s-configs.git
    - https://nexus.avroid.tech/repository/devops-helm-proxy-helm/
  # Only permit applications to deploy to the cloud-prod namespace in the same cluster
  destinations:
    - namespace: cloud-prod
      server: https://kubernetes.default.svc
  # Deny all cluster-scoped resources from being created, except for Namespace
  clusterResourceWhitelist:
    - group: ''
      kind: Namespace

View File

@@ -1,17 +0,0 @@
---
# Secret carrying Vault references for the postgres-exporter credentials.
# The data values are base64-encoded "vault:<path>#<field>" strings that the
# banzaicloud vault webhook (configured via the annotations below) resolves
# to real values at pod admission time.
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/managed-by: argocd
  name: postgres-exporter-secret
  namespace: cloud-prod
  annotations:
    vault.security.banzaicloud.io/vault-addr: "https://vault.avroid.tech"
    vault.security.banzaicloud.io/vault-role: "cloud-prod"
    vault.security.banzaicloud.io/vault-skip-verify: "false"
    vault.security.banzaicloud.io/vault-path: "avroid-office"
type: Opaque
data:
  username: dmF1bHQ6dGVhbS1kZXZvcHMvZGF0YS9zZXJ2aWNlcy9tb25pdG9yaW5nL2s4cy9jbG91ZC1wcm9kL3Bvc3RncmVzLWV4cG9ydGVyI3VzZXJuYW1l
  password: dmF1bHQ6dGVhbS1kZXZvcHMvZGF0YS9zZXJ2aWNlcy9tb25pdG9yaW5nL2s4cy9jbG91ZC1wcm9kL3Bvc3RncmVzLWV4cG9ydGVyI3Bhc3N3b3Jk

View File

@@ -5,7 +5,7 @@ metadata:
name: cloud-postgres-exporter-in
namespace: cloud-prod
labels:
app.kubernetes.io/managed-by: argocd
app.kubernetes.io/managed-by: manually
spec:
podSelector:
matchLabels:
@@ -27,7 +27,7 @@ metadata:
name: cloud-postgres-exporter-out
namespace: cloud-prod
labels:
app.kubernetes.io/managed-by: argocd
app.kubernetes.io/managed-by: manually
spec:
podSelector:
matchLabels:

View File

@@ -0,0 +1,36 @@
#!/bin/sh
# Fetch the postgres-exporter credentials from Vault and render the
# Kubernetes Secret manifest (.secrets/argocd-secret-path.yaml) that carries
# the exporter's postgres_exporter.yml config with the pg_monitoring
# userpass auth module filled in.
set -e

vault_kv_path='team-devops/services/monitoring/k8s/cloud-prod/postgres-exporter'

# Read the Vault secret once instead of issuing two identical queries;
# exact-match the field name in column 1 so a value containing the word
# "username"/"password" cannot be picked up by accident.
vault_output=$(vault kv get "${vault_kv_path}")
prometheus_postgres_exporter_username=$(printf '%s\n' "${vault_output}" | awk '$1 == "username" {print $2}')
prometheus_postgres_exporter_password=$(printf '%s\n' "${vault_output}" | awk '$1 == "password" {print $2}')

# Re-create the scratch directory holding the rendered (secret-bearing) manifest.
rm -rf .secrets
mkdir .secrets

# Heredoc is intentionally unquoted so the credential variables expand.
cat > .secrets/argocd-secret-path.yaml << EOF
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/managed-by: manually
  name: postgres-exporter-secret
  namespace: cloud-prod
type: Opaque
stringData:
  postgres_exporter.yml: |
    ---
    auth_modules:
      pg_monitoring:
        type: userpass
        userpass:
          username: ${prometheus_postgres_exporter_username}
          password: ${prometheus_postgres_exporter_password}
        options:
          sslmode: disable
EOF

echo "Run:"
echo ' 1. kubectl -n cloud-prod apply -f .secrets/argocd-secret-path.yaml'
# Fixed: the README applies postgres-exporter-network-policy.yaml; the old
# hint pointed at a non-existent prometheus-network-policy.yaml.
echo ' 2. kubectl -n cloud-prod apply -f postgres-exporter-network-policy.yaml'

View File

@@ -14,59 +14,12 @@ resources:
cpu: 200m
memory: 256Mi
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: false
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name: vault
config:
## The datasource properties on config are passed through helm tpl function.
## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
datasource:
# Specify userFile if DB username is stored in a file.
# For example, to use with vault-injector from Hashicorp
userSecret:
name: postgres-exporter-secret
key: username
# Only one of password, passwordFile, passwordSecret and pgpassfile can be specified
passwordSecret:
name: postgres-exporter-secret
key: password
# postgres_exporter.yml
postgresExporter: |
auth_modules:
pg_monitoring:
type: userpass
userpass:
username: "${DATA_SOURCE_USER}"
password: "${DATA_SOURCE_PASS}"
options:
sslmode: disable
# define an existing secret to be mounted as the config file
# needs to have the key 'postgres_exporter.yml'
existingSecret:
enabled: true
name: "postgres-exporter-secret"
nodeSelector:
node-role.kubernetes.io/worker: ""
annotations:
vault.security.banzaicloud.io/vault-addr: "https://vault.avroid.tech"
vault.security.banzaicloud.io/vault-role: "cloud-prod"
vault.security.banzaicloud.io/vault-skip-verify: "false"
vault.security.banzaicloud.io/vault-path: "avroid-office"
vault.security.banzaicloud.io/run-as-non-root: "true"
vault.security.banzaicloud.io/run-as-user: "1001"
vault.security.banzaicloud.io/run-as-group: "1001"
vault.security.banzaicloud.io/readonly-root-fs: "true"
# Labels and annotations to attach to the deployment resource
deployment:
annotations:
vault.security.banzaicloud.io/vault-addr: "https://vault.avroid.tech"
vault.security.banzaicloud.io/vault-role: "cloud-prod"
vault.security.banzaicloud.io/vault-skip-verify: "false"
vault.security.banzaicloud.io/vault-path: "avroid-office"
vault.security.banzaicloud.io/run-as-non-root: "true"
vault.security.banzaicloud.io/run-as-user: "1001"
vault.security.banzaicloud.io/run-as-group: "1001"
vault.security.banzaicloud.io/readonly-root-fs: "true"

View File

@@ -3,7 +3,7 @@ set -e
rm -rf .creds
prometheus_consul_token_value=$(vault kv get team-devops//services/monitoring/k8s/cloud-prod/prometheus | grep consul.secret | awk '{print $2}')
prometheus_consul_token_value=$(vault kv get team-devops/services/monitoring/k8s/cloud-prod/prometheus | grep consul.secret | awk '{print $2}')
cat > .creds << EOF
export PROMETHEUS_CONSUL_TOKEN_VALUE='${prometheus_consul_token_value}'

View File

@@ -40,10 +40,6 @@ server:
image:
repository: harbor.avroid.tech/quay-proxy/prometheus/prometheus
## External URL which can access prometheus
## Maybe same with Ingress host name
baseURL: "cloud-k8s-prometheus.avroid.tech"
ingress:
## If true, Prometheus server Ingress will be created
##
@@ -168,7 +164,7 @@ serverFiles:
regex: "([^:]+).*"
replacement: '${1}'
- target_label: __address__
replacement: cloud-postgres-exporter-prometheus-postgres-exporter:9187
replacement: cloud-postgres-exporter-prometheus-postgres-exporter
- job_name: redis_exporter
consul_sd_configs:
- server: consul.avroid.tech