[DO-1600] Move namespace prod to avroid-prod (!36)

[DO-1600]

Co-authored-by: denis.patrakeev <denis.patrakeev@avroid.tech>
Reviewed-on: https://git.avroid.tech/K8s/k8s-configs/pulls/36
Reviewed-by: Vasiliy Chipizhin <vasiliy.chipizhin@avroid.team>
Reviewed-by: Rustam Tagaev <rustam.tagaev@avroid.team>
This commit is contained in:
Denis Patrakeev
2025-02-24 13:56:29 +03:00
parent d23152de7c
commit 565da97928
13 changed files with 19 additions and 19 deletions

View File

@@ -0,0 +1,44 @@
---
# Argo CD Application: deploys the mermaid diagram editor into the
# avroid-prod namespace from kustomize manifests in the k8s-configs repo.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: mermaid
  namespace: argocd
  # Cascade-delete the deployed resources when this Application is removed.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: mermaid
  destination:
    server: https://kubernetes.default.svc
    namespace: avroid-prod
  sources:
    - repoURL: https://git.avroid.tech/K8s/k8s-configs.git
      targetRevision: master
      path: clusters/k8s-avroid-office.prod.local/namespaces/avroid-prod/diagrams-tools/mermaid/kustomize
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - ApplyOutOfSyncOnly=true
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: mermaid
  namespace: argocd
  # Finalizer that ensures that project is not deleted until it is not referenced by any application
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  sourceRepos:
    - https://git.avroid.tech/K8s/k8s-configs.git
  # Only permit applications to deploy to the avroid-prod namespace in the same cluster
  destinations:
    - namespace: avroid-prod
      server: https://kubernetes.default.svc
  # Deny all cluster-scoped resources from being created, except for Namespace
  clusterResourceWhitelist:
    - group: ''
      kind: Namespace

View File

@@ -0,0 +1,19 @@
---
# NetworkPolicy: restricts inbound traffic to the mermaid pods so that
# only pods in the ingress-nginx namespace may reach them; all other
# ingress to the selected pods is denied once this policy applies.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: mermaid-in
  namespace: avroid-prod
  labels:
    app.kubernetes.io/managed-by: argocd
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: mermaid
  policyTypes:
    - Ingress
  ingress:
    - from:
        # Match the ingress controller's namespace by its immutable
        # kubernetes.io/metadata.name label.
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: ingress-nginx

View File

@@ -0,0 +1,7 @@
---
# Kustomize entry point for the mermaid app: plain Deployment, Service
# and Ingress manifests, with no patches or overlays applied.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - mermaid-deployment.yaml
  - mermaid-service.yaml
  - mermaid-ingress.yaml

View File

@@ -0,0 +1,43 @@
---
# Deployment for the mermaid editor: 2 replicas of a single-container
# pod serving HTTP on port 80, pinned to worker nodes.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mermaid
  labels:
    app.kubernetes.io/name: mermaid
    app.kubernetes.io/instance: mermaid
spec:
  replicas: 2
  selector:
    matchLabels:
      app.kubernetes.io/name: mermaid
      app.kubernetes.io/instance: mermaid
  template:
    metadata:
      labels:
        app.kubernetes.io/name: mermaid
        app.kubernetes.io/instance: mermaid
    spec:
      containers:
        - name: mermaid
          # Docker Hub image pulled through the local Harbor proxy.
          image: harbor.avroid.tech/docker-hub-proxy/supinf/mermaid-editor:8.0
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          # NOTE(review): only a readinessProbe is defined — consider adding
          # a livenessProbe so a wedged container gets restarted.
          readinessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 5
            periodSeconds: 15
          resources:
            requests:
              cpu: 50m
              memory: 32Mi
            limits:
              cpu: 250m
              memory: 128Mi
      # Schedule only onto nodes labelled as workers (empty label value).
      nodeSelector:
        node-role.kubernetes.io/worker: ""

View File

@@ -0,0 +1,21 @@
---
# Ingress: routes mermaid.avroid.tech (all paths) to the mermaid
# Service's named "http" port via the nginx ingress class.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: mermaid
  labels:
    app.kubernetes.io/name: mermaid
    app.kubernetes.io/instance: mermaid
spec:
  ingressClassName: nginx
  rules:
    - host: mermaid.avroid.tech
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: mermaid
                port:
                  # Refer to the Service port by name, not number.
                  name: http

View File

@@ -0,0 +1,18 @@
---
# ClusterIP Service exposing the mermaid pods on port 80 (named "http"),
# selected by the same labels the Deployment stamps on its pods.
apiVersion: v1
kind: Service
metadata:
  name: mermaid
  labels:
    app.kubernetes.io/name: mermaid
    app.kubernetes.io/instance: mermaid
spec:
  type: ClusterIP
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 80
  selector:
    app.kubernetes.io/name: mermaid
    app.kubernetes.io/instance: mermaid

View File

@@ -0,0 +1,51 @@
---
# Argo CD Application: deploys PlantUML into the avroid-prod namespace
# using the stevehipwell/plantuml Helm chart (via the Nexus helm proxy),
# with values supplied from the k8s-configs repo via the $values ref.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: plantuml
  namespace: argocd
  # Cascade-delete the deployed resources when this Application is removed.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: plantuml
  destination:
    server: https://kubernetes.default.svc
    namespace: avroid-prod
  sources:
    # Repo source used only to provide the values file ($values below).
    - repoURL: https://git.avroid.tech/K8s/k8s-configs.git
      targetRevision: master
      ref: values
    - repoURL: https://nexus.avroid.tech/repository/devops-helm-proxy-helm/
      chart: "stevehipwell/plantuml"
      targetRevision: 3.36.0
      helm:
        valueFiles:
          # NOTE(review): "ovveride" looks like a typo for "override" — kept
          # as-is because it must match the actual filename in the repo;
          # rename both together if cleaning this up.
          - $values/clusters/k8s-avroid-office.prod.local/namespaces/avroid-prod/diagrams-tools/plantuml/values-ovveride.yaml
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - ApplyOutOfSyncOnly=true
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: plantuml
  namespace: argocd
  # Finalizer that ensures that project is not deleted until it is not referenced by any application
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  sourceRepos:
    - https://git.avroid.tech/K8s/k8s-configs.git
    - https://nexus.avroid.tech/repository/devops-helm-proxy-helm/
  # Only permit applications to deploy to the avroid-prod namespace in the same cluster
  destinations:
    - namespace: avroid-prod
      server: https://kubernetes.default.svc
  # Deny all cluster-scoped resources from being created, except for Namespace
  clusterResourceWhitelist:
    - group: ''
      kind: Namespace

View File

@@ -0,0 +1,19 @@
---
# NetworkPolicy: restricts inbound traffic to the plantuml pods so that
# only pods in the ingress-nginx namespace may reach them; all other
# ingress to the selected pods is denied once this policy applies.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: plantuml-in
  namespace: avroid-prod
  labels:
    app.kubernetes.io/managed-by: argocd
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: plantuml
  policyTypes:
    - Ingress
  ingress:
    - from:
        # Match the ingress controller's namespace by its immutable
        # kubernetes.io/metadata.name label.
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: ingress-nginx

View File

@@ -0,0 +1,41 @@
# Helm values override for the stevehipwell/plantuml chart.
# https://github.com/stevehipwell/helm-charts/blob/main/charts/plantuml/values.yaml
image:
  # -- Image repository for the default container (Docker Hub via Harbor proxy).
  repository: harbor.avroid.tech/docker-hub-proxy/plantuml/plantuml-server
ingress:
  # -- If `true`, create an `Ingress` resource.
  enabled: true
  # -- Ingress annotations.
  # The deprecated kubernetes.io/ingress.class annotation was removed here:
  # spec.ingressClassName (set below) is the supported way to select the
  # controller, and specifying both is redundant.
  annotations: {}
  ingressClassName: "nginx"
  # -- (list) Ingress hosts.
  # @default -- See _values.yaml_
  hosts:
    - plantuml.avroid.tech
  # -- (string) Ingress path.
  path: /
  # -- (list) Ingress TLS.
  # @default -- See _values.yaml_
  tls: []
  # - hosts:
  #     - plantuml.local
  #   secretName: plantuml-tls
# -- Number of replicas to create if `autoscaling.enabled` is `false`.
replicaCount: 2
# -- Resources for the default container.
resources:
  requests:
    cpu: 200m
    memory: 1024Mi
  limits:
    cpu: 1000m
    memory: 2048Mi
# -- Node labels to match for pod scheduling.
nodeSelector:
  node-role.kubernetes.io/worker: ""