Initial setup
commit eeb1369b55
2  argo/apps/kustomization.yaml  Normal file
@@ -0,0 +1,2 @@
resources:
  - uptime-karma.yaml
40  argo/apps/monitoring.yaml  Normal file
@@ -0,0 +1,40 @@
# apiVersion: argoproj.io/v1alpha1
# kind: Application
# metadata:
#   name: monitoring
#   namespace: monitoring
#   finalizers:
#     - resources-finalizer.argocd.argoproj.io
# spec:
#   project: default
#   source:
#     repoURL: https://git.james-mellors.com/mello/Monitoring.git
#     targetRevision: main
#     path: argo/apps
#   destination:
#     server: https://kubernetes.default.svc
#     namespace: argocd
#   syncPolicy:
#     automated:
#       prune: true
#       selfHeal: true
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: monitoring-apps
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://git.james-mellors.com/mello/Monitoring.git
    targetRevision: main
    path: argo/apps
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
20  argo/apps/uptime-karma.yaml  Normal file
@@ -0,0 +1,20 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: uptime-karma
  namespace: argocd
spec:
  project: default # Or your specific Argo CD project
  source:
    repoURL: https://git.james-mellors.com/mello/Monitoring.git # Repository containing the chart
    targetRevision: main # Branch or tag to track
    path: helm/uptime-karma

  destination:
    server: https://kubernetes.default.svc
    namespace: monitoring
  syncPolicy:
    automated: # Optional: Enable automatic sync
      prune: true
      selfHeal: true
23  helm/uptime-karma/.helmignore  Normal file
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
14  helm/uptime-karma/Chart.yaml  Normal file
@@ -0,0 +1,14 @@
apiVersion: v2
appVersion: "1.23.13"
deprecated: false
description: A self-hosted Monitoring tool like "Uptime-Robot".
home: https://github.com/dirsigler/uptime-kuma-helm
icon: https://raw.githubusercontent.com/louislam/uptime-kuma/master/public/icon.png
maintainers:
  - name: dirsigler
    email: dennis@irsigler.dev
name: uptime-kuma
sources:
  - https://github.com/louislam/uptime-kuma
type: application
version: 2.21.2
103  helm/uptime-karma/README.md  Normal file
@@ -0,0 +1,103 @@
# uptime-kuma

A self-hosted Monitoring tool like "Uptime-Robot".

**Homepage:** <https://github.com/dirsigler/uptime-kuma-helm>

## Maintainers

| Name | Email | Url |
| ---- | ------ | --- |
| dirsigler | <dennis@irsigler.dev> | |

## Source Code

* <https://github.com/louislam/uptime-kuma>

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| additionalVolumeMounts | list | `[]` | A list of additional volumeMounts to be added to the pod |
| additionalVolumes | list | `[]` | A list of additional volumes to be added to the pod |
| affinity | object | `{}` | |
| dnsConfig | object | `{}` | Use this option to set custom DNS configurations to the created deployment |
| dnsPolicy | string | `""` | Use this option to set a custom DNS policy to the created deployment |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"louislam/uptime-kuma"` | |
| image.tag | string | `"1.23.13-debian"` | |
| imagePullSecrets | list | `[]` | |
| ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"3600"` | |
| ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"3600"` | |
| ingress.annotations."nginx.ingress.kubernetes.io/server-snippets" | string | `"location / {\n proxy_set_header Upgrade $http_upgrade;\n proxy_http_version 1.1;\n proxy_set_header X-Forwarded-Host $http_host;\n proxy_set_header X-Forwarded-Proto $scheme;\n proxy_set_header X-Forwarded-For $remote_addr;\n proxy_set_header Host $host;\n proxy_set_header Connection \"upgrade\";\n proxy_set_header X-Real-IP $remote_addr;\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n proxy_set_header Upgrade $http_upgrade;\n proxy_cache_bypass $http_upgrade;\n}\n"` | |
| ingress.enabled | bool | `false` | |
| ingress.extraLabels | object | `{}` | |
| ingress.hosts[0].host | string | `"chart-example.local"` | |
| ingress.hosts[0].paths[0].path | string | `"/"` | |
| ingress.hosts[0].paths[0].pathType | string | `"ImplementationSpecific"` | |
| ingress.tls | list | `[]` | |
| livenessProbe.enabled | bool | `true` | |
| livenessProbe.exec.command[0] | string | `"extra/healthcheck"` | |
| livenessProbe.failureThreshold | int | `3` | |
| livenessProbe.initialDelaySeconds | int | `180` | |
| livenessProbe.periodSeconds | int | `10` | |
| livenessProbe.successThreshold | int | `1` | |
| livenessProbe.timeoutSeconds | int | `2` | |
| nameOverride | string | `""` | |
| namespaceOverride | string | `""` | A custom namespace to override the default namespace for the deployed resources. |
| networkPolicy | object | `{"allowExternal":true,"egress":true,"enabled":false,"ingress":true,"namespaceSelector":{}}` | Create a NetworkPolicy |
| networkPolicy.allowExternal | bool | `true` | Allow incoming connections only from specific Pods When set to true, the geoserver will accept connections from any source. When false, only Pods with the label {{ include "geoserver.fullname" . }}-client=true will have network access |
| networkPolicy.egress | bool | `true` | Enable/disable Egress policy type |
| networkPolicy.enabled | bool | `false` | Enable/disable Network Policy |
| networkPolicy.ingress | bool | `true` | Enable/disable Ingress policy type |
| networkPolicy.namespaceSelector | object | `{}` | Selects particular namespaces for which all Pods are allowed as ingress sources |
| nodeSelector | object | `{}` | |
| podAnnotations | object | `{}` | |
| podEnv | list | `[]` | |
| podLabels | object | `{}` | |
| podSecurityContext | object | `{}` | |
| priorityClassName | string | `""` | Use this option to set custom PriorityClass to the created deployment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass |
| readinessProbe.enabled | bool | `true` | |
| readinessProbe.exec.command | list | `[]` | |
| readinessProbe.failureThreshold | int | `3` | |
| readinessProbe.httpGet.httpHeaders | list | `[]` | |
| readinessProbe.httpGet.path | string | `"/"` | |
| readinessProbe.httpGet.port | int | `3001` | |
| readinessProbe.httpGet.scheme | string | `"HTTP"` | |
| readinessProbe.initialDelaySeconds | int | `10` | |
| readinessProbe.periodSeconds | int | `10` | |
| readinessProbe.successThreshold | int | `1` | |
| readinessProbe.timeoutSeconds | int | `1` | |
| resources | object | `{}` | |
| securityContext | object | `{}` | |
| service.annotations | object | `{}` | |
| service.nodePort | string | `nil` | |
| service.port | int | `3001` | |
| service.type | string | `"ClusterIP"` | |
| serviceAccount.annotations | object | `{}` | |
| serviceAccount.create | bool | `false` | |
| serviceAccount.name | string | `""` | |
| serviceMonitor.additionalLabels | object | `{}` | Additional labels to add to the ServiceMonitor |
| serviceMonitor.annotations | object | `{}` | Additional annotations to add to the ServiceMonitor |
| serviceMonitor.enabled | bool | `false` | |
| serviceMonitor.interval | string | `"60s"` | Scrape interval. If not set, the Prometheus default scrape interval is used. |
| serviceMonitor.metricRelabelings | list | `[]` | Prometheus [MetricRelabelConfigs] to apply to samples before ingestion |
| serviceMonitor.namespace | string | `nil` | Namespace where the ServiceMonitor resource should be created, default is the same as the release namespace |
| serviceMonitor.relabelings | list | `[]` | Prometheus [RelabelConfigs] to apply to samples before scraping |
| serviceMonitor.scheme | string | `nil` | Scheme to use when scraping, e.g. http (default) or https. |
| serviceMonitor.scrapeTimeout | string | `"10s"` | Timeout if metrics can't be retrieved in given time interval |
| serviceMonitor.selector | object | `{}` | Prometheus ServiceMonitor selector, only select Prometheus's with these labels (if not set, select any Prometheus) |
| serviceMonitor.tlsConfig | object | `{}` | TLS configuration to use when scraping, only applicable for scheme https. |
| strategy.type | string | `"Recreate"` | |
| tolerations | list | `[]` | |
| useDeploy | bool | `true` | |
| volume.accessMode | string | `"ReadWriteOnce"` | |
| volume.enabled | bool | `true` | |
| volume.existingClaim | string | `""` | |
| volume.size | string | `"4Gi"` | |

----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
23  helm/uptime-karma/templates/NOTES.txt  Normal file
@@ -0,0 +1,23 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "uptime-kuma.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "uptime-kuma.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "uptime-kuma.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "uptime-kuma.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:3001 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 3001:$CONTAINER_PORT
{{- end }}
87  helm/uptime-karma/templates/_helpers.tpl  Normal file
@@ -0,0 +1,87 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "uptime-kuma.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "uptime-kuma.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "uptime-kuma.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "uptime-kuma.labels" -}}
helm.sh/chart: {{ include "uptime-kuma.chart" . }}
{{ include "uptime-kuma.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "uptime-kuma.selectorLabels" -}}
app.kubernetes.io/name: {{ include "uptime-kuma.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Port of the Uptime Kuma container
*/}}
{{- define "uptime-kuma.port" -}}
3001
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "uptime-kuma.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "uptime-kuma.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

{{/*
Set automountServiceAccountToken when service account is created
*/}}
{{- define "uptime-kuma.automountServiceAccountToken" -}}
{{- default .Values.serviceAccount.create }}
{{- end }}

{{/*
Determine the namespace to use, allowing for a namespace override.
*/}}
{{- define "uptime-kuma.namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
141  helm/uptime-karma/templates/deployment.yaml  Normal file
@@ -0,0 +1,141 @@
{{- if .Values.useDeploy -}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "uptime-kuma.fullname" . }}
  namespace: {{ include "uptime-kuma.namespace" . }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
spec:
  replicas: 1
  selector:
    matchLabels:
      {{- include "uptime-kuma.selectorLabels" . | nindent 6 }}
      {{- if .Values.podLabels }}
      {{- toYaml .Values.podLabels | nindent 6 }}
      {{- end }}
  {{- with .Values.strategy }}
  strategy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "uptime-kuma.selectorLabels" . | nindent 8 }}
        {{- if .Values.podLabels }}
        {{- toYaml .Values.podLabels | nindent 8 }}
        {{- end }}
    spec:
      automountServiceAccountToken: {{ include "uptime-kuma.automountServiceAccountToken" . }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "uptime-kuma.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      {{- if .Values.dnsPolicy }}
      dnsPolicy: {{ .Values.dnsPolicy }}
      {{- end }}
      {{- if .Values.dnsConfig }}
      dnsConfig:
        {{- toYaml .Values.dnsConfig | nindent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName | quote }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          {{- with .Values.podEnv }}
          env:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          ports:
            - name: http
              containerPort: {{ include "uptime-kuma.port" . }}
              protocol: TCP
          {{ if or .Values.volume.enabled .Values.additionalVolumeMounts -}}
          volumeMounts:
            {{- if .Values.volume.enabled }}
            - mountPath: /app/data
              name: storage
            {{- end -}}
            {{ with .Values.additionalVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          {{- end }}
          {{- if .Values.livenessProbe.enabled }}
          livenessProbe:
            {{- if .Values.livenessProbe.enabled }}
            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
            successThreshold: {{ .Values.livenessProbe.successThreshold }}
            {{- if .Values.livenessProbe.exec.command }}
            exec:
              command:
                {{- toYaml .Values.livenessProbe.exec.command | nindent 16 }}
            {{- end }}
            {{- end }}
          {{- end }}
          readinessProbe:
            {{- if .Values.readinessProbe.enabled }}
            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
            successThreshold: {{ .Values.readinessProbe.successThreshold }}
            {{- if .Values.readinessProbe.exec.command }}
            exec:
              command:
                {{- toYaml .Values.readinessProbe.exec.command | nindent 16 }}
            {{- else if .Values.readinessProbe.httpGet.path }}
            httpGet:
              path: {{ .Values.readinessProbe.httpGet.path }}
              port: {{ .Values.readinessProbe.httpGet.port }}
              scheme: {{ .Values.readinessProbe.httpGet.scheme }}
              {{- if .Values.readinessProbe.httpGet.httpHeaders }}
              httpHeaders:
                {{- toYaml .Values.readinessProbe.httpGet.httpHeaders | nindent 16 }}
              {{- end }}
            {{- end }}
            {{- end }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{ if or .Values.volume.enabled .Values.additionalVolumes -}}
      volumes:
        {{- if .Values.volume.enabled }}
        - name: storage
          persistentVolumeClaim:
            {{- if not .Values.volume.existingClaim }}
            claimName: {{ include "uptime-kuma.fullname" . }}-pvc
            {{- else }}
            claimName: {{ .Values.volume.existingClaim }}
            {{- end }}
        {{- end -}}
        {{- with .Values.additionalVolumes }}
        {{- toYaml . | nindent 6 }}
        {{- end }}
      {{- end -}}
{{- end -}}
129  helm/uptime-karma/templates/ingress.yaml  Normal file
@@ -0,0 +1,129 @@
{{- /* ================================================================== */ -}}
{{- /* Istio Gateway Configuration */ -}}
{{- /* ================================================================== */ -}}
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "uptime-kuma.fullname" . -}}
{{- $namespace := include "uptime-kuma.namespace" . -}}
{{- $gatewayName := $fullName -}} {{/* Use the same name for simplicity, or define a new one */}}
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: {{ $gatewayName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
    {{- /* You might want specific labels for Istio resources */}}
    {{- if .Values.istio.gateway.extraLabels }}
    {{- toYaml .Values.istio.gateway.extraLabels | nindent 4 }}
    {{- end }}
  {{- /* Annotations from Ingress might not apply directly, review if needed */}}
  {{- /* Add Istio specific annotations if required */}}
  {{- with .Values.istio.gateway.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  # This selector usually targets the default Istio ingress gateway pods
  # Adjust if your Istio installation uses different labels
  selector:
    {{- /* Make this configurable, e.g., .Values.istio.gateway.selector */}}
    {{- .Values.istio.gateway.selector | default (dict "istio" "ingressgateway") | toYaml | nindent 4 }}
  servers:
    # HTTP Server entry
    - port:
        number: 80
        name: http-{{ $fullName }} # Name must be unique per Gateway
        protocol: HTTP
      # Listen on the hosts defined in the ingress rules
      hosts:
        {{- range .Values.ingress.hosts }}
        - {{ .host | quote }}
        {{- end }}
      {{- /* Optional: Add default http->https redirect */}}
      {{- if and .Values.ingress.tls .Values.istio.gateway.httpRedirect }}
      tls:
        httpsRedirect: true
      {{- end }}

    {{- /* HTTPS Server entry - only if TLS is configured */}}
    {{- if .Values.ingress.tls }}
    - port:
        number: 443
        name: https-{{ $fullName }} # Name must be unique per Gateway
        protocol: HTTPS
      # Use hosts defined in the TLS section
      hosts:
        {{- range .Values.ingress.tls }}
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
        {{- end }}
      tls:
        mode: SIMPLE # Terminate TLS at the gateway
        # Reference secrets from the TLS config. Assumes secrets are in the same namespace as the Gateway.
        {{- range .Values.ingress.tls }}
        credentialName: {{ .secretName }}
        {{- /* Note: Istio Gateway only supports one credentialName per server block directly. */}}
        {{- /* If multiple TLS secrets are needed for different hosts on port 443, */}}
        {{- /* you might need multiple server blocks or rely on SNI matching if your */}}
        {{- /* Istio version/setup supports it implicitly based on VirtualService hosts. */}}
        {{- /* For simplicity, this example assumes the *first* secret applies if multiple are listed */}}
        {{- /* under .Values.ingress.tls and you only have one https server block. */}}
        {{- /* A more robust solution might generate multiple HTTPS server blocks if needed. */}}
        {{- break }} {{/* Only use the first secret for this simple server block */}}
        {{- end }}
    {{- end }}
---
{{- /* ================================================================== */ -}}
{{- /* Istio VirtualService Configuration */ -}}
{{- /* ================================================================== */ -}}
{{- $fullName := include "uptime-kuma.fullname" . -}}
{{- $namespace := include "uptime-kuma.namespace" . -}}
{{- $svcPort := .Values.service.port -}}
{{- $gatewayName := $fullName -}} {{/* Must match the Gateway name defined above */}}
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: {{ $fullName }}
  namespace: {{ $namespace }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
    {{- if .Values.istio.virtualService.extraLabels }}
    {{- toYaml .Values.istio.virtualService.extraLabels | nindent 4 }}
    {{- end }}
  {{- with .Values.istio.virtualService.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  # Apply these rules to traffic coming through the specified gateway(s)
  gateways:
    - {{ $gatewayName }}
  # Apply these rules for requests targeting the specified host(s)
  hosts:
    {{- range .Values.ingress.hosts }}
    - {{ .host | quote }}
    {{- end }}
  http:
    {{- range .Values.ingress.hosts }}
    {{- range .paths }}
    - match:
        - uri:
            # Map pathType to Istio's match types
            {{- $pathType := .pathType | default "Prefix" -}} {{/* Default to Prefix if not specified */}}
            {{- if or (eq $pathType "Prefix") (eq $pathType "ImplementationSpecific") }}
            prefix: {{ .path }}
            {{- else if eq $pathType "Exact" }}
            exact: {{ .path }}
            {{- end }}
      # Define where to route the traffic
      route:
        - destination:
            # Route to the internal Kubernetes Service
            host: {{ $fullName }}
            port:
              number: {{ $svcPort }}
      {{- /* Add other Istio features like rewrite, headers, retries, timeouts here if needed */}}
    {{- end }} {{- /* end range .paths */}}
    {{- end }} {{- /* end range .Values.ingress.hosts */}}
{{- end }} {{/* End if .Values.ingress.enabled */}}
37  helm/uptime-karma/templates/netpol.yaml  Normal file
@@ -0,0 +1,37 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "uptime-kuma.fullname" . }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      {{- include "uptime-kuma.selectorLabels" . | nindent 6 }}
  policyTypes:
    {{- if .Values.networkPolicy.ingress }}
    - Ingress
    {{- end }}
    {{- if .Values.networkPolicy.egress }}
    - Egress
    {{- end }}
  egress:
    - {}
  {{- if .Values.networkPolicy.ingress }}
  ingress:
    - ports:
        - port: {{ include "uptime-kuma.port" . }}
          protocol: TCP
      {{- if not .Values.networkPolicy.allowExternal }}
      from:
        - podSelector:
            matchLabels:
              {{ include "uptime-kuma.fullname" . }}-client: "true"
        {{- with .Values.networkPolicy.namespaceSelector }}
        - namespaceSelector:
            {{- toYaml . | nindent 10 }}
        {{- end }}
      {{- end }}
  {{- end }}
{{- end }}
20  helm/uptime-karma/templates/pvc.yaml  Normal file
@@ -0,0 +1,20 @@
{{- if and .Values.useDeploy (not .Values.volume.existingClaim) }}
{{- if and .Values.volume.enabled (not .Values.volume.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ include "uptime-kuma.fullname" . }}-pvc
  namespace: {{ include "uptime-kuma.namespace" . }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
spec:
  accessModes:
    - {{ .Values.volume.accessMode | default "ReadWriteOnce" | quote }}
  resources:
    requests:
      storage: {{ .Values.volume.size | quote }}
  {{- with .Values.volume.storageClassName }}
  storageClassName: {{ . }}
  {{- end }}
{{- end -}}
{{- end -}}
23  helm/uptime-karma/templates/service.yaml  Normal file
@@ -0,0 +1,23 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "uptime-kuma.fullname" . }}
  namespace: {{ include "uptime-kuma.namespace" . }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
  {{- with .Values.service.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: {{ include "uptime-kuma.port" . }}
      protocol: TCP
      {{- with .Values.service.nodePort }}
      nodePort: {{ . }}
      {{- end }}
      name: http
  selector:
    {{- include "uptime-kuma.selectorLabels" . | nindent 4 }}
13  helm/uptime-karma/templates/serviceaccount.yaml  Normal file
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "uptime-kuma.serviceAccountName" . }}
  namespace: {{ include "uptime-kuma.namespace" . }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
12  helm/uptime-karma/templates/servicemonitor.auth.secret.yaml  Normal file
@@ -0,0 +1,12 @@
{{- if and .Values.serviceMonitor.enabled .Values.serviceMonitor.basicAuth }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "uptime-kuma.fullname" . }}-metrics-basic-auth
  namespace: {{ .Values.serviceMonitor.namespace | default (include "uptime-kuma.namespace" .) }}
type: kubernetes.io/basic-auth
stringData:
  {{- range $key, $value := .Values.serviceMonitor.basicAuth }}
  {{ $key }}: {{ $value }}
  {{- end }}
{{- end }}
54  helm/uptime-karma/templates/servicemonitor.yaml  Normal file
@@ -0,0 +1,54 @@
{{- if and .Values.serviceMonitor.enabled (.Capabilities.APIVersions.Has "monitoring.coreos.com/v1") }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "uptime-kuma.fullname" . }}
  namespace: {{ .Values.serviceMonitor.namespace | default (include "uptime-kuma.namespace" .) }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
    {{- with .Values.serviceMonitor.selector }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
    {{- with .Values.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- with .Values.serviceMonitor.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  selector:
    matchLabels:
      {{- include "uptime-kuma.selectorLabels" . | nindent 6 }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  endpoints:
    - port: http
      path: /metrics
      interval: {{ .Values.serviceMonitor.interval }}
      scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
      {{- with .Values.serviceMonitor.scheme }}
      scheme: {{ . }}
      {{- end }}
      {{- with .Values.serviceMonitor.tlsConfig }}
      tlsConfig:
        {{- toYaml . | nindent 6 }}
      {{- end }}
      {{- with .Values.serviceMonitor.relabelings }}
      relabelings:
        {{- toYaml . | nindent 6 }}
      {{- end }}
      {{- with .Values.serviceMonitor.metricRelabelings }}
      metricRelabelings:
        {{- toYaml . | nindent 6 }}
      {{- end }}
      {{- with .Values.serviceMonitor.basicAuth }}
      basicAuth:
        {{- range $key, $value := . }}
        {{ $key }}:
          name: {{ include "uptime-kuma.fullname" $ }}-metrics-basic-auth
          key: {{ $key }}
        {{- end }}
      {{- end }}
{{- end }}
123  helm/uptime-karma/templates/statefulset.yaml  Normal file
@@ -0,0 +1,123 @@
{{- if not .Values.useDeploy -}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "uptime-kuma.fullname" . }}
  namespace: {{ include "uptime-kuma.namespace" . }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
spec:
  serviceName: {{ include "uptime-kuma.fullname" . }}
  replicas: 1
  selector:
    matchLabels:
      {{- include "uptime-kuma.selectorLabels" . | nindent 6 }}
      {{- if .Values.podLabels }}
      {{- toYaml .Values.podLabels | nindent 6 }}
      {{- end }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "uptime-kuma.selectorLabels" . | nindent 8 }}
        {{- if .Values.podLabels }}
        {{- toYaml .Values.podLabels | nindent 8 }}
        {{- end }}
    spec:
      automountServiceAccountToken: {{ include "uptime-kuma.automountServiceAccountToken" . }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      enableServiceLinks: false
      serviceAccountName: {{ include "uptime-kuma.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      {{- if .Values.dnsPolicy }}
      dnsPolicy: {{ .Values.dnsPolicy }}
      {{- end }}
      {{- if .Values.dnsConfig }}
      dnsConfig:
        {{- toYaml .Values.dnsConfig | nindent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName | quote }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: "UPTIME_KUMA_PORT"
              value: {{ include "uptime-kuma.port" . | quote }}
            {{- with .Values.podEnv }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          ports:
            - name: http
              containerPort: {{ include "uptime-kuma.port" . }}
              protocol: TCP
          {{ if or .Values.volume.enabled .Values.additionalVolumeMounts -}}
          volumeMounts:
            {{- if .Values.volume.enabled }}
            - mountPath: /app/data
              name: storage
            {{- end -}}
            {{ with .Values.additionalVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          {{- end }}
          {{- if .Values.livenessProbe.enabled }}
          livenessProbe:
            exec:
              command:
                - extra/healthcheck
            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
          {{- end }}
          {{- if .Values.readinessProbe.enabled }}
          readinessProbe:
            httpGet:
              path: /
              port: {{ include "uptime-kuma.port" . }}
              scheme: HTTP
            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
          {{- end }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.additionalVolumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
  {{ if .Values.volume.enabled -}}
  volumeClaimTemplates:
    - metadata:
        name: storage
      spec:
        accessModes:
          - {{ .Values.volume.accessMode }}
        resources:
          requests:
            storage: {{ .Values.volume.size }}
        {{- with .Values.volume.storageClassName }}
        storageClassName: {{ . }}
        {{- end }}
  {{- end -}}
{{- end -}}
16  helm/uptime-karma/templates/tests/test-connection.yaml  Normal file
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "uptime-kuma.fullname" . }}-test-connection"
  namespace: {{ include "uptime-kuma.namespace" . }}
  labels:
    {{- include "uptime-kuma.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "uptime-kuma.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
260  helm/uptime-karma/values.yaml  Normal file
@@ -0,0 +1,260 @@
# Default values for uptime-kuma.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: louislam/uptime-kuma
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "1.23.13-debian"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# -- A custom namespace to override the default namespace for the deployed resources.
namespaceOverride: "monitoring"

# If this option is set to false, a StatefulSet is used instead of a Deployment
useDeploy: true

serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}
podLabels:
  {}
  # app: uptime-kuma
podEnv: []
  # optional additional environment variables
  # - name: "A_VARIABLE"
  #   value: "a-value"

podSecurityContext:
  {}
  # fsGroup: 2000

securityContext:
  {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 3001
  nodePort:
  annotations: {}

ingress:
  enabled: true
  # className: ""
  extraLabels:
    {}
    # vhost: uptime-kuma.company.corp
  annotations:
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
    nginx.ingress.kubernetes.io/server-snippets: |
      location / {
        proxy_set_header Upgrade $http_upgrade;
        proxy_http_version 1.1;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header Host $host;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Upgrade $http_upgrade;
        proxy_cache_bypass $http_upgrade;
      }
  hosts:
    - host: uptime.james-mellors.com
      paths:
        - path: /
          pathType: ImplementationSpecific

  tls:
    []
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local

resources:
  {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

livenessProbe:
  enabled: true
  failureThreshold: 3
  # Uptime-Kuma recommends configuring a delay of 180 seconds so the server has fully started.
  # https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.go#L3
  initialDelaySeconds: 180
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 2
  # The NodeJS version of this healthcheck is no longer supported, therefore we don't specify a node command.
  # https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.js#L6
  exec:
    command:
      - "extra/healthcheck"

readinessProbe:
  enabled: true
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 1
  failureThreshold: 3
  successThreshold: 1
  exec:
    command: []
  httpGet:
    path: /
    port: 3001
    scheme: HTTP
    httpHeaders: []

volume:
  enabled: true
  accessMode: ReadWriteOnce
  size: 4Gi
  # If you want to use a storage class other than the default, uncomment this
  # line and define the storage class name
  # storageClassName:
  # Reuse your own pre-existing PVC.
  existingClaim: ""

# -- A list of additional volumes to be added to the pod
additionalVolumes:
  []
  # - name: "additional-certificates"
  #   configMap:
  #     name: "additional-certificates"
  #     optional: true
  #     defaultMode: 420

# -- A list of additional volumeMounts to be added to the pod
additionalVolumeMounts:
  []
  # - name: "additional-certificates"
  #   mountPath: "/etc/ssl/certs/additional/additional-ca.pem"
  #   readOnly: true
  #   subPath: "additional-ca.pem"

strategy:
  type: Recreate

# Prometheus ServiceMonitor configuration
serviceMonitor:
  enabled: false
  # -- Scrape interval. If not set, the Prometheus default scrape interval is used.
  interval: 60s
  # -- Timeout if metrics can't be retrieved in given time interval
  scrapeTimeout: 10s
  # -- Scheme to use when scraping, e.g. http (default) or https.
  scheme: ~
  # -- TLS configuration to use when scraping, only applicable for scheme https.
  tlsConfig: {}
  # -- Prometheus [RelabelConfigs] to apply to samples before scraping
  relabelings: []
  # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
  metricRelabelings: []
  # -- Prometheus ServiceMonitor selector, only select Prometheus's with these
  # labels (if not set, select any Prometheus)
  selector: {}

  # -- Namespace where the ServiceMonitor resource should be created, default is
  # the same as the release namespace
  namespace: ~
  # -- Additional labels to add to the ServiceMonitor
  additionalLabels: {}
  # -- Additional annotations to add to the ServiceMonitor
  annotations: {}

  # -- BasicAuth credentials for scraping metrics, use API token and any string for username
  # basicAuth:
  #   username: "metrics"
  #   password: ""

# -- Use this option to set a custom DNS policy to the created deployment
dnsPolicy: ""

# -- Use this option to set custom DNS configurations to the created deployment
dnsConfig: {}

# -- Use this option to set custom PriorityClass to the created deployment
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
priorityClassName: ""

# -- Create a NetworkPolicy
networkPolicy:
  # -- Enable/disable Network Policy
  enabled: false
  # -- Enable/disable Ingress policy type
  ingress: true
  # -- Enable/disable Egress policy type
  egress: true
  # -- Allow incoming connections only from specific Pods
  # When set to true, the server will accept connections from any source.
  # When false, only Pods with the label {{ include "uptime-kuma.fullname" . }}-client=true will have network access
  allowExternal: true
  # -- Selects particular namespaces for which all Pods are allowed as ingress sources
  namespaceSelector: {}
    # matchLabels:
    #   role: frontend
    # matchExpressions:
    #   - {key: role, operator: In, values: [frontend]}

# -- Istio specific configuration
istio:
  # Note: istio requires ingress.enabled: true to activate gateway/virtualservice creation
  # Generally, you would disable the standard Kubernetes Ingress template if using Istio templates.

  gateway:
    # -- Add extra labels to the Istio Gateway resource
    extraLabels: {}
      # my-label: value

    # -- Add extra annotations to the Istio Gateway resource
    annotations: {}
      # some-istio-annotation: value

    # -- Selector for the Istio Ingress Gateway deployment/pods.
    # Adjust if your Istio installation uses different labels.
    selector:
      istio: ingressgateway # Common default selector

    # -- Enable automatic HTTP to HTTPS redirection on the Gateway (requires ingress.tls to be configured)
    httpRedirect: true # Set to true to force HTTPS

  virtualService:
    # -- Add extra labels to the Istio VirtualService resource
    extraLabels: {}
      # my-label: value

    # -- Add extra annotations to the Istio VirtualService resource
    annotations: {}
      # some-istio-annotation: value