Files
microdao-daarion/infrastructure/kubernetes/nats/deployment.yaml
Apple 8fe0b58978 🚀 NATS JetStream: K8s deployment + streams + job schema v1
- K8s deployment (2 replicas, PVC, initContainer для server_name)
- Streams definitions (MM_ONLINE, MM_OFFLINE, MM_WRITE, MM_EVENTS)
- Job payload schema (JSON v1 з idempotency)
- Worker contract (capabilities + ack/retry)
- Init streams script
- Оновлено ARCHITECTURE-150-NODES.md (Control-plane vs Data-plane)

TODO: Auth (nkeys), 3+ replicas для prod, worker-daemon implementation
2026-01-10 10:02:25 -08:00

213 lines
4.8 KiB
YAML

---
# NATS JetStream deployment manifests.
# Data plane for 150 nodes: fan-out/fan-in with online/offline priorities.
apiVersion: v1
kind: Namespace
metadata:
  name: nats
---
# Identity the NATS pods run under (no extra RBAC bindings defined here).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nats
  namespace: nats
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nats-config
namespace: nats
data:
nats.conf: |
# NATS JetStream Configuration
port: 4222
http_port: 8222
cluster {
port: 6222
routes = [
nats://nats-0.nats:6222
nats://nats-1.nats:6222
nats://nats-2.nats:6222
]
}
server_name: POD_NAME_PLACEHOLDER
cluster {
name: nats-cluster
port: 6222
routes = [
nats://nats-0.nats:6222
nats://nats-1.nats:6222
]
}
jetstream {
store_dir: /data/jetstream
max_mem_store: 2G
max_file_store: 50G
}
# TODO: Auth: nkeys (operator + system account)
# operator: /etc/nats/nats-operator.jwt
# system_account: SYSTEM
# resolver: MEMORY
# Для dev: auth вимкнено. Для prod: обов'язково увімкнути!
---
apiVersion: v1
kind: Secret
metadata:
  name: nats-operator
  namespace: nats
type: Opaque
# Explicit empty map: a bare `data:` parses as YAML null, which some
# tooling rejects; {} is the unambiguous "no entries yet" placeholder.
data: {}
# TODO: generate the operator JWT and system account, then add:
#   nats-operator.jwt: <base64>
#   system-account.jwt: <base64>
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: nats
namespace: nats
labels:
app: nats
spec:
serviceName: nats
replicas: 2 # Для поточного кластера (2 ноди). Збільшити до 3+ для prod
selector:
matchLabels:
app: nats
template:
metadata:
labels:
app: nats
spec:
serviceAccountName: nats
initContainers:
- name: config-init
image: busybox:latest
command: ['sh', '-c']
args:
- |
sed "s/POD_NAME_PLACEHOLDER/${POD_NAME}/g" /etc/nats/nats.conf.template > /etc/nats/nats.conf
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- name: config
mountPath: /etc/nats
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- nats
topologyKey: kubernetes.io/hostname
containers:
- name: nats
image: nats:2.10-alpine
ports:
- containerPort: 4222
name: client
- containerPort: 6222
name: cluster
- containerPort: 8222
name: monitor
args:
- -c
- /etc/nats/nats.conf
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: config
mountPath: /etc/nats
- name: data
mountPath: /data
resources:
requests:
memory: "2Gi"
cpu: "1"
limits:
memory: "4Gi"
cpu: "2"
livenessProbe:
httpGet:
path: /healthz
port: 8222
initialDelaySeconds: 10
periodSeconds: 10
readinessProbe:
httpGet:
path: /healthz
port: 8222
initialDelaySeconds: 5
periodSeconds: 5
volumes:
- name: config
configMap:
name: nats-config
items:
- key: nats.conf
path: nats.conf.template
# TODO: Додати Secret для operator JWT
# - name: operator-jwt
# secret:
# secretName: nats-operator
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
storageClassName: local-path
resources:
requests:
storage: 100Gi
---
# Headless service (clusterIP: None): gives each StatefulSet pod a stable
# DNS name (nats-0.nats, nats-1.nats, ...) as used by the cluster routes.
apiVersion: v1
kind: Service
metadata:
  name: nats
  namespace: nats
spec:
  type: ClusterIP
  clusterIP: None
  selector:
    app: nats
  ports:
    - name: client
      port: 4222
      targetPort: 4222
    - name: cluster
      port: 6222
      targetPort: 6222
    - name: monitor
      port: 8222
      targetPort: 8222
---
apiVersion: v1
kind: Service
metadata:
name: nats-client
namespace: nats
spec:
type: ClusterIP
selector:
app: nats
ports:
- name: client
port: 4222
targetPort: 4222