---
# StatefulSet running Fooocus (Stable Diffusion UI) with persistent model/data
# storage. Placeholders use shell-style ${var:=default} expansion — render with
# envsubst (or similar) before applying:
#   deployment      — app name, namespace, and label value (default: fooocus)
#   cpu_request     — CPU request                          (default: 4)
#   memory_request  — memory request                       (default: 23Gi)
#   runtime_class   — container runtime class              (default: nvidia)
#   storage_class   — PVC storage class                    (default: longhorn)
#   volume_size     — PVC capacity                         (default: 256Gi)
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app: ${deployment:=fooocus}
  name: ${deployment:=fooocus}
  # NOTE(review): namespace deliberately mirrors the deployment name — confirm
  # the namespace exists (or is created elsewhere) before applying.
  namespace: ${deployment:=fooocus}
spec:
  replicas: 1
  serviceName: ${deployment:=fooocus}
  selector:
    matchLabels:
      app: ${deployment:=fooocus}
  template:
    metadata:
      labels:
        app: ${deployment:=fooocus}
    spec:
      # Run unprivileged; fsGroup ensures the PVC mount is writable by the app.
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        fsGroup: 1000
      containers:
        - name: ${deployment:=fooocus}
          image: ghcr.io/lllyasviel/fooocus:latest
          env:
            # --listen makes the web UI bind on all interfaces (not just localhost).
            - name: CMDARGS
              value: --listen
            - name: DATADIR
              value: /content/data
            - name: config_path
              value: /content/data/config.txt
            - name: config_example_path
              value: /content/data/config_modification_tutorial.txt
            # Model directories, all placed on the persistent volume so large
            # downloads survive pod restarts.
            - name: path_checkpoints
              value: /content/data/models/checkpoints/
            - name: path_loras
              value: /content/data/models/loras/
            - name: path_embeddings
              value: /content/data/models/embeddings/
            - name: path_vae_approx
              value: /content/data/models/vae_approx/
            - name: path_upscale_models
              value: /content/data/models/upscale_models/
            - name: path_inpaint
              value: /content/data/models/inpaint/
            - name: path_controlnet
              value: /content/data/models/controlnet/
            - name: path_clip_vision
              value: /content/data/models/clip_vision/
            - name: path_fooocus_expansion
              value: /content/data/models/prompt_expansion/fooocus_expansion/
            # NOTE(review): outputs land under /content/app (NOT the data
            # volume), so generated images are ephemeral — confirm intended.
            - name: path_outputs
              value: /content/app/outputs/
          # NOTE(review): no GPU resource request/limit is declared here; GPU
          # access relies on the runtimeClassName + nodeSelector below. Confirm
          # this matches the cluster's device-plugin setup.
          resources:
            requests:
              cpu: "${cpu_request:=4}"
              memory: "${memory_request:=23Gi}"
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          ports:
            - containerPort: 7865
              name: http
              protocol: TCP
          volumeMounts:
            - mountPath: /content/data
              name: ${deployment:=fooocus}-data
      runtimeClassName: "${runtime_class:=nvidia}"
      # Pin to Ampere-family NVIDIA GPUs (labels from the NVIDIA GPU operator).
      nodeSelector:
        nvidia.com/gpu.family: ampere
      restartPolicy: Always
      # Long grace period: allow in-flight image generation to finish on shutdown.
      terminationGracePeriodSeconds: 300
  volumeClaimTemplates:
    - metadata:
        name: ${deployment:=fooocus}-data
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: "${storage_class:=longhorn}"
        resources:
          requests:
            storage: "${volume_size:=256Gi}"