# kube-cascade/frigate/build-trt-models.yaml
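#
# One-shot Job that builds TensorRT detection engines for Frigate. It runs a
# build script (shipped in the ConfigMap below, following Frigate's documented
# tensorrt_models.sh flow) inside NVIDIA's TensorRT container and stores the
# resulting .trt engines on the trt-models PersistentVolumeClaim.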

apiVersion: batch/v1
kind: Job
metadata:
  name: build-trt-models
  namespace: frigate
spec:
  parallelism: 1
  completions: 1
  backoffLimit: 6
  completionMode: NonIndexed
  template:
    spec:
      restartPolicy: OnFailure
      runtimeClassName: nvidia
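      # Assumes the NVIDIA container toolkit is installed on the node and
      # registered as a RuntimeClass named "nvidia", so the pod can reach the GPU.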
      containers:
        - name: builder
          image: nvcr.io/nvidia/tensorrt:22.07-py3
          command:
            - bash
            - /tensorrt_models.sh
          env:
            - name: USE_FP16
              value: "False"
            - name: YOLO_MODELS
              value: yolov7-640
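          # USE_FP16=False builds FP32 engines, which Frigate's TensorRT docs
          # suggest for GPUs without fast FP16 support. YOLO_MODELS is the
          # comma-separated list of models the script below converts.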
          volumeMounts:
            - name: trt-models
              mountPath: /tensorrt_demos
              subPath: tensorrt_demos
            - name: trt-models
              mountPath: /tensorrt_models
            - name: tensorrt-build-models-script
              mountPath: /tensorrt_models.sh
              subPath: tensorrt_models.sh
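      # One PVC backs both the tensorrt_demos checkout (via the subPath) and
      # the finished engines in /tensorrt_models, so clones and build output
      # survive pod restarts; the same claim can then be mounted by Frigate
      # (an assumption about the consuming deployment).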
      volumes:
        - name: trt-models
          persistentVolumeClaim:
            claimName: trt-models
        - name: tensorrt-build-models-script
          configMap:
            name: tensorrt-build-models-script
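# Job specs are largely immutable, so rebuilding means delete-and-reapply.
# A usage sketch, assuming this file's repo path:
#   kubectl -n frigate delete job build-trt-models --ignore-not-found
#   kubectl apply -f kube-cascade/frigate/build-trt-models.yaml
#   kubectl -n frigate logs -f job/build-trt-models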
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: tensorrt-build-models-script
  namespace: frigate
data:
  tensorrt_models.sh: |
    #!/bin/bash
    set -euxo pipefail

    export CUDA_HOME=/usr/local/cuda
    # ":-" guards the expansion: under "set -u", an unset LD_LIBRARY_PATH
    # would otherwise abort the script.
    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
    OUTPUT_FOLDER=/tensorrt_models

    echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"

    # Create output folder
    mkdir -p ${OUTPUT_FOLDER}

    # Install packages
    pip install --upgrade pip && pip install onnx==1.9.0 protobuf==3.20.3
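    # Reuse an existing checkout if the PVC already holds one, so retries
    # (backoffLimit: 6) and repeat runs don't re-clone.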
    if [ ! -d /tensorrt_demos/.git ]; then
      # Clone tensorrt_demos repo
      git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos
    fi

    # Build libyolo
    cd /tensorrt_demos/plugins && make all
    cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so

    # Download yolo weights
    cd /tensorrt_demos/yolo && ./download_yolo.sh

    # Build trt engine
    cd /tensorrt_demos/yolo
    for model in ${YOLO_MODELS//,/ }
    do
      python3 yolo_to_onnx.py -m ${model}
      python3 onnx_to_tensorrt.py -m ${model}
      cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt
    done
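    # Engines land on the PVC as /tensorrt_models/<model>.trt, alongside the
    # libyolo_layer.so plugin they need at runtime. A Frigate config consuming
    # them might look like the sketch below (the /trt-models mount path is an
    # assumption about how Frigate mounts the claim):
    #   detectors:
    #     tensorrt:
    #       type: tensorrt
    #       device: 0
    #   model:
    #     path: /trt-models/yolov7-640.trt
    #     input_tensor: nchw
    #     input_pixel_format: rgb
    #     width: 640
    #     height: 640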