Gemma 3 12B (Q4_K_M) on CUDA
docker run -p 1032:8080 \
  -v $HOME/.cache/llama.cpp:/root/.cache/llama.cpp \
  --gpus all \
  ghcr.io/ggml-org/llama.cpp:server-cuda \
  -t 2 --prio 3 -ngl 99 -fa --temp 0 \
  -hf ggml-org/gemma-3-12b-it-GGUF:Q4_K_M
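Once the container is up, llama-server exposes an OpenAI-compatible API on port 8080 inside the container, mapped above to host port 1032. A quick smoke test against the chat completions endpoint (the prompt text here is arbitrary):

curl http://localhost:1032/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"messages":[{"role":"user","content":"Say hello in one sentence."}]}'

The response is a standard OpenAI-style JSON object; no "model" field is needed in the request since the server only serves the model loaded at startup.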