Gemma3:12b:Q4 on CUDA
docker run -p 1032:8080 \
  -v $HOME/.cache/llama.cpp:/root/.cache/llama.cpp \
  --gpus all ghcr.io/ggml-org/llama.cpp:server-cuda \
  -t 2 --prio 3 -ngl 99 -fa --temp 0 \
  -hf ggml-org/gemma-3-12b-it-GGUF:Q4_K_M
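The -p flag maps the container's port 8080 to 1032 on the host, so this instance answers at localhost:1032. A minimal readiness check before sending requests, assuming llama-server's /health endpoint (it returns 503 while the model is still loading):

import time
import requests

# Poll the containerized server until it reports ready.
for _ in range(120):
    try:
        if requests.get('http://localhost:1032/health', timeout=1).ok:
            break
    except requests.ConnectionError:
        pass
    time.sleep(1)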
import subprocess

# Alternative to Docker: launch llama-server directly; it serves the same
# model on its default port, 8080.
server = subprocess.Popen(
    ['llama-server', '-t', '2', '--prio', '2', '-ngl', '99', '-fa', '--temp', '0',
     '-hf', 'ggml-org/gemma-3-12b-it-GGUF:Q4_K_M'],
    stderr=subprocess.DEVNULL)
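Keeping the Popen handle makes cleanup easy; a small sketch, assuming the server variable from the snippet above:

import atexit

# Stop the background llama-server automatically when this process exits.
atexit.register(server.terminate)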
import json
import requests

def en2ko(en):
    # Query the Python-launched server on the default port 8080. The
    # response_format schema forces the model to reply with a JSON object
    # containing a single 'ko-KR' string, so the reply parses reliably.
    r = requests.post('http://localhost:8080/v1/chat/completions', json={
        'messages': [{'role': 'user', 'content': str({'en-US': en})}],
        'response_format': {
            'type': 'json_object',
            'schema': {'type': 'object',
                       'properties': {'ko-KR': {'type': 'string'}}},
        },
    })
    return json.loads(r.json()['choices'][0]['message']['content'])['ko-KR']
en2ko('Hello')  # '안녕하세요'
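The same schema-keyed trick generalizes to other target languages, since the property name alone tells the model what to produce. A sketch with a hypothetical translate() helper (the function name and language tags are assumptions, not part of the original):

import json
import requests

def translate(en, lang='ko-KR'):
    # Hypothetical generalization of en2ko: the schema's single property
    # name (e.g. 'ja-JP') steers which language the model emits.
    r = requests.post('http://localhost:8080/v1/chat/completions', json={
        'messages': [{'role': 'user', 'content': str({'en-US': en})}],
        'response_format': {
            'type': 'json_object',
            'schema': {'type': 'object',
                       'properties': {lang: {'type': 'string'}}},
        },
    })
    return json.loads(r.json()['choices'][0]['message']['content'])[lang]

translate('Hello', 'ja-JP')  # e.g. 'こんにちは'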