LlamaCpp

For AMD GPUs, install ROCm first.

Then build with:

``` bash
make GGML_HIPBLAS=1
```
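
Note: recent llama.cpp revisions build with CMake rather than the Makefile. Below is a minimal sketch of the equivalent HIP build, assuming a newer checkout; the flag names (GGML_HIP, AMDGPU_TARGETS) and the gfx target are assumptions to verify against docs/build.md for your version:

``` bash
# Assumed CMake-based HIP build for newer llama.cpp checkouts; check the
# GGML_HIP flag name and your GPU architecture (gfx...) in docs/build.md.
HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \
  cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx1100 -DCMAKE_BUILD_TYPE=Release
cmake --build build --config Release -j
```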

See: How to use .safetensors model ? · Issue #688 · ggerganov/llama.cpp

``` bash

# Download the original .safetensors weights from Hugging Face
huggingface-cli \
  download \
  meta-llama/Llama-3.2-1B \
  model.safetensors \
  --local-dir . \
  --local-dir-use-symlinks False
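
# The .safetensors checkpoint above cannot be loaded directly; per issue #688
# it has to be converted to GGUF first. A rough sketch, assuming a recent
# llama.cpp checkout (script and binary names vary across versions) and that
# the repo's config.json and tokenizer files were downloaded alongside the weights:
python convert_hf_to_gguf.py . \
  --outfile llama-3.2-1b-f16.gguf \
  --outtype f16
./llama-quantize llama-3.2-1b-f16.gguf llama-3.2-1b-Q4_K_M.gguf Q4_K_M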

# Alternatively, download a ready-made GGUF quantization
huggingface-cli \
  download \
  TheBloke/Llama-2-7B-GGUF \
  llama-2-7b.Q4_K_M.gguf \
  --local-dir . \
  --local-dir-use-symlinks False


# Run the GGUF model: -ngl 32 offloads 32 layers to the GPU, -c 4096 sets the
# context size, -n -1 removes the generation limit, -s fixes the RNG seed
./llama-cli \
  -ngl 32 \
  -m llama-2-7b.Q4_K_M.gguf \
  --color -c 4096 \
  --temp 0.7 \
  --repeat_penalty 1.1 \
  -n -1 \
  -s 100282318 \
  -p "Why is the sky blue?"


# Same run, but -mg 0 selects GPU 0 as the main GPU on multi-GPU systems
./llama-cli \
  -mg 0 \
  -ngl 32 \
  -m llama-2-7b.Q4_K_M.gguf \
  --color -c 4096 \
  --temp 0.7 \
  --repeat_penalty 1.1 \
  -n -1 \
  -s 100282318 \
  -p "Why is the sky blue?"