#!/bin/bash
# Script to start vLLM server with local model

set -e

# Configuration
# Default to the local checkout; override with e.g. MODEL_NAME=Qwen/Qwen2.5-7B-Instruct to load from the Hugging Face Hub.
MODEL_NAME="${MODEL_NAME:-./models/Qwen/Qwen2.5-7B-Instruct}"
HOST="${HOST:-0.0.0.0}"
PORT="${PORT:-8000}"
GPU_MEMORY_UTILIZATION="${GPU_MEMORY_UTILIZATION:-0.9}"
MAX_MODEL_LEN="${MAX_MODEL_LEN:-4096}"
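
# Optional sanity check (a sketch, assuming MODEL_NAME points at a local
# directory; skipped when it looks like a Hugging Face Hub model ID).
if [[ "$MODEL_NAME" == ./* || "$MODEL_NAME" == /* ]] && [ ! -d "$MODEL_NAME" ]; then
    echo "❌ Model directory not found: $MODEL_NAME"
    echo "   Download it first, e.g.: huggingface-cli download Qwen/Qwen2.5-7B-Instruct --local-dir $MODEL_NAME"
    exit 1
fi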

echo "=========================================="
echo "Starting vLLM Server"
echo "=========================================="
echo "Model: $MODEL_NAME"
echo "Host: $HOST"
echo "Port: $PORT"
echo "GPU Memory: ${GPU_MEMORY_UTILIZATION}%"
echo "Max Length: $MAX_MODEL_LEN tokens"
echo "=========================================="
echo ""

# Check if vLLM is installed
if ! command -v vllm &> /dev/null; then
    echo "❌ vLLM not found. Installing..."
    pip install vllm
fi
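
# Optional GPU visibility check (a sketch; assumes an NVIDIA GPU with
# nvidia-smi installed; remove if you run on other hardware).
if command -v nvidia-smi &> /dev/null; then
    if ! nvidia-smi &> /dev/null; then
        echo "⚠️  nvidia-smi is present but reports no usable GPU; vLLM may fail to start."
    fi
else
    echo "⚠️  nvidia-smi not found; skipping GPU check."
fi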

# Start vLLM server
echo "Starting vLLM server..."
echo "API will be available at: http://${HOST}:${PORT}/v1"
echo ""
echo "Press Ctrl+C to stop"
echo ""

vllm serve "$MODEL_NAME" \
    --host "$HOST" \
    --port "$PORT" \
    --gpu-memory-utilization "$GPU_MEMORY_UTILIZATION" \
    --max-model-len "$MAX_MODEL_LEN" \
    --disable-log-requests \
    --trust-remote-code
