letai2001 2025-10-06 23:37:59 +07:00
parent 38ef519534
commit 5c05617e79
2 changed files with 78 additions and 0 deletions

utils/api_llm.py (normal file, 12 additions)

@@ -0,0 +1,12 @@
import os
import time

import google.generativeai as genai

# Read the API key from the environment rather than hardcoding a secret in source.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

model = genai.GenerativeModel("models/gemini-2.0-flash-001")

start_time = time.time()
response = model.generate_content("Hello, who are you?")
print(response.text)
print("Execution time: %.2f seconds" % (time.time() - start_time))

# for m in genai.list_models():
#     if "generateContent" in m.supported_generation_methods:
#         print(m.name)
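
The commented-out loop above hints at model discovery. A minimal standalone sketch of that step, assuming the same google-generativeai package and an API key exposed via a GOOGLE_API_KEY environment variable (the variable name is an assumption, not part of the commit):

import os
import google.generativeai as genai

# Hypothetical setup: the key is read from the environment, not hardcoded.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

# Print only the models that support generateContent,
# mirroring the commented-out loop in api_llm.py.
for m in genai.list_models():
    if "generateContent" in m.supported_generation_methods:
        print(m.name)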

utils/llm_client_vllm.py (normal file, 66 additions)

@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
"""
LLM client for a vLLM server
----------------------------
- Talks to vLLM's OpenAI-compatible HTTP endpoint.
- Accepts prompts already built by PromptBuilder.
- Asks the model to generate an answer.
"""
import requests
from typing import Optional, Dict, Any

from src.core.config import VLLM_URL, VLLM_MODEL, LLM_TEMPERATURE, LLM_MAX_TOKENS

class LLMClient:
    """
    Calls a vLLM model via the OpenAI-compatible protocol.
    """
    def __init__(
        self,
        base_url: str = VLLM_URL,
        model: str = VLLM_MODEL,
        temperature: float = LLM_TEMPERATURE,
        max_tokens: int = LLM_MAX_TOKENS,
    ):
        self.base_url = base_url.rstrip("/")
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens

    def generate(self, prompt: str, system_prompt: Optional[str] = None) -> str:
        """
        Send the prompt to the vLLM API and return the answer.
        """
url = f"{self.base_url}/v1/chat/completions"
messages = []
if system_prompt:
messages.append({"role": "system", "content": system_prompt})
messages.append({"role": "user", "content": prompt})
payload: Dict[str, Any] = {
"model": self.model,
"messages": messages,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"stream": False,
}
try:
response = requests.post(url, json=payload, timeout=60)
response.raise_for_status()
data = response.json()
return data["choices"][0]["message"]["content"].strip()
except Exception as e:
print(f"[LLMClient] ❌ Error calling vLLM: {e}")
return f"[Error] {e}"

# ---- Quick test ----
if __name__ == "__main__":
    client = LLMClient()
    prompt = "Write a short description of the planet Mars."
    answer = client.generate(prompt)
    print("🪐 Result from the LLM:")
    print(answer)
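
For context, the client only assumes four constants from src.core.config. A minimal sketch of that module, with placeholder values chosen to match common vLLM defaults (none of these values appear in the commit):

# src/core/config.py (hypothetical values; only the names come from the import above)
VLLM_URL = "http://localhost:8000"        # assumed default vLLM server address
VLLM_MODEL = "Qwen/Qwen2.5-7B-Instruct"   # placeholder model id, not from the commit
LLM_TEMPERATURE = 0.7                     # assumed sampling temperature
LLM_MAX_TOKENS = 1024                     # assumed completion length cap

With such a config in place, the quick test above runs as-is, and a system prompt can be threaded through generate(), e.g. client.generate(prompt, system_prompt="Answer in one paragraph.").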