clean up code
@@ -1,54 +1,70 @@
 import requests
 import json
-from typing import Dict, Optional
+import logging
+from typing import Dict, Optional, List
+
+logger = logging.getLogger(__name__)
 
 
 class LLMClient:
-    def __init__(self, endpoint: str = 'http://192.168.1.74:1234', model: str = 'local'):
+    def __init__(self, endpoint: str = 'http://localhost:11434', model: str = 'llama3', use_local: bool = True):
         self.endpoint = endpoint
         self.model = model
-        self.local_ollama = 'http://localhost:11434'
+        self.use_local = use_local
+        self.lm_studio_endpoint = 'http://192.168.1.74:1234'
+        self.lm_studio_model = 'openai/gpt-oss-20b'
 
     def summarize(self, text: str, max_length: int = 200) -> Dict:
-        prompt = f"Summarize the following in {max_length} chars or less:\n\n{text[:2000]}"
+        prompt = f"Summarize this concisely in under {max_length} characters:\n\n{text[:3000]}"
         return self._query(prompt)
 
     def extract_topics(self, text: str) -> Dict:
-        prompt = f"Extract 5-10 key topics/tags from this text. Return as comma-separated list:\n\n{text[:2000]}"
+        prompt = f"Extract 5-10 key topics/tags. Return ONLY comma-separated words:\n\n{text[:3000]}"
         result = self._query(prompt)
         if result.get('success'):
             topics = [t.strip() for t in result['text'].split(',')]
             result['topics'] = topics[:10]
         return result
 
     def extract_intent(self, text: str) -> Dict:
         prompt = f"What is the main purpose/intent of this code/document? Answer in 1-2 sentences:\n\n{text[:3000]}"
         return self._query(prompt)
 
-    def classify_content(self, text: str) -> Dict:
-        prompt = f"Classify this content. Return: category, topics, has_pii (yes/no), quality (high/medium/low):\n\n{text[:1000]}"
+    def detect_project_type(self, text: str, file_list: List[str]) -> Dict:
+        files_str = ', '.join(file_list[:20])
+        prompt = f"Based on these files: {files_str}\nAnd this content:\n{text[:2000]}\n\nWhat type of project is this? (e.g. web app, ml/ai, transcription, data processing, etc.)"
         return self._query(prompt)
 
-    def _query(self, prompt: str, use_local: bool = False) -> Dict:
+    def _query(self, prompt: str, timeout: int = 30) -> Dict:
         try:
-            endpoint = self.local_ollama if use_local else self.endpoint
-
-            if use_local:
+            if self.use_local:
                 response = requests.post(
-                    f'{endpoint}/api/generate',
-                    json={'model': 'llama3.2', 'prompt': prompt, 'stream': False},
-                    timeout=30
+                    f'{self.endpoint}/api/generate',
+                    json={'model': self.model, 'prompt': prompt, 'stream': False},
+                    timeout=timeout
                 )
+                if response.status_code == 200:
+                    data = response.json()
+                    return {'success': True, 'text': data.get('response', '').strip()}
             else:
                 response = requests.post(
-                    f'{endpoint}/v1/chat/completions',
+                    f'{self.lm_studio_endpoint}/v1/chat/completions',
                     json={
-                        'model': self.model,
+                        'model': self.lm_studio_model,
                         'messages': [{'role': 'user', 'content': prompt}],
-                        'max_tokens': 500
+                        'max_tokens': 500,
+                        'temperature': 0.7
                     },
-                    timeout=30
+                    timeout=timeout
                 )
+                if response.status_code == 200:
+                    data = response.json()
+                    return {'success': True, 'text': data['choices'][0]['message']['content'].strip()}
 
-            if response.status_code == 200:
-                data = response.json()
-                if use_local:
-                    return {'success': True, 'text': data.get('response', '')}
-                else:
-                    return {'success': True, 'text': data['choices'][0]['message']['content']}
-            else:
-                return {'success': False, 'error': f'HTTP {response.status_code}'}
+            return {'success': False, 'error': f'HTTP {response.status_code}'}
 
+        except requests.Timeout:
+            logger.warning(f'LLM request timeout after {timeout}s')
+            return {'success': False, 'error': 'timeout'}
         except Exception as e:
+            logger.error(f'LLM query failed: {e}')
             return {'success': False, 'error': str(e)}
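For reviewers, a minimal usage sketch of the class after this change. The import path llm_client is assumed (the diff does not show the file name), and the input strings are illustrative; the endpoint and model defaults come from the new __init__ signature.

# Hypothetical usage of the refactored LLMClient; module name assumed.
from llm_client import LLMClient

client = LLMClient()  # defaults: local Ollama at http://localhost:11434, model 'llama3', use_local=True

result = client.summarize("Long document text goes here...")
if result.get('success'):
    print(result['text'])  # summary returned under the 'text' key
else:
    print('LLM error:', result.get('error'))  # 'timeout', 'HTTP <status>', or an exception message

topics = client.extract_topics("Release notes, changelog entries, design discussion...")
if topics.get('success'):
    print(topics['topics'])  # up to 10 comma-split, whitespace-stripped tags

project = client.detect_project_type("README contents...", ['app.py', 'train.py', 'model.pt'])
print(project.get('text', ''))  # free-text project-type guess from the model

One design point worth noting in the new _query: both backend branches fall through to a single HTTP-error return, and requests.Timeout is caught before the generic Exception, so timeouts get a distinct 'timeout' error value and a warning-level log entry rather than being folded into the catch-all.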