Compare commits
10 Commits
f906e6666a
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
909d5ad345 | ||
|
|
2a0375b5bb | ||
|
|
2e2de4773b | ||
|
|
47b90a2e90 | ||
|
|
66034eda34 | ||
|
|
54d265067e | ||
|
|
607592466a | ||
|
|
b9ceaecc3d | ||
|
|
f4ef534e3f | ||
|
|
91d4592272 |
146
README.md
146
README.md
@@ -1,70 +1,146 @@
|
||||
```markdown
|
||||
# finish.sh
|
||||
# finish
|
||||
|
||||
AI-powered shell completion that runs 100 % on your machine.
|
||||
AI-powered shell completion that runs 100% on your machine.
|
||||
|
||||
One command and your terminal learns what you type next.
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
# On Ubuntu/Debian, first ensure python3-venv is installed
|
||||
sudo apt-get install python3-venv
|
||||
|
||||
# Then run the installer
|
||||
curl -sSL https://git.appmodel.nl/tour/finish/raw/branch/main/docs/install.sh | bash
|
||||
source ~/.bashrc # or ~/.zshrc
|
||||
|
||||
# for plato special, add plato.lan to /etc/hosts:
|
||||
sudo nano /etc/hosts   # then add the line: 192.168.1.74 plato.lan
|
||||
```
|
||||
|
||||
Press `Tab` twice on any partial command and finish.sh suggests the rest—no cloud, no data leak, no latency.
|
||||
Press **Alt+\\** after typing any command to get intelligent completions—no cloud, no data leak, instant results.
|
||||
|
||||
## How it works
|
||||
|
||||
1. Captures your current directory, recent history, env vars.
|
||||
2. Builds a concise prompt for a local LLM (LM-Studio, Ollama, or any OpenAI-compatible endpoint).
|
||||
3. Returns ranked completions in <200 ms, cached for instant replay.
|
||||
1. Captures your current directory, recent history, env vars, and available tools
|
||||
2. Analyzes your intent and builds a context-aware prompt
|
||||
3. Queries your local LLM (LM Studio, Ollama, or any OpenAI-compatible endpoint)
|
||||
4. Returns 2-5 ranked completions with an interactive picker
|
||||
5. Results are cached for instant replay
|
||||
|
||||
## Use
|
||||
## Usage
|
||||
|
||||
Type a command, then press **Alt+\\**:
|
||||
|
||||
```bash
|
||||
docker <Tab><Tab> # → docker run -it --rm ubuntu bash
|
||||
git commit <Tab><Tab> # → git commit -m "feat: add finish.sh"
|
||||
# large files <Tab><Tab> # → find . -type f -size +100M
|
||||
# Natural language commands
|
||||
show gpu status # → nvidia-smi
|
||||
resolve title of website google.com # → curl -s https://google.com | grep -oP '<title>\K[^<]+'
|
||||
make file about dogs # → echo "About Dogs" > dogs.md
|
||||
|
||||
# Partial commands
|
||||
git commit # → git commit -m "..."
|
||||
docker run # → docker run -it --rm ubuntu bash
|
||||
find large files # → find . -type f -size +100M
|
||||
```
|
||||
|
||||
Dry-run mode:
|
||||
|
||||
```bash
|
||||
finish --dry-run "tar czf backup.tar.gz"
|
||||
```
|
||||
Navigate with ↑↓, press Enter to accept, Esc to cancel.
|
||||
|
||||
## Configure
|
||||
|
||||
Set and view configuration:
|
||||
|
||||
```bash
|
||||
finish config set endpoint http://plato.lan:11434/v1/chat/completions
|
||||
finish config set model codellama:13b
|
||||
finish model # interactive picker
|
||||
finish config
|
||||
```
|
||||
|
||||
## Providers
|
||||
|
||||
| Provider | Auth | URL | Notes |
|
||||
|-----------|------|-----------------------------|---------|
|
||||
| LM-Studio | none | `http://localhost:1234/v1` | default |
|
||||
| Ollama | none | `http://localhost:11434` | |
|
||||
| OpenAI | key | `https://api.openai.com/v1` | |
|
||||
|
||||
Add others by editing `~/.finish/config`.
|
||||
|
||||
## Commands
|
||||
Edit configuration file:
|
||||
|
||||
```bash
|
||||
finish install # hook into shell
|
||||
finish remove # uninstall
|
||||
finish clear # wipe cache & logs
|
||||
finish usage # tokens & cost
|
||||
nano ~/.finish/finish.json
|
||||
```
|
||||
|
||||
## Configuration Examples
|
||||
|
||||
### Local Ollama
|
||||
```json
|
||||
{
|
||||
"provider": "ollama",
|
||||
"model": "llama3:latest",
|
||||
"endpoint": "http://localhost:11434/api/chat",
|
||||
"temperature": 0.0,
|
||||
"api_prompt_cost": 0.0,
|
||||
"api_completion_cost": 0.0,
|
||||
"max_history_commands": 20,
|
||||
"max_recent_files": 20,
|
||||
"cache_size": 100
|
||||
}
|
||||
```
|
||||
|
||||
### LM Studio
|
||||
```json
|
||||
{
|
||||
"provider": "lmstudio",
|
||||
"model": "dolphin3.0-llama3.1-8b@q4_k_m",
|
||||
"endpoint": "http://localhost:1234/v1/chat/completions",
|
||||
"temperature": 0.0,
|
||||
"api_prompt_cost": 0.0,
|
||||
"api_completion_cost": 0.0,
|
||||
"max_history_commands": 20,
|
||||
"max_recent_files": 20,
|
||||
"cache_size": 100
|
||||
}
|
||||
```
|
||||
|
||||
### OpenAI (or compatible API)
|
||||
```json
|
||||
{
|
||||
"provider": "lmstudio",
|
||||
"model": "gpt-4",
|
||||
"endpoint": "https://api.openai.com/v1/chat/completions",
|
||||
"api_key": "sk-...",
|
||||
"temperature": 0.0,
|
||||
"api_prompt_cost": 0.03,
|
||||
"api_completion_cost": 0.06,
|
||||
"max_history_commands": 20,
|
||||
"max_recent_files": 20,
|
||||
"cache_size": 100
|
||||
}
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
Bash ≥4 or Zsh ≥5, curl, jq, bc.
|
||||
Optional: bash-completion.
|
||||
- Python 3.7+
|
||||
- Bash ≥4 or Zsh ≥5
|
||||
- A local LLM running (Ollama, LM Studio, etc.) or API access
|
||||
|
||||
Python dependencies (installed automatically):
|
||||
- httpx
|
||||
- prompt_toolkit
|
||||
- rich
|
||||
|
||||
## Commands
|
||||
|
||||
```bash
|
||||
finish install # Set up Alt+\ keybinding
|
||||
finish config # Show current configuration
|
||||
finish command "text" # Test completions manually
|
||||
```
|
||||
|
||||
## Advanced
|
||||
|
||||
### Debug mode
|
||||
```bash
|
||||
export FINISH_DEBUG=1
|
||||
finish command "your command here"
|
||||
cat ~/.finish/finish.log
|
||||
```
|
||||
|
||||
### Clear cache
|
||||
```bash
|
||||
rm -rf ~/.finish/cache/*.json
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
|
||||
119
docs/install.sh
119
docs/install.sh
@@ -4,7 +4,7 @@
|
||||
|
||||
set -e
|
||||
|
||||
ACSH_VERSION="v0.5.1"
|
||||
ACSH_VERSION="v0.6.0"
|
||||
BRANCH_OR_VERSION=${1:-main}
|
||||
REPO_URL="https://git.appmodel.nl/tour/finish/raw/branch"
|
||||
|
||||
@@ -97,11 +97,9 @@ main() {
|
||||
SHELL_TYPE=$(detect_shell)
|
||||
case "$SHELL_TYPE" in
|
||||
zsh)
|
||||
SCRIPT_NAME="finish.zsh"
|
||||
RC_FILE="$HOME/.zshrc"
|
||||
;;
|
||||
bash)
|
||||
SCRIPT_NAME="finish.sh"
|
||||
RC_FILE="$HOME/.bashrc"
|
||||
;;
|
||||
*)
|
||||
@@ -112,6 +110,29 @@ main() {
|
||||
|
||||
echo "Detected shell: $SHELL_TYPE"
|
||||
|
||||
# Check Python and venv
|
||||
echo "Checking Python installation..."
|
||||
if ! command -v python3 > /dev/null 2>&1; then
|
||||
echo_error "Python 3 is required but not found"
|
||||
echo "Install it with:"
|
||||
echo " Ubuntu/Debian: sudo apt-get install python3 python3-pip python3-venv"
|
||||
echo " macOS: brew install python3"
|
||||
exit 1
|
||||
fi
|
||||
echo_green "✓ Python 3 found: $(python3 --version)"
|
||||
|
||||
# Check if venv module is available
|
||||
if ! python3 -m venv --help > /dev/null 2>&1; then
|
||||
echo_error "python3-venv module is not installed"
|
||||
echo "Install it with:"
|
||||
echo " Ubuntu/Debian: sudo apt-get install python3-venv"
|
||||
echo " CentOS/RHEL: sudo yum install python3-venv"
|
||||
echo " macOS: (included with python3)"
|
||||
exit 1
|
||||
fi
|
||||
echo_green "✓ python3-venv available"
|
||||
echo ""
|
||||
|
||||
# Check dependencies
|
||||
echo "Checking dependencies..."
|
||||
if ! check_dependencies; then
|
||||
@@ -144,17 +165,73 @@ main() {
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create directory if needed
|
||||
# Create directories if needed
|
||||
mkdir -p "$(dirname "$INSTALL_LOCATION")"
|
||||
mkdir -p "$HOME/.venvs"
|
||||
|
||||
# Download script
|
||||
echo "Downloading finish.sh..."
|
||||
URL="$REPO_URL/$BRANCH_OR_VERSION/$SCRIPT_NAME"
|
||||
# Download Python script
|
||||
echo "Downloading finish.py..."
|
||||
URL="$REPO_URL/$BRANCH_OR_VERSION/src/finish.py"
|
||||
if ! download_file "$URL" "$INSTALL_LOCATION"; then
|
||||
echo_error "Failed to download from $URL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Download requirements.txt
|
||||
echo "Downloading requirements.txt..."
|
||||
TEMP_REQ="/tmp/finish_requirements.txt"
|
||||
REQ_URL="$REPO_URL/$BRANCH_OR_VERSION/requirements.txt"
|
||||
if ! download_file "$REQ_URL" "$TEMP_REQ"; then
|
||||
echo_error "Failed to download requirements.txt from $REQ_URL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create virtualenv and install dependencies
|
||||
echo "Creating virtual environment..."
|
||||
VENV_PATH="$HOME/.venvs/finish"
|
||||
if [ -d "$VENV_PATH" ]; then
|
||||
echo "Virtual environment already exists, removing old one..."
|
||||
rm -rf "$VENV_PATH"
|
||||
fi
|
||||
|
||||
if ! python3 -m venv "$VENV_PATH"; then
|
||||
echo_error "Failed to create virtual environment"
|
||||
echo "Make sure python3-venv is installed:"
|
||||
echo " Ubuntu/Debian: sudo apt-get install python3-venv"
|
||||
exit 1
|
||||
fi
|
||||
echo_green "✓ Virtual environment created at $VENV_PATH"
|
||||
|
||||
echo "Installing Python dependencies..."
|
||||
if ! "$VENV_PATH/bin/pip" install --quiet --upgrade pip; then
|
||||
echo_error "Failed to upgrade pip"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! "$VENV_PATH/bin/pip" install --quiet -r "$TEMP_REQ"; then
|
||||
echo_error "Failed to install dependencies"
|
||||
cat "$TEMP_REQ"
|
||||
exit 1
|
||||
fi
|
||||
echo_green "✓ Dependencies installed"
|
||||
rm -f "$TEMP_REQ"
|
||||
|
||||
# Update shebang to use venv python (compatible with macOS and Linux)
|
||||
if sed --version >/dev/null 2>&1; then
|
||||
# GNU sed (Linux)
|
||||
sed -i "1s|.*|#!$VENV_PATH/bin/python3|" "$INSTALL_LOCATION"
|
||||
else
|
||||
# BSD sed (macOS)
|
||||
sed -i '' "1s|.*|#!$VENV_PATH/bin/python3|" "$INSTALL_LOCATION"
|
||||
fi
|
||||
|
||||
# Verify shebang was updated correctly
|
||||
SHEBANG=$(head -n 1 "$INSTALL_LOCATION")
|
||||
if [ "$SHEBANG" != "#!$VENV_PATH/bin/python3" ]; then
|
||||
echo_error "Failed to update shebang. Got: $SHEBANG"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
chmod +x "$INSTALL_LOCATION"
|
||||
echo_green "✓ Installed to $INSTALL_LOCATION"
|
||||
echo ""
|
||||
@@ -183,8 +260,18 @@ main() {
|
||||
fi
|
||||
fi
|
||||
|
||||
# Run finish install
|
||||
echo "Running finish installation..."
|
||||
# Test that finish works
|
||||
echo "Testing installation..."
|
||||
if ! "$INSTALL_LOCATION" --version > /dev/null 2>&1; then
|
||||
echo_error "finish command failed to execute"
|
||||
echo "Testing with direct python call..."
|
||||
"$VENV_PATH/bin/python3" "$INSTALL_LOCATION" --version
|
||||
exit 1
|
||||
fi
|
||||
echo_green "✓ finish command works"
|
||||
|
||||
# Run finish install to set up keybinding
|
||||
echo "Setting up shell keybinding..."
|
||||
if "$INSTALL_LOCATION" install; then
|
||||
echo ""
|
||||
echo_green "=========================================="
|
||||
@@ -195,10 +282,18 @@ main() {
|
||||
echo " 1. Reload your shell configuration:"
|
||||
echo " source $RC_FILE"
|
||||
echo ""
|
||||
echo " 2. Select a language model:"
|
||||
echo " finish model"
|
||||
echo " 2. Configure your LLM endpoint:"
|
||||
echo " finish config"
|
||||
echo ""
|
||||
echo " 3. Start using by pressing Tab twice after any command"
|
||||
echo " 3. Edit config if needed:"
|
||||
echo " nano ~/.finish/finish.json"
|
||||
echo ""
|
||||
echo " 4. Start using by pressing Alt+\\ after typing a command"
|
||||
echo ""
|
||||
echo "Example:"
|
||||
echo " Type: show gpu status"
|
||||
echo " Press: Alt+\\"
|
||||
echo " Select completion with ↑↓ arrows, Enter to accept"
|
||||
echo ""
|
||||
echo "Documentation: https://git.appmodel.nl/tour/finish"
|
||||
else
|
||||
|
||||
3
requirements.txt
Normal file
3
requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
httpx
|
||||
prompt_toolkit
|
||||
rich
|
||||
442
src/finish.py
Executable file
442
src/finish.py
Executable file
@@ -0,0 +1,442 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
finish.py – AI shell completions that never leave your machine.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
import hashlib
|
||||
|
||||
import argparse, asyncio, json, os, re, shlex, subprocess, sys, time
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
import httpx
|
||||
from prompt_toolkit import ANSI, Application
|
||||
from prompt_toolkit.buffer import Buffer
|
||||
from prompt_toolkit.key_binding import KeyBindings
|
||||
from prompt_toolkit.layout import (
|
||||
ConditionalContainer, FormattedTextControl, HSplit, Layout, Window
|
||||
)
|
||||
from prompt_toolkit.widgets import Label
|
||||
from rich.console import Console
|
||||
from rich.spinner import Spinner
|
||||
|
||||
# NOTE(review): docs/install.sh pins ACSH_VERSION="v0.6.0" while this reports
# 0.5.0 — confirm which version string is current before release.
VERSION = "0.5.0"
CFG_DIR = Path.home()/".finish"     # all persistent state lives under ~/.finish
CFG_FILE = CFG_DIR/"finish.json"    # user configuration (JSON)
CACHE_DIR = CFG_DIR/"cache"         # per-input completion cache (one JSON file each)
LOG_FILE = CFG_DIR/"finish.log"     # debug trace, written only when FINISH_DEBUG is set

# Shared rich console for user-facing output.
console = Console()
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Config
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Defaults written to ~/.finish/finish.json on first run (see cfg()).
DEFAULT_CFG = {
    "provider": "ollama",            # "ollama" gets special payload/response handling in llm_complete
    "model": "llama3:latest",
    "endpoint": "http://localhost:11434/api/chat",
    "temperature": 0.0,              # deterministic completions by default
    "api_prompt_cost": 0.0,          # $/prompt-token; 0 for local models
    "api_completion_cost": 0.0,      # $/completion-token; 0 for local models
    "max_history_commands": 20,      # history lines included in the LLM prompt
    "max_recent_files": 20,          # limit used by _recent_files()
    "cache_size": 100,               # max cached completion entries (LRU eviction)
}
|
||||
|
||||
def cfg() -> dict:
    """Load the user configuration, creating it with defaults on first run.

    The on-disk config is overlaid onto DEFAULT_CFG so files written by an
    older version (missing newly introduced keys) never cause a KeyError in
    callers that index the result directly (e.g. cfg()["cache_size"]).
    """
    if not CFG_FILE.exists():
        # parents=True: ~/.finish may not exist at all on a fresh install.
        CFG_DIR.mkdir(parents=True, exist_ok=True)
        CFG_FILE.write_text(json.dumps(DEFAULT_CFG, indent=2))
    return {**DEFAULT_CFG, **json.loads(CFG_FILE.read_text())}
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Context builders
|
||||
# --------------------------------------------------------------------------- #
|
||||
def _sanitise_history() -> str:
    """Return the last N shell-history lines with secret-looking tokens redacted.

    N comes from cfg()["max_history_commands"].  Any failure to read history
    (bash missing, non-interactive session, etc.) degrades to an empty string
    instead of aborting the whole completion request.
    """
    try:
        hist = subprocess.check_output(
            ["bash", "-ic", "history"], stderr=subprocess.DEVNULL
        ).decode()
    except (OSError, subprocess.SubprocessError):
        return ""
    # scrub tokens / hashes before anything is sent to the LLM
    for pat in (
        r"\b[0-9a-f]{32,40}\b",     # long hex (md5/sha1-like)
        r"\b[A-Za-z0-9-]{36}\b",    # uuid
        r"\b[A-Za-z0-9]{16,40}\b",  # api-keyish
    ):
        hist = re.sub(pat, "REDACTED", hist)
    return "\n".join(hist.splitlines()[-cfg()["max_history_commands"]:])
|
||||
|
||||
def _recent_files() -> str:
    """List files in the current directory, newest first, one per line.

    Relies on GNU find's "%T@ %p" format (mtime-prefixed) so a plain reverse
    string sort orders entries by recency.  Any failure — e.g. BSD find
    without -printf — yields an empty string.

    NOTE(review): this helper appears unused by build_prompt() — confirm
    whether recent files were meant to be part of the prompt context.
    """
    cmd = ["find", ".", "-maxdepth", "1", "-type", "f", "-printf", "%T@ %p\n"]
    try:
        listing = subprocess.check_output(cmd, stderr=subprocess.DEVNULL).decode()
        newest_first = sorted(listing.splitlines(), reverse=True)
        return "\n".join(newest_first[: cfg()["max_recent_files"]])
    except Exception:
        return ""
|
||||
|
||||
def _installed_tools() -> str:
|
||||
"""Check for commonly used tools that might be relevant"""
|
||||
tools = ["curl", "wget", "jq", "grep", "awk", "sed", "python3", "node", "docker", "git", "nvcc", "nvidia-smi"]
|
||||
available = []
|
||||
for tool in tools:
|
||||
try:
|
||||
subprocess.run(["which", tool], capture_output=True, timeout=0.5, check=True)
|
||||
available.append(tool)
|
||||
except Exception:
|
||||
pass
|
||||
return ", ".join(available) if available else "standard shell tools"
|
||||
|
||||
def _get_context_info() -> dict:
    """Snapshot of the shell environment used to build the LLM prompt.

    Keys: user, pwd, home, hostname, shell, tools — all plain strings with
    conservative fallbacks so the prompt never contains None.
    """
    env = os.getenv
    context = {
        "user": env("USER", "unknown"),
        "pwd": os.getcwd(),
        "home": env("HOME", ""),
        "hostname": env("HOSTNAME", env("HOST", "localhost")),
        "shell": env("SHELL", "bash"),
    }
    context["tools"] = _installed_tools()
    return context
|
||||
|
||||
def build_prompt(user_input: str) -> str:
    """Assemble the full LLM prompt for *user_input*.

    Combines a keyword-based intent guess, shell context, and redacted
    command history with fixed instructions/examples, ending in a JSON-only
    output-format directive that llm_complete() parses.
    """
    ctx = _get_context_info()

    # Keyword → hint rules, evaluated in order; every matching rule
    # contributes one bullet to the DETECTED INTENT section.
    intent_rules = [
        (("resolve", "get", "fetch", "download", "curl", "wget"),
         "The user wants to fetch/download data"),
        (("website", "url", "http", "html", "title"),
         "The user is working with web content"),
        (("parse", "extract", "grep", "find"),
         "The user wants to extract/parse data"),
        (("create", "make", "new", "write", "edit"),
         "The user wants to create or edit files"),
        (("file", "document", "text", "story", "about"),
         "The user is working with files/documents"),
        (("install", "setup", "configure", "apt", "pip"),
         "The user wants to install or configure software"),
        (("gpu", "nvidia", "cuda", "memory", "cpu"),
         "The user wants system/hardware information"),
    ]
    user_words = set(user_input.lower().split())
    intent_hints = [hint for keywords, hint in intent_rules
                    if user_words.intersection(keywords)]

    intent_context = "\n".join(f"- {hint}" for hint in intent_hints) if intent_hints else "- General command completion"

    prompt = f"""You are an intelligent bash completion assistant. Analyze the user's intent and provide practical, executable commands.

USER INPUT: {user_input}

DETECTED INTENT:
{intent_context}

SHELL CONTEXT:
- User: {ctx['user']}
- Working directory: {ctx['pwd']}
- Hostname: {ctx['hostname']}
- Available tools: {ctx['tools']}

RECENT COMMAND HISTORY:
{_sanitise_history()}

INSTRUCTIONS:
1. Understand what the user wants to accomplish (not just the literal words)
2. Generate 2-5 practical bash commands that achieve the user's goal
3. Prefer common tools (curl, wget, grep, awk, sed, jq, python)
4. For file creation: use echo with heredoc, cat, or nano/vim
5. For web tasks: use curl with headers, pipe to grep/sed/awk for parsing
6. For complex parsing: suggest one-liners with proper escaping
7. Commands should be copy-paste ready and executable
8. Order by most likely to least likely intent

EXAMPLES:
- "resolve title of website example.com" → curl -s https://example.com | grep -oP '<title>\\K[^<]+'
- "show gpu status" → nvidia-smi
- "download json from api.com" → curl -s https://api.com/data | jq .
- "make a new file story.txt" → nano story.txt
- "create file and write hello" → echo "hello" > file.txt
- "write about dogs to file" → cat > dogs.txt << 'EOF' (then Ctrl+D to finish)

OUTPUT FORMAT (JSON only, no other text):
{{"completions":["command1", "command2", "command3"]}}"""

    return prompt
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# LLM call
|
||||
# --------------------------------------------------------------------------- #
|
||||
async def llm_complete(prompt: str) -> List[str]:
    """POST *prompt* to the configured LLM endpoint and return completions.

    Parsing is attempted in three stages: strict JSON, a regex extraction of
    the "completions" array from malformed JSON, and finally a grep for
    command-looking lines.  Network failures are reported on the console and
    yield []; this function never raises to its caller.
    """
    def _dbg(msg: str) -> None:
        # Opt-in trace (FINISH_DEBUG=1) appended to ~/.finish/finish.log.
        if os.getenv("FINISH_DEBUG"):
            LOG_FILE.parent.mkdir(exist_ok=True)
            with open(LOG_FILE, "a") as f:
                f.write(msg)

    c = cfg()
    payload = {
        "model": c["model"],
        "temperature": c["temperature"],
        "messages": [
            {"role": "system", "content": "You are a helpful bash completion assistant."},
            {"role": "user", "content": prompt},
        ],
        "response_format": {"type": "text"},
    }
    if c["provider"] == "ollama":
        # Ollama speaks a slightly different dialect: request JSON output
        # and disable streaming so the reply arrives as a single body.
        payload["format"] = "json"
        payload["stream"] = False

    headers = {"Content-Type": "application/json"}
    if c.get("api_key"):
        headers["Authorization"] = f"Bearer {c['api_key']}"

    try:
        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.post(c["endpoint"], headers=headers, json=payload)
            resp.raise_for_status()
            body = resp.json()
    except httpx.ConnectError:
        console.print(f"[red]Error:[/] Cannot connect to {c['endpoint']}")
        console.print("[yellow]Check that your LLM server is running and endpoint is correct.[/]")
        console.print("[yellow]Run 'finish config' to see current config.[/]")
        return []
    except Exception as e:
        console.print(f"[red]Error:[/] {e}")
        return []

    # Extract the assistant message; the response shape differs per provider.
    if c["provider"] == "ollama":
        raw = body["message"]["content"]
    else:
        raw = body["choices"][0]["message"]["content"]

    _dbg(f"\n=== {time.strftime('%Y-%m-%d %H:%M:%S')} ===\nRaw response:\n{raw}\n")

    # Stage 1: proper JSON (strict=False tolerates control chars in strings).
    try:
        result = json.loads(raw, strict=False)["completions"]
        _dbg(f"Parsed completions: {result}\n")
        return result
    except Exception as e:
        _dbg(f"JSON parse failed: {e}\nAttempting manual extraction...\n")

    # Stage 2: pull the "completions" array out of malformed JSON via regex.
    try:
        match = re.search(r'"completions"\s*:\s*\[(.*?)\]', raw, re.DOTALL)
        if match:
            array_content = match.group(1)
            # Quoted strings, allowing escaped characters inside.
            commands = re.findall(r'"([^"\\]*(?:\\.[^"\\]*)*)"', array_content)
            if commands:
                _dbg(f"Manual extraction succeeded: {commands}\n")
                return commands[:5]
    except Exception as e2:
        _dbg(f"Manual extraction failed: {e2}\n")

    # Stage 3: keep lines that start with a common shell command.
    fallback = [ln for ln in raw.splitlines() if re.match(r"^(ls|cd|find|cat|grep|echo|mkdir|rm|cp|mv|pwd|chmod|chown|nano|vim|touch)\b", ln)][:5]
    _dbg(f"Fallback completions: {fallback}\n")
    return fallback
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# TUI picker
|
||||
# --------------------------------------------------------------------------- #
|
||||
async def select_completion(completions: List[str]) -> Optional[str]:
    """Let the user pick one completion via an inline arrow-key picker.

    Returns the chosen string; the sole/first candidate when no interaction
    is possible (single result, non-tty stdin, or no /dev/tty); or None when
    the list is empty or the user cancels with Esc.
    """
    if not completions:
        return None
    if len(completions) == 1:
        return completions[0]

    # If not in an interactive terminal, return first completion
    if not sys.stdin.isatty():
        return completions[0]

    # Import here to avoid issues when not needed
    from prompt_toolkit.input import create_input
    from prompt_toolkit.output import create_output

    kb = KeyBindings()
    current = 0

    def get_text():
        # [SetCursorPosition] marks the currently highlighted row.
        return [
            ("", "Select completion (↑↓ navigate, Enter accept, Esc cancel)\n\n"),
            *[
                ("[SetCursorPosition]" if i == current else "", f"{c}\n")
                for i, c in enumerate(completions)
            ],
        ]

    @kb.add("up")
    def _up(event):
        nonlocal current
        current = (current - 1) % len(completions)

    @kb.add("down")
    def _down(event):
        nonlocal current
        current = (current + 1) % len(completions)

    @kb.add("enter")
    def _accept(event):
        event.app.exit(result=completions[current])

    @kb.add("escape")
    def _cancel(event):
        event.app.exit(result=None)

    control = FormattedTextControl(get_text)

    # Talk to /dev/tty directly so stdout stays clean for bash command
    # substitution.  Keep the handles so they can be closed when the picker
    # exits (previously they were leaked on every invocation).
    tty_out = tty_in = None
    try:
        tty_out = open('/dev/tty', 'w')
        tty_in = open('/dev/tty', 'r')
        output = create_output(stdout=tty_out)
        input_obj = create_input(stdin=tty_in)
    except Exception:
        # No usable tty: release whatever was opened and fall back.
        for fh in (tty_out, tty_in):
            if fh is not None:
                fh.close()
        return completions[0]

    try:
        app = Application(
            layout=Layout(HSplit([Window(control, height=len(completions) + 2)])),
            key_bindings=kb,
            mouse_support=False,
            erase_when_done=True,
            output=output,
            input=input_obj,
        )
        return await app.run_async()
    finally:
        tty_out.close()
        tty_in.close()
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Cache
|
||||
# --------------------------------------------------------------------------- #
|
||||
def cached(key: str) -> Optional[List[str]]:
    """Return cached completions for *key*, or None on a miss.

    Ensures CACHE_DIR exists as a side effect.  A corrupt or truncated cache
    entry is treated as a miss (and removed) rather than crashing the
    completion request.
    """
    if not CACHE_DIR.exists():
        CACHE_DIR.mkdir(parents=True)
    f = CACHE_DIR / f"{key}.json"
    if f.exists():
        try:
            return json.loads(f.read_text())
        except (OSError, json.JSONDecodeError):
            # Bad entry (e.g. interrupted write): drop it so the next
            # successful completion rewrites it cleanly.
            f.unlink(missing_ok=True)
    return None
|
||||
|
||||
def store_cache(key: str, completions: List[str]) -> None:
    """Persist *completions* for *key*, evicting oldest entries beyond cache_size.

    Creates CACHE_DIR itself rather than relying on cached() having run first.
    """
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    f = CACHE_DIR / f"{key}.json"
    f.write_text(json.dumps(completions))
    # LRU eviction: keep the cfg()["cache_size"] most recently modified files.
    all_files = sorted(CACHE_DIR.glob("*.json"), key=lambda p: p.stat().st_mtime)
    limit = cfg()["cache_size"]
    # Note: a plain [:-limit] slice with limit == 0 would evict NOTHING
    # (negative-zero slice); treat 0 as "cache disabled" and evict all.
    stale = all_files[:-limit] if limit > 0 else all_files
    for old in stale:
        old.unlink(missing_ok=True)
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Main entry
|
||||
# --------------------------------------------------------------------------- #
|
||||
async def complete_line(line: str) -> Optional[str]:
    """Resolve *line* to one completion: cache → LLM → interactive picker.

    Returns the user's chosen command string, or None when nothing was
    produced or the user cancelled.
    """
    key = hashlib.md5(line.encode()).hexdigest()
    comps = cached(key)
    if comps is None:
        # The spinner goes to the terminal directly so stdout stays clean
        # for bash command substitution; fall back to the shared console.
        tty = None
        try:
            tty = open('/dev/tty', 'w')
            status_console = Console(file=tty, stderr=False)
        except Exception:
            status_console = console

        try:
            with status_console.status("[green]Thinking…[/]"):
                comps = await llm_complete(build_prompt(line))
        finally:
            if tty is not None:
                tty.close()  # previously leaked on every cache miss

        # Only cache non-empty results: a transient LLM outage must not
        # permanently pin this input to "no completions".
        if comps:
            store_cache(key, comps)
    return await select_completion(comps)
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# CLI
|
||||
# --------------------------------------------------------------------------- #
|
||||
def install_keybinding():
    r"""Append the Bash Alt+\ key-binding for finish to ~/.bashrc (idempotent)."""
    rc = Path.home() / ".bashrc"
    marker = "# finish.py key-binding"

    existing = rc.read_text() if rc.exists() else ""
    if marker in existing:
        # Snippet already present — never append a duplicate.
        return

    # bind -x lets the handler rewrite READLINE_LINE in place.
    snippet = marker + r'''
_finish_complete() {
    local result
    result=$(finish --readline-complete "$READLINE_LINE" 2>/dev/null)
    if [[ -n "$result" ]]; then
        READLINE_LINE="$result"
        READLINE_POINT=${#READLINE_LINE}
    fi
}
bind -x '"\e\\": _finish_complete'
'''
    rc.write_text(existing + "\n" + snippet)
    console.print("[green]Key-binding installed (Alt+\\)[/] – restart your shell.")
|
||||
|
||||
def main():
    """CLI entry point.

    Dispatch order matters: the two raw-argv paths below must run before
    argparse, because bash's bind -x handler passes arbitrary command text
    that argparse would reject as unknown options.
    """
    # Handle readline-complete flag before argparse (for bash bind -x)
    if "--readline-complete" in sys.argv:
        idx = sys.argv.index("--readline-complete")
        line = sys.argv[idx + 1] if idx + 1 < len(sys.argv) else ""
        if line.strip():
            choice = asyncio.run(complete_line(line))
            if choice:
                # Plain print: bash captures stdout and puts it on the line.
                print(choice)
        return

    # Legacy flag support
    if sys.argv[-1] == "--accept-current-line":
        line = os.environ.get("READLINE_LINE", "")
        if line.strip():
            choice = asyncio.run(complete_line(line))
            if choice:
                # Escape sequences: reset title, clear the current line,
                # then emit the chosen command.
                sys.stdout.write(f"\x1b]0;\a")
                sys.stdout.write(f"\x1b[2K\r")
                sys.stdout.write(choice)
                sys.stdout.flush()
        return

    parser = argparse.ArgumentParser(prog="finish", description="AI shell completions")
    parser.add_argument("--version", action="version", version=VERSION)
    sub = parser.add_subparsers(dest="cmd")
    sub.add_parser("install", help="add Alt+\\ key-binding to ~/.bashrc")
    sub.add_parser("config", help="show current config")
    p = sub.add_parser("command", help="simulate double-tab")
    p.add_argument("words", nargs="*", help="partial command")
    p.add_argument("--dry-run", action="store_true", help="show prompt only")
    args = parser.parse_args()

    if args.cmd == "install":
        install_keybinding()
        return
    if args.cmd == "config":
        console.print_json(json.dumps(cfg()))
        return
    if args.cmd == "command":
        line = " ".join(args.words)
        if args.dry_run:
            # Show the prompt that would be sent, without calling the LLM.
            console.print(build_prompt(line))
            return
        choice = asyncio.run(complete_line(line))
        if choice:
            # Check if we're in an interactive shell
            if os.isatty(sys.stdout.fileno()):
                console.print(f"[dim]Selected:[/] [cyan]{choice}[/]")
                console.print(f"\n[yellow]Tip:[/] Use [bold]Alt+\\[/] keybinding for seamless completion!")
            else:
                # Non-interactive: emit only the command for capture.
                print(choice)
        return
    # No subcommand given: show usage.
    if len(sys.argv) == 1:
        parser.print_help()
        return
|
||||
|
||||
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C mid-completion is a normal cancel — exit quietly.
        pass
|
||||
@@ -253,6 +253,7 @@ log_request() {
|
||||
created=$(echo "$response_body" | jq -r ".created // $created")
|
||||
api_cost=$(echo "$prompt_tokens_int * $ACSH_API_PROMPT_COST + $completion_tokens_int * $ACSH_API_COMPLETION_COST" | bc)
|
||||
log_file=${ACSH_LOG_FILE:-"$HOME/.finish/finish.log"}
|
||||
mkdir -p "$(dirname "$log_file")" 2>/dev/null
|
||||
echo "$created,$user_input_hash,$prompt_tokens_int,$completion_tokens_int,$api_cost" >> "$log_file"
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user