2 Commits
1.5 ... 1.8

Author SHA1 Message Date
53b6ae3a76 Added some more functions, e.g. use of models. 2025-12-17 14:43:47 +01:00
459f6f8165 Some changes, updates and happy thoughts 2025-12-15 12:09:18 +01:00
3 changed files with 571 additions and 60 deletions

3
.gitignore vendored
View File

@@ -22,4 +22,5 @@ Pipfile.lock # Consider if you want to include or exclude
._* ._*
*~.nib *~.nib
*~.xib *~.xib
README.md.old README.md.old
oai.zip

View File

@@ -108,6 +108,17 @@ All configuration is stored in `~/.config/oai/`:
/model /model
``` ```
**Paste from clipboard:**
Paste clipboard content and send it to the model
```
/paste
```
Paste clipboard content with an accompanying prompt and send it to the model
```
/paste Analyze this text
```
**Start Chatting:** **Start Chatting:**
``` ```
You> Hello, how are you? You> Hello, how are you?

617
oai.py
View File

@@ -10,6 +10,8 @@ from rich.console import Console
from rich.panel import Panel from rich.panel import Panel
from rich.table import Table from rich.table import Table
from rich.text import Text from rich.text import Text
from rich.markdown import Markdown
from rich.live import Live
from openrouter import OpenRouter from openrouter import OpenRouter
import pyperclip import pyperclip
import mimetypes import mimetypes
@@ -22,9 +24,14 @@ import logging # Added missing import for logging
from prompt_toolkit import PromptSession from prompt_toolkit import PromptSession
from prompt_toolkit.history import FileHistory from prompt_toolkit.history import FileHistory
from rich.logging import RichHandler from rich.logging import RichHandler
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
app = typer.Typer() app = typer.Typer()
# Application identification for OpenRouter
APP_NAME = "oAI"
APP_URL = "https://iurl.no/oai"
# Paths # Paths
home = Path.home() home = Path.home()
config_dir = home / '.config' / 'oai' config_dir = home / '.config' / 'oai'
@@ -75,7 +82,7 @@ app_logger.setLevel(logging.INFO)
# DB configuration # DB configuration
database = config_dir / 'oai_config.db' database = config_dir / 'oai_config.db'
DB_FILE = str(database) DB_FILE = str(database)
version = '1.5' version = '1.8'
def create_table_if_not_exists(): def create_table_if_not_exists():
"""Ensure the config and conversation_sessions tables exist.""" """Ensure the config and conversation_sessions tables exist."""
@@ -123,6 +130,13 @@ def load_conversation(name: str) -> Optional[List[Dict[str, str]]]:
return json.loads(result[0]) return json.loads(result[0])
return None return None
def delete_conversation(name: str) -> int:
    """Remove every saved conversation session stored under *name*.

    Returns the number of rows that were deleted (0 when nothing matched).
    """
    with sqlite3.connect(DB_FILE) as db:
        result = db.execute('DELETE FROM conversation_sessions WHERE name = ?', (name,))
        db.commit()
        return result.rowcount
def list_conversations() -> List[Dict[str, Any]]: def list_conversations() -> List[Dict[str, Any]]:
"""List all saved conversations from DB with metadata.""" """List all saved conversations from DB with metadata."""
with sqlite3.connect(DB_FILE) as conn: with sqlite3.connect(DB_FILE) as conn:
@@ -264,11 +278,18 @@ DEFAULT_MODEL_ID = get_config('default_model')
MAX_TOKEN = int(get_config('max_token') or "100000") MAX_TOKEN = int(get_config('max_token') or "100000")
COST_WARNING_THRESHOLD = float(get_config(HIGH_COST_WARNING) or "0.01") # Configurable cost threshold for alerts COST_WARNING_THRESHOLD = float(get_config(HIGH_COST_WARNING) or "0.01") # Configurable cost threshold for alerts
# Fetch models # Fetch models with app identification headers
models_data = [] models_data = []
text_models = [] text_models = []
try: try:
headers = {"Authorization": f"Bearer {API_KEY}"} if API_KEY else {} headers = {
"Authorization": f"Bearer {API_KEY}",
"HTTP-Referer": APP_URL,
"X-Title": APP_NAME
} if API_KEY else {
"HTTP-Referer": APP_URL,
"X-Title": APP_NAME
}
response = requests.get(f"{OPENROUTER_BASE_URL}/models", headers=headers) response = requests.get(f"{OPENROUTER_BASE_URL}/models", headers=headers)
response.raise_for_status() response.raise_for_status()
models_data = response.json()["data"] models_data = response.json()["data"]
@@ -287,7 +308,11 @@ def get_credits(api_key: str, base_url: str = OPENROUTER_BASE_URL) -> Optional[D
if not api_key: if not api_key:
return None return None
url = f"{base_url}/credits" url = f"{base_url}/credits"
headers = {"Authorization": f"Bearer {api_key}"} headers = {
"Authorization": f"Bearer {api_key}",
"HTTP-Referer": APP_URL,
"X-Title": APP_NAME
}
try: try:
response = requests.get(url, headers=headers) response = requests.get(url, headers=headers)
response.raise_for_status() response.raise_for_status()
@@ -322,6 +347,166 @@ def clear_screen():
except: except:
print("\n" * 100) print("\n" * 100)
def has_web_search_capability(model: Dict[str, Any]) -> bool:
    """Return True when the model advertises web-search support.

    OpenRouter signals this through the 'tools' entry of the model's
    ``supported_parameters`` list.
    """
    return "tools" in model.get("supported_parameters", [])
def has_image_capability(model: Dict[str, Any]) -> bool:
    """Return True when the model accepts image input.

    Image support is read from ``architecture.input_modalities`` in the
    model record returned by the OpenRouter /models endpoint.
    """
    modalities = model.get("architecture", {}).get("input_modalities", [])
    return "image" in modalities
def supports_online_mode(model: Dict[str, Any]) -> bool:
    """Check if model supports :online suffix for web search.

    Delegates to :func:`has_web_search_capability`: any model that accepts
    the 'tools' parameter can be addressed as ``<model_id>:online``.
    """
    # Models that support tools parameter can use :online
    return has_web_search_capability(model)
def get_effective_model_id(base_model_id: str, online_enabled: bool) -> str:
    """Return the model ID to send to the API.

    Appends ':online' when web search is enabled, unless the suffix is
    already present on *base_model_id*.
    """
    needs_suffix = online_enabled and not base_model_id.endswith(':online')
    return f"{base_model_id}:online" if needs_suffix else base_model_id
def display_paginated_table(table: Table, title: str):
    """Render *table* page by page, repeating the header row on every page.

    The table is first rendered to Rich segments so colors/styles survive
    pagination. If everything fits on one screen, the table is shown inside
    a Panel as usual; otherwise the screen is cleared per page and the user
    advances with SPACE (any other key stops early).
    """
    # Usable rows: terminal height minus room for title, prompt and margins.
    try:
        terminal_height = os.get_terminal_size().lines - 8
    except OSError:
        terminal_height = 20  # Fallback when not attached to a real terminal
    # Render once, then regroup the flat segment stream into per-line lists,
    # keeping each segment's style so it can be re-printed with colors intact.
    segments = list(console.render(table))
    all_lines = []
    current_line_segments = []
    for segment in segments:
        if segment.text == '\n':
            all_lines.append(current_line_segments)
            current_line_segments = []
        else:
            current_line_segments.append(segment)
    if current_line_segments:  # trailing line without a final newline
        all_lines.append(current_line_segments)
    total_lines = len(all_lines)
    # Single-screen fast path: no pagination needed.
    if total_lines <= terminal_height:
        console.print(Panel(table, title=title, title_align="left"))
        return
    # Locate the end of the header: the first box-drawing separator line
    # that follows a styled (bold/magenta) header row.
    header_end_index = 0
    found_header_text = False
    for i, line_segments in enumerate(all_lines):
        has_header_style = any(
            seg.style and ('bold' in str(seg.style) or 'magenta' in str(seg.style))
            for seg in line_segments
        )
        if has_header_style:
            found_header_text = True
        if found_header_text and i > 0:
            line_text = ''.join(seg.text for seg in line_segments)
            # Check for horizontal rule characters used by Rich's box styles.
            # (Previously these literals were corrupted to empty strings,
            # which made the membership test always true.)
            if any(char in line_text for char in ['─', '━', '═', '╌', '╍', '┄']):
                header_end_index = i
                break
    if header_end_index > 0:
        header_lines = all_lines[:header_end_index + 1]  # include separator
        data_lines = all_lines[header_end_index + 1:]
    else:
        # Fallback: assume the conventional 3-line header (border/row/rule).
        header_lines = all_lines[:min(3, len(all_lines))]
        data_lines = all_lines[min(3, len(all_lines)):]
    # Data rows per page; never below 1 so the loop always makes progress
    # even in pathologically short terminals.
    lines_per_page = max(1, terminal_height - len(header_lines))
    current_line = 0
    page_number = 1
    while current_line < len(data_lines):
        clear_screen()
        console.print(f"[bold cyan]{title} (Page {page_number})[/]")
        # Repeat the header on every page so columns stay identifiable.
        for line_segments in header_lines:
            for segment in line_segments:
                console.print(segment.text, style=segment.style, end="")
            console.print()  # terminate the row
        end_line = min(current_line + lines_per_page, len(data_lines))
        for line_segments in data_lines[current_line:end_line]:
            for segment in line_segments:
                console.print(segment.text, style=segment.style, end="")
            console.print()  # terminate the row
        current_line = end_line
        page_number += 1
        if current_line >= len(data_lines):
            break  # everything has been shown
        console.print(f"\n[dim yellow]--- Press SPACE for next page, or any other key to finish (Page {page_number - 1}, showing {end_line}/{len(data_lines)} data rows) ---[/dim yellow]")
        try:
            import sys
            import termios
            import tty
            # Read a single key without waiting for Enter (POSIX only).
            fd = sys.stdin.fileno()
            old_settings = termios.tcgetattr(fd)
            try:
                tty.setraw(fd)
                char = sys.stdin.read(1)
                if char != ' ':
                    break
            finally:
                # Always restore the terminal, even if the read raised.
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        except Exception:
            # Windows / non-tty fallback: plain input(); empty line continues.
            input_char = input().strip()
            if input_char != '':
                break
@app.command() @app.command()
def chat(): def chat():
global API_KEY, OPENROUTER_BASE_URL, STREAM_ENABLED, MAX_TOKEN, COST_WARNING_THRESHOLD global API_KEY, OPENROUTER_BASE_URL, STREAM_ENABLED, MAX_TOKEN, COST_WARNING_THRESHOLD
@@ -334,9 +519,12 @@ def chat():
total_cost = 0.0 total_cost = 0.0
message_count = 0 message_count = 0
middle_out_enabled = False # Session-level middle-out transform flag middle_out_enabled = False # Session-level middle-out transform flag
conversation_memory_enabled = True # Memory ON by default
memory_start_index = 0 # Track when memory was last enabled
saved_conversations_cache = [] # Cache for /list results to use with /load by number saved_conversations_cache = [] # Cache for /list results to use with /load by number
online_mode_enabled = False # Online mode (web search) disabled by default
app_logger.info("Starting new chat session") # Log session start app_logger.info("Starting new chat session with memory enabled") # Log session start
if not API_KEY: if not API_KEY:
console.print("[bold red]API key not found. Use '/config api'.[/]") console.print("[bold red]API key not found. Use '/config api'.[/]")
@@ -365,7 +553,10 @@ def chat():
app_logger.warning(f"Startup credit alerts: {startup_alert_msg}") app_logger.warning(f"Startup credit alerts: {startup_alert_msg}")
selected_model = selected_model_default selected_model = selected_model_default
# Initialize OpenRouter client
client = OpenRouter(api_key=API_KEY) client = OpenRouter(api_key=API_KEY)
if selected_model: if selected_model:
console.print(f"[bold blue]Welcome to oAI![/] [bold red]Active model: {selected_model['name']}[/]") console.print(f"[bold blue]Welcome to oAI![/] [bold red]Active model: {selected_model['name']}[/]")
else: else:
@@ -379,7 +570,7 @@ def chat():
while True: while True:
try: try:
user_input = session.prompt("You> ").strip() user_input = session.prompt("You> ", auto_suggest=AutoSuggestFromHistory()).strip()
if user_input.lower() in ["exit", "quit", "bye"]: if user_input.lower() in ["exit", "quit", "bye"]:
total_tokens = total_input_tokens + total_output_tokens total_tokens = total_input_tokens + total_output_tokens
app_logger.info(f"Session ended. Total messages: {message_count}, Total tokens: {total_tokens}, Total cost: ${total_cost:.4f}") # Log session summary app_logger.info(f"Session ended. Total messages: {message_count}, Total tokens: {total_tokens}, Total cost: ${total_cost:.4f}") # Log session summary
@@ -396,6 +587,122 @@ def chat():
console.print("[bold green]Retrying last prompt...[/]") console.print("[bold green]Retrying last prompt...[/]")
app_logger.info(f"Retrying prompt: {last_prompt[:100]}...") app_logger.info(f"Retrying prompt: {last_prompt[:100]}...")
user_input = last_prompt user_input = last_prompt
elif user_input.lower().startswith("/online"):
args = user_input[8:].strip()
if not args:
status = "enabled" if online_mode_enabled else "disabled"
console.print(f"[bold blue]Online mode (web search) {status}.[/]")
if selected_model:
if supports_online_mode(selected_model):
console.print(f"[dim green]Current model '{selected_model['name']}' supports online mode.[/]")
else:
console.print(f"[dim yellow]Current model '{selected_model['name']}' does not support online mode.[/]")
continue
if args.lower() == "on":
if not selected_model:
console.print("[bold red]No model selected. Select a model first with '/model'.[/]")
continue
if not supports_online_mode(selected_model):
console.print(f"[bold red]Model '{selected_model['name']}' does not support online mode (web search).[/]")
console.print("[dim yellow]Online mode requires models with 'tools' parameter support.[/]")
app_logger.warning(f"Online mode activation failed - model {selected_model['id']} doesn't support it")
continue
online_mode_enabled = True
console.print("[bold green]Online mode enabled. Model will use web search capabilities.[/]")
console.print(f"[dim blue]Effective model ID: {get_effective_model_id(selected_model['id'], True)}[/]")
app_logger.info(f"Online mode enabled for model {selected_model['id']}")
elif args.lower() == "off":
online_mode_enabled = False
console.print("[bold green]Online mode disabled. Model will not use web search.[/]")
if selected_model:
console.print(f"[dim blue]Effective model ID: {selected_model['id']}[/]")
app_logger.info("Online mode disabled")
else:
console.print("[bold yellow]Usage: /online on|off (or /online to view status)[/]")
continue
elif user_input.lower().startswith("/memory"):
args = user_input[8:].strip()
if not args:
status = "enabled" if conversation_memory_enabled else "disabled"
history_count = len(session_history) - memory_start_index if conversation_memory_enabled and memory_start_index < len(session_history) else 0
console.print(f"[bold blue]Conversation memory {status}.[/]")
if conversation_memory_enabled:
console.print(f"[dim blue]Tracking {history_count} message(s) since memory enabled.[/]")
else:
console.print(f"[dim yellow]Memory disabled. Each request is independent (saves tokens/cost).[/]")
continue
if args.lower() == "on":
conversation_memory_enabled = True
memory_start_index = len(session_history) # Remember where we started
console.print("[bold green]Conversation memory enabled. Will remember conversations from this point forward.[/]")
console.print(f"[dim blue]Memory will track messages starting from index {memory_start_index}.[/]")
app_logger.info(f"Conversation memory enabled at index {memory_start_index}")
elif args.lower() == "off":
conversation_memory_enabled = False
console.print("[bold green]Conversation memory disabled. API calls will not include history (lower cost).[/]")
console.print(f"[dim yellow]Note: Messages are still saved locally but not sent to API.[/]")
app_logger.info("Conversation memory disabled")
else:
console.print("[bold yellow]Usage: /memory on|off (or /memory to view status)[/]")
continue
elif user_input.lower().startswith("/paste"):
# Get optional prompt after /paste
optional_prompt = user_input[7:].strip()
try:
clipboard_content = pyperclip.paste()
except Exception as e:
console.print(f"[bold red]Failed to access clipboard: {e}[/]")
app_logger.error(f"Clipboard access error: {e}")
continue
if not clipboard_content or not clipboard_content.strip():
console.print("[bold red]Clipboard is empty.[/]")
app_logger.warning("Paste attempted with empty clipboard")
continue
# Validate it's text (check if it's valid UTF-8 and printable)
try:
# Try to encode/decode to ensure it's valid text
clipboard_content.encode('utf-8')
# Show preview of pasted content
preview_lines = clipboard_content.split('\n')[:10] # First 10 lines
preview_text = '\n'.join(preview_lines)
if len(clipboard_content.split('\n')) > 10:
preview_text += "\n... (content truncated for preview)"
char_count = len(clipboard_content)
line_count = len(clipboard_content.split('\n'))
console.print(Panel(
preview_text,
title=f"[bold cyan]📋 Clipboard Content Preview ({char_count} chars, {line_count} lines)[/]",
title_align="left",
border_style="cyan"
))
# Build the final prompt
if optional_prompt:
final_prompt = f"{optional_prompt}\n\n```\n{clipboard_content}\n```"
console.print(f"[dim blue]Sending with prompt: '{optional_prompt}'[/]")
else:
final_prompt = clipboard_content
console.print("[dim blue]Sending clipboard content without additional prompt[/]")
# Set user_input to the pasted content so it gets processed normally
user_input = final_prompt
app_logger.info(f"Pasted content from clipboard: {char_count} chars, {line_count} lines, with prompt: {bool(optional_prompt)}")
except UnicodeDecodeError:
console.print("[bold red]Clipboard contains non-text (binary) data. Only plain text is supported.[/]")
app_logger.error("Paste failed - clipboard contains binary data")
continue
except Exception as e:
console.print(f"[bold red]Error processing clipboard content: {e}[/]")
app_logger.error(f"Clipboard processing error: {e}")
continue
elif user_input.lower().startswith("/export"): elif user_input.lower().startswith("/export"):
args = user_input[8:].strip().split(maxsplit=1) args = user_input[8:].strip().split(maxsplit=1)
if len(args) != 2: if len(args) != 2:
@@ -477,6 +784,9 @@ def chat():
continue continue
session_history = loaded_data session_history = loaded_data
current_index = len(session_history) - 1 current_index = len(session_history) - 1
# When loading, reset memory tracking if memory is enabled
if conversation_memory_enabled:
memory_start_index = 0 # Include all loaded messages in memory
total_input_tokens = 0 total_input_tokens = 0
total_output_tokens = 0 total_output_tokens = 0
total_cost = 0.0 total_cost = 0.0
@@ -484,6 +794,48 @@ def chat():
console.print(f"[bold green]Conversation '{conversation_name}' loaded with {len(session_history)} messages.[/]") console.print(f"[bold green]Conversation '{conversation_name}' loaded with {len(session_history)} messages.[/]")
app_logger.info(f"Conversation '{conversation_name}' loaded with {len(session_history)} messages") app_logger.info(f"Conversation '{conversation_name}' loaded with {len(session_history)} messages")
continue continue
elif user_input.lower().startswith("/delete"):
args = user_input[8:].strip()
if not args:
console.print("[bold red]Usage: /delete <name|number>[/]")
console.print("[bold yellow]Tip: Use /list to see numbered conversations[/]")
continue
# Check if input is a number
conversation_name = None
if args.isdigit():
conv_number = int(args)
if saved_conversations_cache and 1 <= conv_number <= len(saved_conversations_cache):
conversation_name = saved_conversations_cache[conv_number - 1]['name']
console.print(f"[bold cyan]Deleting conversation #{conv_number}: '{conversation_name}'[/]")
else:
console.print(f"[bold red]Invalid conversation number: {conv_number}[/]")
console.print(f"[bold yellow]Use /list to see available conversations (1-{len(saved_conversations_cache) if saved_conversations_cache else 0})[/]")
continue
else:
conversation_name = args
# Confirm deletion
try:
confirm = typer.confirm(f"Delete conversation '{conversation_name}'? This cannot be undone.", default=False)
if not confirm:
console.print("[bold yellow]Deletion cancelled.[/]")
continue
except (EOFError, KeyboardInterrupt):
console.print("\n[bold yellow]Deletion cancelled.[/]")
continue
deleted_count = delete_conversation(conversation_name)
if deleted_count > 0:
console.print(f"[bold green]Conversation '{conversation_name}' deleted ({deleted_count} version(s) removed).[/]")
app_logger.info(f"Conversation '{conversation_name}' deleted - {deleted_count} version(s)")
# Refresh cache if deleted conversation was in it
if saved_conversations_cache:
saved_conversations_cache = [c for c in saved_conversations_cache if c['name'] != conversation_name]
else:
console.print(f"[bold red]Conversation '{conversation_name}' not found.[/]")
app_logger.warning(f"Delete failed for '{conversation_name}' - not found")
continue
elif user_input.lower() == "/list": elif user_input.lower() == "/list":
conversations = list_conversations() conversations = list_conversations()
if not conversations: if not conversations:
@@ -511,7 +863,7 @@ def chat():
formatted_time formatted_time
) )
console.print(Panel(table, title=f"[bold green]Saved Conversations ({len(conversations)} total)[/]", title_align="left", subtitle="[dim]Use /load <number> or /load <name> to load a conversation[/]", subtitle_align="right")) console.print(Panel(table, title=f"[bold green]Saved Conversations ({len(conversations)} total)[/]", title_align="left", subtitle="[dim]Use /load <number> or /delete <number> to manage conversations[/]", subtitle_align="right"))
app_logger.info(f"User viewed conversation list - {len(conversations)} conversations") app_logger.info(f"User viewed conversation list - {len(conversations)} conversations")
continue continue
elif user_input.lower() == "/prev": elif user_input.lower() == "/prev":
@@ -520,7 +872,9 @@ def chat():
continue continue
current_index -= 1 current_index -= 1
prev_response = session_history[current_index]['response'] prev_response = session_history[current_index]['response']
console.print(Panel(prev_response, title=f"[bold green]Previous Response ({current_index + 1}/{len(session_history)})[/]")) # Render as markdown with proper formatting
md = Markdown(prev_response)
console.print(Panel(md, title=f"[bold green]Previous Response ({current_index + 1}/{len(session_history)})[/]", title_align="left"))
app_logger.debug(f"Viewed previous response at index {current_index}") app_logger.debug(f"Viewed previous response at index {current_index}")
continue continue
elif user_input.lower() == "/next": elif user_input.lower() == "/next":
@@ -529,7 +883,9 @@ def chat():
continue continue
current_index += 1 current_index += 1
next_response = session_history[current_index]['response'] next_response = session_history[current_index]['response']
console.print(Panel(next_response, title=f"[bold green]Next Response ({current_index + 1}/{len(session_history)})[/]")) # Render as markdown with proper formatting
md = Markdown(next_response)
console.print(Panel(md, title=f"[bold green]Next Response ({current_index + 1}/{len(session_history)})[/]", title_align="left"))
app_logger.debug(f"Viewed next response at index {current_index}") app_logger.debug(f"Viewed next response at index {current_index}")
continue continue
elif user_input.lower() == "/stats": elif user_input.lower() == "/stats":
@@ -571,6 +927,7 @@ def chat():
session_history = [] session_history = []
current_index = -1 current_index = -1
session_system_prompt = "" session_system_prompt = ""
memory_start_index = 0 # Reset memory tracking
total_input_tokens = 0 total_input_tokens = 0
total_output_tokens = 0 total_output_tokens = 0
total_cost = 0.0 total_cost = 0.0
@@ -592,7 +949,7 @@ def chat():
console.print(f"[bold red]Model '{args}' not found.[/]") console.print(f"[bold red]Model '{args}' not found.[/]")
continue continue
# Display model info (unchanged) # Display model info
pricing = model_to_show.get("pricing", {}) pricing = model_to_show.get("pricing", {})
architecture = model_to_show.get("architecture", {}) architecture = model_to_show.get("architecture", {})
supported_params = ", ".join(model_to_show.get("supported_parameters", [])) or "None" supported_params = ", ".join(model_to_show.get("supported_parameters", [])) or "None"
@@ -610,6 +967,7 @@ def chat():
table.add_row("Input Modalities", ", ".join(architecture.get("input_modalities", [])) or "None") table.add_row("Input Modalities", ", ".join(architecture.get("input_modalities", [])) or "None")
table.add_row("Output Modalities", ", ".join(architecture.get("output_modalities", [])) or "None") table.add_row("Output Modalities", ", ".join(architecture.get("output_modalities", [])) or "None")
table.add_row("Supported Parameters", supported_params) table.add_row("Supported Parameters", supported_params)
table.add_row("Online Mode Support", "Yes" if supports_online_mode(model_to_show) else "No")
table.add_row("Top Provider Context Length", str(top_provider.get("context_length", "N/A"))) table.add_row("Top Provider Context Length", str(top_provider.get("context_length", "N/A")))
table.add_row("Max Completion Tokens", str(top_provider.get("max_completion_tokens", "N/A"))) table.add_row("Max Completion Tokens", str(top_provider.get("max_completion_tokens", "N/A")))
table.add_row("Moderated", "Yes" if top_provider.get("is_moderated", False) else "No") table.add_row("Moderated", "Yes" if top_provider.get("is_moderated", False) else "No")
@@ -617,7 +975,7 @@ def chat():
console.print(Panel(table, title=f"[bold green]Model Info: {model_to_show['name']}[/]", title_align="left")) console.print(Panel(table, title=f"[bold green]Model Info: {model_to_show['name']}[/]", title_align="left"))
continue continue
# Model selection (unchanged but with logging) # Model selection with colored checkmarks (removed Web column)
elif user_input.startswith("/model"): elif user_input.startswith("/model"):
app_logger.info("User initiated model selection") app_logger.info("User initiated model selection")
args = user_input[7:].strip() args = user_input[7:].strip()
@@ -628,10 +986,17 @@ def chat():
if not filtered_models: if not filtered_models:
console.print(f"[bold red]No models match '{search_term}'. Try '/model'.[/]") console.print(f"[bold red]No models match '{search_term}'. Try '/model'.[/]")
continue continue
table = Table("No.", "Name", "ID", show_header=True, header_style="bold magenta")
# Create table with colored checkmarks (removed Web column)
table = Table("No.", "Name", "ID", "Image", show_header=True, header_style="bold magenta")
for i, model in enumerate(filtered_models, 1): for i, model in enumerate(filtered_models, 1):
table.add_row(str(i), model["name"], model["id"]) image_support = "[green]✓[/green]" if has_image_capability(model) else "[red]✗[/red]"
console.print(Panel(table, title=f"[bold green]Available Models ({'All' if not search_term else f'Search: {search_term}'})[/]", title_align="left")) table.add_row(str(i), model["name"], model["id"], image_support)
# Use pagination for the table
title = f"[bold green]Available Models ({'All' if not search_term else f'Search: {search_term}'})[/]"
display_paginated_table(table, title)
while True: while True:
try: try:
choice = int(typer.prompt("Enter model number (or 0 to cancel)")) choice = int(typer.prompt("Enter model number (or 0 to cancel)"))
@@ -639,7 +1004,13 @@ def chat():
break break
if 1 <= choice <= len(filtered_models): if 1 <= choice <= len(filtered_models):
selected_model = filtered_models[choice - 1] selected_model = filtered_models[choice - 1]
# Disable online mode when switching models (user must re-enable)
if online_mode_enabled:
online_mode_enabled = False
console.print("[dim yellow]Note: Online mode auto-disabled when changing models.[/]")
console.print(f"[bold cyan]Selected: {selected_model['name']} ({selected_model['id']})[/]") console.print(f"[bold cyan]Selected: {selected_model['name']} ({selected_model['id']})[/]")
if supports_online_mode(selected_model):
console.print("[dim green]This model supports online mode. Use '/online on' to enable web search.[/]")
app_logger.info(f"Model selected: {selected_model['name']} ({selected_model['id']})") app_logger.info(f"Model selected: {selected_model['name']} ({selected_model['id']})")
break break
console.print("[bold red]Invalid choice. Try again.[/]") console.print("[bold red]Invalid choice. Try again.[/]")
@@ -692,6 +1063,7 @@ def chat():
if new_api_key.strip(): if new_api_key.strip():
set_config('api_key', new_api_key.strip()) set_config('api_key', new_api_key.strip())
API_KEY = new_api_key.strip() API_KEY = new_api_key.strip()
# Reinitialize client with new API key
client = OpenRouter(api_key=API_KEY) client = OpenRouter(api_key=API_KEY)
console.print("[bold green]API key updated![/]") console.print("[bold green]API key updated![/]")
else: else:
@@ -762,10 +1134,17 @@ def chat():
if not filtered_models: if not filtered_models:
console.print(f"[bold red]No models match '{search_term}'. Try without search.[/]") console.print(f"[bold red]No models match '{search_term}'. Try without search.[/]")
continue continue
table = Table("No.", "Name", "ID", show_header=True, header_style="bold magenta")
# Create table with colored checkmarks (removed Web column)
table = Table("No.", "Name", "ID", "Image", show_header=True, header_style="bold magenta")
for i, model in enumerate(filtered_models, 1): for i, model in enumerate(filtered_models, 1):
table.add_row(str(i), model["name"], model["id"]) image_support = "[green]✓[/green]" if has_image_capability(model) else "[red]✗[/red]"
console.print(Panel(table, title=f"[bold green]Available Models for Default ({'All' if not search_term else f'Search: {search_term}'})[/]", title_align="left")) table.add_row(str(i), model["name"], model["id"], image_support)
# Use pagination for the table
title = f"[bold green]Available Models for Default ({'All' if not search_term else f'Search: {search_term}'})[/]"
display_paginated_table(table, title)
while True: while True:
try: try:
choice = int(typer.prompt("Enter model number (or 0 to cancel)")) choice = int(typer.prompt("Enter model number (or 0 to cancel)"))
@@ -782,7 +1161,9 @@ def chat():
console.print("[bold red]Invalid input. Enter a number.[/]") console.print("[bold red]Invalid input. Enter a number.[/]")
else: else:
DEFAULT_MODEL_ID = get_config('default_model') DEFAULT_MODEL_ID = get_config('default_model')
table = Table("Setting", "Value", show_header=True, header_style="bold magenta") memory_status = "Enabled" if conversation_memory_enabled else "Disabled"
memory_tracked = len(session_history) - memory_start_index if conversation_memory_enabled else 0
table = Table("Setting", "Value", show_header=True, header_style="bold magenta", width=console.width - 10)
table.add_row("API Key", API_KEY or "[Not set]") table.add_row("API Key", API_KEY or "[Not set]")
table.add_row("Base URL", OPENROUTER_BASE_URL or "[Not set]") table.add_row("Base URL", OPENROUTER_BASE_URL or "[Not set]")
table.add_row("DB Path", str(database) or "[Not set]") table.add_row("DB Path", str(database) or "[Not set]")
@@ -790,13 +1171,17 @@ def chat():
table.add_row("Streaming", "Enabled" if STREAM_ENABLED == "on" else "Disabled") table.add_row("Streaming", "Enabled" if STREAM_ENABLED == "on" else "Disabled")
table.add_row("Default Model", DEFAULT_MODEL_ID or "[Not set]") table.add_row("Default Model", DEFAULT_MODEL_ID or "[Not set]")
table.add_row("Current Model", "[Not set]" if selected_model is None else str(selected_model["name"])) table.add_row("Current Model", "[Not set]" if selected_model is None else str(selected_model["name"]))
table.add_row("Online Mode", "Enabled" if online_mode_enabled else "Disabled")
table.add_row("Max Token", str(MAX_TOKEN)) table.add_row("Max Token", str(MAX_TOKEN))
table.add_row("Session Token", "[Not set]" if session_max_token == 0 else str(session_max_token)) table.add_row("Session Token", "[Not set]" if session_max_token == 0 else str(session_max_token))
table.add_row("Session System Prompt", session_system_prompt or "[Not set]") table.add_row("Session System Prompt", session_system_prompt or "[Not set]")
table.add_row("Cost Warning Threshold", f"${COST_WARNING_THRESHOLD:.4f}") table.add_row("Cost Warning Threshold", f"${COST_WARNING_THRESHOLD:.4f}")
table.add_row("Middle-out Transform", "Enabled" if middle_out_enabled else "Disabled") table.add_row("Middle-out Transform", "Enabled" if middle_out_enabled else "Disabled")
table.add_row("Conversation Memory", f"{memory_status} ({memory_tracked} tracked)" if conversation_memory_enabled else memory_status)
table.add_row("History Size", str(len(session_history))) table.add_row("History Size", str(len(session_history)))
table.add_row("Current History Index", str(current_index) if current_index >= 0 else "[None]") table.add_row("Current History Index", str(current_index) if current_index >= 0 else "[None]")
table.add_row("App Name", APP_NAME)
table.add_row("App URL", APP_URL)
credits = get_credits(API_KEY, OPENROUTER_BASE_URL) credits = get_credits(API_KEY, OPENROUTER_BASE_URL)
if credits: if credits:
@@ -808,7 +1193,7 @@ def chat():
table.add_row("Used Credits", "[Unavailable - Check API key]") table.add_row("Used Credits", "[Unavailable - Check API key]")
table.add_row("Credits Left", "[Unavailable - Check API key]") table.add_row("Credits Left", "[Unavailable - Check API key]")
console.print(Panel(table, title="[bold green]Current Configurations[/]", title_align="left")) console.print(Panel(table, title="[bold green]Current Configurations[/]", title_align="left", subtitle="[bold green]oAI Version %s" % version, subtitle_align="right"))
continue continue
if user_input.lower() == "/credits": if user_input.lower() == "/credits":
@@ -824,15 +1209,17 @@ def chat():
continue continue
if user_input.lower() == "/clear": if user_input.lower() == "/clear":
os.system("clear" if os.name == "posix" else "cls") clear_screen()
DEFAULT_MODEL_ID = get_config('default_model') DEFAULT_MODEL_ID = get_config('default_model')
token_value = session_max_token if session_max_token != 0 else " Not set" token_value = session_max_token if session_max_token != 0 else " Not set"
console.print(f"[bold cyan]Token limits: Max= {MAX_TOKEN}, Session={token_value}[/]") console.print(f"[bold cyan]Token limits: Max= {MAX_TOKEN}, Session={token_value}[/]")
console.print("[bold blue]Active model[/] [bold red]%s[/]" %(DEFAULT_MODEL_ID)) console.print("[bold blue]Active model[/] [bold red]%s[/]" %(str(selected_model["name"]) if selected_model else "None"))
if online_mode_enabled:
console.print("[bold cyan]Online mode: Enabled (web search active)[/]")
continue continue
if user_input.lower() == "/help": if user_input.lower() == "/help":
help_table = Table("Command", "Description", "Example", show_header=True, header_style="bold cyan") help_table = Table("Command", "Description", "Example", show_header=True, header_style="bold cyan", width=console.width - 10)
# ===== SESSION COMMANDS ===== # ===== SESSION COMMANDS =====
help_table.add_row( help_table.add_row(
@@ -850,11 +1237,26 @@ def chat():
"Show this help menu with all available commands.", "Show this help menu with all available commands.",
"/help" "/help"
) )
help_table.add_row(
"/memory [on|off]",
"Toggle conversation memory. ON sends history (AI remembers), OFF sends only current message (saves cost).",
"/memory\n/memory off"
)
help_table.add_row( help_table.add_row(
"/next", "/next",
"View the next response in history.", "View the next response in history.",
"/next" "/next"
) )
help_table.add_row(
"/online [on|off]",
"Enable/disable online mode (web search) for current model. Only works with models that support tools.",
"/online on\n/online off"
)
help_table.add_row(
"/paste [prompt]",
"Paste plain text/code from clipboard and send to AI. Optional prompt can be added.",
"/paste\n/paste Explain this code"
)
help_table.add_row( help_table.add_row(
"/prev", "/prev",
"View the previous response in history.", "View the previous response in history.",
@@ -879,12 +1281,12 @@ def chat():
) )
help_table.add_row( help_table.add_row(
"/info [model_id]", "/info [model_id]",
"Display detailed info (pricing, modalities, context length, etc.) for current or specified model.", "Display detailed info (pricing, modalities, context length, online support, etc.) for current or specified model.",
"/info\n/info gpt-4o" "/info\n/info gpt-4o"
) )
help_table.add_row( help_table.add_row(
"/model [search]", "/model [search]",
"Select or change the current model for the session. Supports searching by name or ID.", "Select or change the current model for the session. Supports searching by name or ID. Shows image capabilities.",
"/model\n/model gpt" "/model\n/model gpt"
) )
@@ -916,7 +1318,7 @@ def chat():
) )
help_table.add_row( help_table.add_row(
"/config model [search]", "/config model [search]",
"Set default model that loads on startup. Doesn't change current session model.", "Set default model that loads on startup. Doesn't change current session model. Shows image capabilities.",
"/config model gpt" "/config model gpt"
) )
help_table.add_row( help_table.add_row(
@@ -958,6 +1360,11 @@ def chat():
"", "",
"" ""
) )
help_table.add_row(
"/delete <name|number>",
"Delete a saved conversation by name or number (from /list). Requires confirmation.",
"/delete my_chat\n/delete 3"
)
help_table.add_row( help_table.add_row(
"/export <format> <file>", "/export <format> <file>",
"Export conversation to file. Formats: md (Markdown), json (JSON), html (HTML).", "Export conversation to file. Formats: md (Markdown), json (JSON), html (HTML).",
@@ -998,14 +1405,19 @@ def chat():
# ===== FILE ATTACHMENTS ===== # ===== FILE ATTACHMENTS =====
help_table.add_row( help_table.add_row(
"[bold yellow]━━━ FILE ATTACHMENTS ━━━[/]", "[bold yellow]━━━ INPUT METHODS ━━━[/]",
"", "",
"" ""
) )
help_table.add_row( help_table.add_row(
"@/path/to/file", "@/path/to/file",
"Attach code files ('.py', '.js', '.ts', '.cs', '.java', '.c', '.cpp', '.h', '.hpp', '.rb', '.ruby', '.php', '.swift', '.kt', '.kts', '.go', '.sh', '.bat', '.ps1', '.R', '.scala', '.pl', '.lua', '.dart', '.elm', '.xml', '.json', '.yaml', '.yml', '.md', '.txt') or images to messages using @path syntax.", "Attach files to messages: images (PNG, JPG, etc.), PDFs, and code files (.py, .js, etc.).",
"Debug this @script.py\nAnalyze @image.png" "Debug @script.py\nSummarize @document.pdf\nAnalyze @image.png"
)
help_table.add_row(
"Clipboard paste",
"Use /paste to send clipboard content (plain text/code) to AI.",
"/paste\n/paste Explain this"
) )
# ===== EXIT ===== # ===== EXIT =====
@@ -1024,7 +1436,7 @@ def chat():
help_table, help_table,
title="[bold cyan]oAI Chat Help (Version %s)[/]" % version, title="[bold cyan]oAI Chat Help (Version %s)[/]" % version,
title_align="center", title_align="center",
subtitle="💡 Tip: Commands are case-insensitive • Visit: https://iurl.no/oai", subtitle="💡 Tip: Commands are case-insensitive • Memory ON by default (toggle with /memory) • Visit: https://iurl.no/oai",
subtitle_align="center", subtitle_align="center",
border_style="cyan" border_style="cyan"
)) ))
@@ -1034,7 +1446,7 @@ def chat():
console.print("[bold yellow]Select a model first with '/model'.[/]") console.print("[bold yellow]Select a model first with '/model'.[/]")
continue continue
# Process file attachments (unchanged but log details) # Process file attachments with PDF support
content_blocks = [] content_blocks = []
text_part = user_input text_part = user_input
file_attachments = [] file_attachments = []
@@ -1053,20 +1465,48 @@ def chat():
try: try:
with open(expanded_path, 'rb') as f: with open(expanded_path, 'rb') as f:
file_data = f.read() file_data = f.read()
# Handle images
if mime_type and mime_type.startswith('image/'): if mime_type and mime_type.startswith('image/'):
if "image" not in selected_model.get("modalities", []): modalities = selected_model.get("architecture", {}).get("input_modalities", [])
if "image" not in modalities:
console.print("[bold red]Selected model does not support image attachments.[/]") console.print("[bold red]Selected model does not support image attachments.[/]")
console.print(f"[dim yellow]Supported modalities: {', '.join(modalities) if modalities else 'text only'}[/]")
continue continue
b64_data = base64.b64encode(file_data).decode('utf-8') b64_data = base64.b64encode(file_data).decode('utf-8')
content_blocks.append({"type": "image_url", "image_url": {"url": f"data:{mime_type};base64,{b64_data}"}}) content_blocks.append({"type": "image_url", "image_url": {"url": f"data:{mime_type};base64,{b64_data}"}})
console.print(f"[dim green]✓ Image attached: {os.path.basename(expanded_path)} ({file_size / 1024:.1f} KB)[/]")
# Handle PDFs
elif mime_type == 'application/pdf' or file_ext == '.pdf':
modalities = selected_model.get("architecture", {}).get("input_modalities", [])
# Check for various possible modality indicators for PDFs
supports_pdf = any(mod in modalities for mod in ["document", "pdf", "file"])
if not supports_pdf:
console.print("[bold red]Selected model does not support PDF attachments.[/]")
console.print(f"[dim yellow]Supported modalities: {', '.join(modalities) if modalities else 'text only'}[/]")
continue
b64_data = base64.b64encode(file_data).decode('utf-8')
content_blocks.append({"type": "image_url", "image_url": {"url": f"data:application/pdf;base64,{b64_data}"}})
console.print(f"[dim green]✓ PDF attached: {os.path.basename(expanded_path)} ({file_size / 1024:.1f} KB)[/]")
# Handle code/text files
elif (mime_type == 'text/plain' or file_ext in SUPPORTED_CODE_EXTENSIONS): elif (mime_type == 'text/plain' or file_ext in SUPPORTED_CODE_EXTENSIONS):
text_content = file_data.decode('utf-8') text_content = file_data.decode('utf-8')
content_blocks.append({"type": "text", "text": f"Code File: {os.path.basename(expanded_path)}\n\n{text_content}"}) content_blocks.append({"type": "text", "text": f"Code File: {os.path.basename(expanded_path)}\n\n{text_content}"})
console.print(f"[dim green]✓ Code file attached: {os.path.basename(expanded_path)} ({file_size / 1024:.1f} KB)[/]")
else: else:
console.print(f"[bold red]Unsupported file type ({mime_type}) for {expanded_path}. Supported types: images, plain text, and code files (.py, .js, etc.).[/]") console.print(f"[bold red]Unsupported file type ({mime_type}) for {expanded_path}.[/]")
console.print("[bold yellow]Supported types: images (PNG, JPG, etc.), PDFs, and code files (.py, .js, etc.)[/]")
continue continue
file_attachments.append(file_path) file_attachments.append(file_path)
app_logger.info(f"File attached: {os.path.basename(expanded_path)}, Type: {mime_type}, Size: {file_size} KB") app_logger.info(f"File attached: {os.path.basename(expanded_path)}, Type: {mime_type or file_ext}, Size: {file_size / 1024:.1f} KB")
except UnicodeDecodeError:
console.print(f"[bold red]Cannot decode {expanded_path} as UTF-8. File may be binary or use unsupported encoding.[/]")
app_logger.error(f"UTF-8 decode error for {expanded_path}")
continue
except Exception as e: except Exception as e:
console.print(f"[bold red]Error reading file {expanded_path}: {e}[/]") console.print(f"[bold red]Error reading file {expanded_path}: {e}[/]")
app_logger.error(f"File read error for {expanded_path}: {e}") app_logger.error(f"File read error for {expanded_path}: {e}")
@@ -1083,12 +1523,43 @@ def chat():
console.print("[bold red]Prompt cannot be empty.[/]") console.print("[bold red]Prompt cannot be empty.[/]")
continue continue
# Prepare API messages and params # Build API messages with conversation history if memory is enabled
api_messages = [{"role": "user", "content": message_content}] api_messages = []
# Add system prompt if set
if session_system_prompt: if session_system_prompt:
api_messages.insert(0, {"role": "system", "content": session_system_prompt}) api_messages.append({"role": "system", "content": session_system_prompt})
# Add conversation history only if memory is enabled (from memory start point onwards)
if conversation_memory_enabled:
# Only include history from when memory was last enabled
for i in range(memory_start_index, len(session_history)):
history_entry = session_history[i]
api_messages.append({
"role": "user",
"content": history_entry['prompt']
})
api_messages.append({
"role": "assistant",
"content": history_entry['response']
})
# Add current user message
api_messages.append({"role": "user", "content": message_content})
api_params = {"model": selected_model["id"], "messages": api_messages, "stream": STREAM_ENABLED == "on"} # Get effective model ID with :online suffix if enabled
effective_model_id = get_effective_model_id(selected_model["id"], online_mode_enabled)
# Build API params with app identification headers (using http_headers)
api_params = {
"model": effective_model_id,
"messages": api_messages,
"stream": STREAM_ENABLED == "on",
"http_headers": {
"HTTP-Referer": APP_URL,
"X-Title": APP_NAME
}
}
if session_max_token > 0: if session_max_token > 0:
api_params["max_tokens"] = session_max_token api_params["max_tokens"] = session_max_token
if middle_out_enabled: if middle_out_enabled:
@@ -1096,19 +1567,25 @@ def chat():
# Log API request # Log API request
file_count = len(file_attachments) file_count = len(file_attachments)
app_logger.info(f"API Request: Model '{selected_model['id']}', Prompt length: {len(text_part)} chars, {file_count} file(s) attached, Transforms: middle-out {'enabled' if middle_out_enabled else 'disabled'}.") history_messages_count = len(session_history) - memory_start_index if conversation_memory_enabled else 0
memory_status = "ON" if conversation_memory_enabled else "OFF"
online_status = "ON" if online_mode_enabled else "OFF"
app_logger.info(f"API Request: Model '{effective_model_id}' (Online: {online_status}), Prompt length: {len(text_part)} chars, {file_count} file(s) attached, Memory: {memory_status}, History sent: {history_messages_count} messages, Transforms: middle-out {'enabled' if middle_out_enabled else 'disabled'}, App: {APP_NAME} ({APP_URL}).")
# Send and handle response with metrics and timing # Send and handle response with metrics and timing
is_streaming = STREAM_ENABLED == "on" is_streaming = STREAM_ENABLED == "on"
if is_streaming: if is_streaming:
console.print("[bold green]Streaming response... (Press Ctrl+C to cancel)[/]") console.print("[bold green]Streaming response...[/] [dim](Press Ctrl+C to cancel)[/]")
if online_mode_enabled:
console.print("[dim cyan]🌐 Online mode active - model has web search access[/]")
console.print("") # Add spacing before response
else: else:
console.print("[bold green]Thinking...[/]", end="\r") console.print("[bold green]Thinking...[/]", end="\r")
start_time = time.time() # Start timing request start_time = time.time() # Start timing request
try: try:
response = client.chat.send(**api_params) response = client.chat.send(**api_params)
app_logger.info(f"API call successful for model '{selected_model['id']}'") app_logger.info(f"API call successful for model '{effective_model_id}'")
except Exception as e: except Exception as e:
console.print(f"[bold red]Error sending request: {e}[/]") console.print(f"[bold red]Error sending request: {e}[/]")
app_logger.error(f"API Error: {type(e).__name__}: {e}") app_logger.error(f"API Error: {type(e).__name__}: {e}")
@@ -1119,17 +1596,23 @@ def chat():
full_response = "" full_response = ""
if is_streaming: if is_streaming:
try: try:
for chunk in response: # Use Live display for smooth streaming with proper wrapping
if hasattr(chunk, 'error') and chunk.error: with Live("", console=console, refresh_per_second=10, auto_refresh=True) as live:
console.print(f"\n[bold red]Stream error: {chunk.error.message}[/]") for chunk in response:
app_logger.error(f"Stream error: {chunk.error.message}") if hasattr(chunk, 'error') and chunk.error:
break console.print(f"\n[bold red]Stream error: {chunk.error.message}[/]")
if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content: app_logger.error(f"Stream error: {chunk.error.message}")
content_chunk = chunk.choices[0].delta.content break
print(content_chunk, end="", flush=True) if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
full_response += content_chunk content_chunk = chunk.choices[0].delta.content
if full_response: full_response += content_chunk
console.print() # Update live display with markdown rendering
md = Markdown(full_response)
live.update(md)
# Add newline after streaming completes
console.print("")
except KeyboardInterrupt: except KeyboardInterrupt:
console.print("\n[bold yellow]Streaming cancelled![/]") console.print("\n[bold yellow]Streaming cancelled![/]")
app_logger.info("Streaming cancelled by user") app_logger.info("Streaming cancelled by user")
@@ -1139,7 +1622,11 @@ def chat():
console.print(f"\r{' ' * 20}\r", end="") console.print(f"\r{' ' * 20}\r", end="")
if full_response: if full_response:
console.print(Panel(full_response, title="[bold green]AI Response[/]", title_align="left")) # Render response with proper markdown formatting
if not is_streaming: # Only show panel for non-streaming (streaming already displayed)
md = Markdown(full_response)
console.print(Panel(md, title="[bold green]AI Response[/]", title_align="left", border_style="green"))
session_history.append({'prompt': user_input, 'response': full_response}) session_history.append({'prompt': user_input, 'response': full_response})
current_index = len(session_history) - 1 current_index = len(session_history) - 1
@@ -1155,10 +1642,17 @@ def chat():
message_count += 1 message_count += 1
# Log response metrics # Log response metrics
app_logger.info(f"[bold green]Response: Tokens - I:{input_tokens} O:{output_tokens} T:{input_tokens + output_tokens}, Cost: ${msg_cost:.4f}, Time: {response_time:.2f}s[/]") app_logger.info(f"Response: Tokens - I:{input_tokens} O:{output_tokens} T:{input_tokens + output_tokens}, Cost: ${msg_cost:.4f}, Time: {response_time:.2f}s, Online: {online_mode_enabled}")
# Per-message metrics display # Per-message metrics display with context info
console.print(f"[dim blue]Metrics: {input_tokens + output_tokens} tokens, ${msg_cost:.4f}, {response_time:.2f}s. Session total: {total_input_tokens + total_output_tokens} tokens, ${total_cost:.4f}[/]") if conversation_memory_enabled:
context_count = len(session_history) - memory_start_index
context_info = f", Context: {context_count} msg(s)" if context_count > 1 else ""
else:
context_info = ", Memory: OFF"
online_info = " 🌐" if online_mode_enabled else ""
console.print(f"\n[dim blue]📊 Metrics: {input_tokens + output_tokens} tokens | ${msg_cost:.4f} | {response_time:.2f}s{context_info}{online_info} | Session: {total_input_tokens + total_output_tokens} tokens | ${total_cost:.4f}[/]")
# Cost and credit alerts # Cost and credit alerts
warnings = [] warnings = []
@@ -1169,17 +1663,22 @@ def chat():
warning_alerts = check_credit_alerts(credits_data) warning_alerts = check_credit_alerts(credits_data)
warnings.extend(warning_alerts) warnings.extend(warning_alerts)
if warnings: if warnings:
warning_text = '|'.join(warnings) warning_text = ' | '.join(warnings)
console.print(f"[bold red]⚠️ {warning_text}[/]") console.print(f"[bold red]⚠️ {warning_text}[/]")
app_logger.warning(f"Warnings triggered: {warning_text}") app_logger.warning(f"Warnings triggered: {warning_text}")
# Add spacing before copy prompt
console.print("")
try: try:
copy_choice = input("Type 'c' to copy response, or press Enter: ").strip().lower() copy_choice = input("💾 Type 'c' to copy response, or press Enter to continue: ").strip().lower()
if copy_choice == "c": if copy_choice == "c":
pyperclip.copy(full_response) pyperclip.copy(full_response)
console.print("[bold green]Response copied![/]") console.print("[bold green]Response copied to clipboard![/]")
except (EOFError, KeyboardInterrupt): except (EOFError, KeyboardInterrupt):
pass pass
# Add spacing after interaction
console.print("")
else: else:
console.print("[bold red]No response received.[/]") console.print("[bold red]No response received.[/]")
app_logger.error("No response from API") app_logger.error("No response from API")