From 1ef791829107b2d0783ee44946561c21ccab18fe Mon Sep 17 00:00:00 2001
From: Rune Olsen
Date: Tue, 30 Dec 2025 15:46:40 +0100
Subject: [PATCH] 1.9.6 (#1)

New functionality and bugfixes.

Reviewed-on: https://gitlab.pm/rune/oai/pulls/1
Co-authored-by: Rune Olsen
Co-committed-by: Rune Olsen
---
 README.md |   2 +-
 oai.py    | 486 ++++++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 454 insertions(+), 34 deletions(-)

diff --git a/README.md b/README.md
index 4275db3..0c91bd6 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ oAI is a command-line chat application that provides an interactive interface to
 
 - Python 3.7 or higher
 - OpenRouter API key (get one at https://openrouter.ai)
 
-## Screenshot
+## Screenshot (from version 1.0)
 
 [](https://gitlab.pm/rune/oai/src/branch/main/README.md)
diff --git a/oai.py b/oai.py
index 9bf2cc4..26b92ef 100644
--- a/oai.py
+++ b/oai.py
@@ -30,7 +30,7 @@ from packaging import version as pkg_version
 import io  # Added for custom handler
 
 # App version. Changes by author with new releases.
-version = '1.9.5'
+version = '1.9.6'
 
 app = typer.Typer()
 
@@ -60,6 +60,218 @@ VALID_COMMANDS = {
     '/info', '/model', '/maxtoken', '/system', '/config', '/credits', '/clear', '/cl', '/help'
 }
 
+# Detailed command help database
+COMMAND_HELP = {
+    '/clear': {
+        'aliases': ['/cl'],
+        'description': 'Clear the terminal screen for a clean interface.',
+        'usage': '/clear\n/cl',
+        'examples': [
+            ('Clear screen', '/clear'),
+            ('Using short alias', '/cl'),
+        ],
+        'notes': 'You can also use the keyboard shortcut Ctrl+L to clear the screen.'
+    },
+    '/help': {
+        'description': 'Display help information for commands.',
+        'usage': '/help [command]',
+        'examples': [
+            ('Show all commands', '/help'),
+            ('Get help for a specific command', '/help /model'),
+            ('Get help for config', '/help /config'),
+        ],
+        'notes': 'Use /help without arguments to see the full command list, or /help <command> for detailed information about a specific command.'
+    },
+    '/memory': {
+        'description': 'Toggle conversation memory. When ON, the AI remembers conversation history. When OFF, each request is independent (saves tokens and cost).',
+        'usage': '/memory [on|off]',
+        'examples': [
+            ('Check current memory status', '/memory'),
+            ('Enable conversation memory', '/memory on'),
+            ('Disable memory (save costs)', '/memory off'),
+        ],
+        'notes': 'Memory is ON by default. Disabling memory reduces API costs but the AI won\'t remember previous messages. Messages are still saved locally for your reference.'
+    },
+    '/online': {
+        'description': 'Enable or disable online mode (web search capabilities) for the current session.',
+        'usage': '/online [on|off]',
+        'examples': [
+            ('Check online mode status', '/online'),
+            ('Enable web search', '/online on'),
+            ('Disable web search', '/online off'),
+        ],
+        'notes': 'Not all models support online mode. The model must have "tools" parameter support. This setting overrides the default online mode configured with /config online.'
+    },
+    '/paste': {
+        'description': 'Paste plain text or code from clipboard and send to the AI. Optionally add a prompt.',
+        'usage': '/paste [prompt]',
+        'examples': [
+            ('Paste clipboard content', '/paste'),
+            ('Paste with a question', '/paste Explain this code'),
+            ('Paste and ask for review', '/paste Review this for bugs'),
+        ],
+        'notes': 'Only plain text is supported. Binary clipboard data will be rejected. The clipboard content is shown as a preview before sending.'
+    },
+    '/retry': {
+        'description': 'Resend the last prompt from conversation history.',
+        'usage': '/retry',
+        'examples': [
+            ('Retry last message', '/retry'),
+        ],
+        'notes': 'Useful when you get an error or want a different response to the same prompt. Requires at least one message in history.'
+    },
+    '/next': {
+        'description': 'View the next response in conversation history.',
+        'usage': '/next',
+        'examples': [
+            ('Navigate to next response', '/next'),
+        ],
+        'notes': 'Navigate through conversation history. Use /prev to go backward.'
+    },
+    '/prev': {
+        'description': 'View the previous response in conversation history.',
+        'usage': '/prev',
+        'examples': [
+            ('Navigate to previous response', '/prev'),
+        ],
+        'notes': 'Navigate through conversation history. Use /next to go forward.'
+    },
+    '/reset': {
+        'description': 'Clear conversation history and reset system prompt. This resets all session metrics.',
+        'usage': '/reset',
+        'examples': [
+            ('Reset conversation', '/reset'),
+        ],
+        'notes': 'Requires confirmation. This clears all message history, resets the system prompt, and resets token/cost counters. Use when starting a completely new conversation topic.'
+    },
+    '/info': {
+        'description': 'Display detailed information about a model including pricing, capabilities, context length, and online support.',
+        'usage': '/info [model_id]',
+        'examples': [
+            ('Show current model info', '/info'),
+            ('Show specific model info', '/info gpt-4o'),
+            ('Check model capabilities', '/info claude-3-opus'),
+        ],
+        'notes': 'Without arguments, shows info for the currently selected model. Displays pricing per million tokens, supported modalities (text, image, etc.), and parameter support.'
+    },
+    '/model': {
+        'description': 'Select or change the AI model for the current session. Shows image and online capabilities.',
+        'usage': '/model [search_term]',
+        'examples': [
+            ('List all models', '/model'),
+            ('Search for GPT models', '/model gpt'),
+            ('Search for Claude models', '/model claude'),
+        ],
+        'notes': 'Models are numbered for easy selection. The table shows Image (✓ if model accepts images) and Online (✓ if model supports web search) columns.'
+    },
+    '/config': {
+        'description': 'View or modify application configuration settings.',
+        'usage': '/config [setting] [value]',
+        'examples': [
+            ('View all settings', '/config'),
+            ('Set API key', '/config api'),
+            ('Set default model', '/config model'),
+            ('Enable streaming', '/config stream on'),
+            ('Set cost warning threshold', '/config costwarning 0.05'),
+            ('Set log level', '/config loglevel debug'),
+            ('Set default online mode', '/config online on'),
+        ],
+        'notes': 'Available settings: api (API key), url (base URL), model (default model), stream (on/off), costwarning (threshold $), maxtoken (limit), online (default on/off), log (size MB), loglevel (debug/info/warning/error/critical).'
+    },
+    '/maxtoken': {
+        'description': 'Set a temporary session token limit (cannot exceed stored max token limit).',
+        'usage': '/maxtoken [value]',
+        'examples': [
+            ('View current session limit', '/maxtoken'),
+            ('Set session limit to 2000', '/maxtoken 2000'),
+            ('Set to 50000', '/maxtoken 50000'),
+        ],
+        'notes': 'This is a session-only setting and cannot exceed the stored max token limit (set with /config maxtoken). View without arguments to see current value.'
+    },
+    '/system': {
+        'description': 'Set or clear the session-level system prompt to guide AI behavior.',
+        'usage': '/system [prompt|clear]',
+        'examples': [
+            ('View current system prompt', '/system'),
+            ('Set as Python expert', '/system You are a Python expert'),
+            ('Set as code reviewer', '/system You are a senior code reviewer. Focus on bugs and best practices.'),
+            ('Clear system prompt', '/system clear'),
+        ],
+        'notes': 'System prompts influence how the AI responds throughout the session. Use "clear" to remove the current system prompt.'
+    },
+    '/save': {
+        'description': 'Save the current conversation history to the database.',
+        'usage': '/save <name>',
+        'examples': [
+            ('Save conversation', '/save my_chat'),
+            ('Save with descriptive name', '/save python_debugging_2024'),
+        ],
+        'notes': 'Saved conversations can be loaded later with /load. Use descriptive names to easily find them later.'
+    },
+    '/load': {
+        'description': 'Load a saved conversation from the database by name or number.',
+        'usage': '/load <name|number>',
+        'examples': [
+            ('Load by name', '/load my_chat'),
+            ('Load by number from /list', '/load 3'),
+        ],
+        'notes': 'Use /list to see numbered conversations. Loading a conversation replaces current history and resets session metrics.'
+    },
+    '/delete': {
+        'description': 'Delete a saved conversation from the database. Requires confirmation.',
+        'usage': '/delete <name|number>',
+        'examples': [
+            ('Delete by name', '/delete my_chat'),
+            ('Delete by number from /list', '/delete 3'),
+        ],
+        'notes': 'Use /list to see numbered conversations. This action cannot be undone!'
+    },
+    '/list': {
+        'description': 'List all saved conversations with numbers, message counts, and timestamps.',
+        'usage': '/list',
+        'examples': [
+            ('Show saved conversations', '/list'),
+        ],
+        'notes': 'Conversations are numbered for easy use with /load and /delete commands. Shows message count and last saved time.'
+    },
+    '/export': {
+        'description': 'Export the current conversation to a file in various formats.',
+        'usage': '/export <format> <filename>',
+        'examples': [
+            ('Export as Markdown', '/export md notes.md'),
+            ('Export as JSON', '/export json conversation.json'),
+            ('Export as HTML', '/export html report.html'),
+        ],
+        'notes': 'Available formats: md (Markdown), json (JSON), html (HTML). The export includes all messages and the system prompt if set.'
+    },
+    '/stats': {
+        'description': 'Display session statistics including tokens, costs, and credits.',
+        'usage': '/stats',
+        'examples': [
+            ('View session statistics', '/stats'),
+        ],
+        'notes': 'Shows total input/output tokens, total cost, average cost per message, and remaining credits. Also displays credit warnings if applicable.'
+    },
+    '/credits': {
+        'description': 'Display your OpenRouter account credits and usage.',
+        'usage': '/credits',
+        'examples': [
+            ('Check credits', '/credits'),
+        ],
+        'notes': 'Shows total credits, used credits, and credits left. Displays warnings if credits are low (< $1 or < 10% of total).'
+    },
+    '/middleout': {
+        'description': 'Enable or disable middle-out transform to compress prompts exceeding context size.',
+        'usage': '/middleout [on|off]',
+        'examples': [
+            ('Check status', '/middleout'),
+            ('Enable compression', '/middleout on'),
+            ('Disable compression', '/middleout off'),
+        ],
+        'notes': 'Middle-out transform intelligently compresses long prompts to fit within model context limits. Useful for very long conversations.'
+    },
+}
+
 # Supported code file extensions
 SUPPORTED_CODE_EXTENSIONS = {
     '.py', '.js', '.ts', '.cs', '.java', '.c', '.cpp', '.h', '.hpp',
@@ -77,6 +289,15 @@ LOW_CREDIT_RATIO = 0.1  # Warn if credits left < 10% of total
 LOW_CREDIT_AMOUNT = 1.0  # Warn if credits left < $1 in absolute terms
 HIGH_COST_WARNING = "cost_warning_threshold"  # Configurable key for cost threshold, default $0.01
 
+# Valid log levels mapping
+VALID_LOG_LEVELS = {
+    'debug': logging.DEBUG,
+    'info': logging.INFO,
+    'warning': logging.WARNING,
+    'error': logging.ERROR,
+    'critical': logging.CRITICAL
+}
+
 # DB configuration
 database = config_dir / 'oai_config.db'
 DB_FILE = str(database)
@@ -148,27 +369,131 @@ class RotatingRichHandler(RotatingFileHandler):
         except Exception:
             self.handleError(record)
 
-# Get log configuration from DB
+# ============================================================================
+# LOGGING SETUP - MUST BE DONE AFTER CONFIG IS LOADED
+# ============================================================================
+
+# Load log configuration from DB FIRST (before creating handler)
 LOG_MAX_SIZE_MB = int(get_config('log_max_size_mb') or "10")
 LOG_BACKUP_COUNT = int(get_config('log_backup_count') or "2")
+LOG_LEVEL_STR = get_config('log_level') or "info"
+LOG_LEVEL = VALID_LOG_LEVELS.get(LOG_LEVEL_STR.lower(), logging.INFO)
 
-# Create the custom rotating handler
-app_handler = RotatingRichHandler(
-    filename=str(log_file),
-    maxBytes=LOG_MAX_SIZE_MB * 1024 * 1024,  # Convert MB to bytes
-    backupCount=LOG_BACKUP_COUNT,
-    encoding='utf-8'
-)
+# Global reference to the handler for dynamic reloading
+app_handler = None
+app_logger = None
 
-logging.basicConfig(
-    level=logging.NOTSET,
-    format="%(message)s",  # Rich formats it
-    datefmt="[%X]",
-    handlers=[app_handler]
-)
+def setup_logging():
+    """Setup or reset logging configuration with current settings."""
+    global app_handler, LOG_MAX_SIZE_MB, LOG_BACKUP_COUNT, LOG_LEVEL, app_logger
+
+    # Get the root logger
+    root_logger = logging.getLogger()
+
+    # Remove existing handler if present
+    if app_handler is not None:
+        root_logger.removeHandler(app_handler)
+        try:
+            app_handler.close()
+        except:
+            pass
+
+    # Check if log file needs immediate rotation
+    if os.path.exists(log_file):
+        current_size = os.path.getsize(log_file)
+        max_bytes = LOG_MAX_SIZE_MB * 1024 * 1024
+
+        if current_size >= max_bytes:
+            # Perform immediate rotation
+            import shutil
+            timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
+            backup_file = f"{log_file}.{timestamp}"
+            try:
+                shutil.move(str(log_file), backup_file)
+            except Exception as e:
+                print(f"Warning: Could not rotate log file: {e}")
+
+            # Clean up old backups if exceeding limit
+            log_dir = os.path.dirname(log_file)
+            log_basename = os.path.basename(log_file)
+            backup_pattern = f"{log_basename}.*"
+
+            import glob
+            backups = sorted(glob.glob(os.path.join(log_dir, backup_pattern)))
+
+            # Keep only the most recent backups
+            while len(backups) > LOG_BACKUP_COUNT:
+                oldest = backups.pop(0)
+                try:
+                    os.remove(oldest)
+                except:
+                    pass
+
+    # Create new handler with current settings
+    app_handler = RotatingRichHandler(
+        filename=str(log_file),
+        maxBytes=LOG_MAX_SIZE_MB * 1024 * 1024,
+        backupCount=LOG_BACKUP_COUNT,
+        encoding='utf-8'
+    )
+
+    # Set handler level to NOTSET so it processes all records
+    app_handler.setLevel(logging.NOTSET)
+
+    # Configure root logger - set to WARNING to suppress third-party library noise
+    root_logger.setLevel(logging.WARNING)
+    root_logger.addHandler(app_handler)
+
+    # Suppress noisy third-party loggers
+    # These libraries create DEBUG logs that pollute our log file
+    logging.getLogger('asyncio').setLevel(logging.WARNING)
+    logging.getLogger('urllib3').setLevel(logging.WARNING)
+    logging.getLogger('requests').setLevel(logging.WARNING)
+    logging.getLogger('httpx').setLevel(logging.WARNING)
+    logging.getLogger('httpcore').setLevel(logging.WARNING)
+    logging.getLogger('openai').setLevel(logging.WARNING)
+    logging.getLogger('openrouter').setLevel(logging.WARNING)
+
+    # Get or create app logger and set its level (this filters what gets logged)
+    app_logger = logging.getLogger("oai_app")
+    app_logger.setLevel(LOG_LEVEL)
+    # Propagate to the root logger so records reach the rotating handler
+    app_logger.propagate = True
+
+    return app_logger
 
-app_logger = logging.getLogger("oai_app")
-app_logger.setLevel(logging.INFO)
+# Initial logging setup
+app_logger = setup_logging()
+
+def set_log_level(level_str: str) -> bool:
+    """Set the application log level. Returns True if successful."""
+    global LOG_LEVEL, LOG_LEVEL_STR, app_logger
+    level_str_lower = level_str.lower()
+    if level_str_lower not in VALID_LOG_LEVELS:
+        return False
+    LOG_LEVEL = VALID_LOG_LEVELS[level_str_lower]
+    LOG_LEVEL_STR = level_str_lower
+
+    # Update the logger level immediately
+    if app_logger:
+        app_logger.setLevel(LOG_LEVEL)
+
+    return True
+
+def reload_logging_config():
+    """Reload logging configuration from database and reinitialize handler."""
+    global LOG_MAX_SIZE_MB, LOG_BACKUP_COUNT, LOG_LEVEL, LOG_LEVEL_STR, app_logger
+
+    # Reload from database
+    LOG_MAX_SIZE_MB = int(get_config('log_max_size_mb') or "10")
+    LOG_BACKUP_COUNT = int(get_config('log_backup_count') or "2")
+    LOG_LEVEL_STR = get_config('log_level') or "info"
+    LOG_LEVEL = VALID_LOG_LEVELS.get(LOG_LEVEL_STR.lower(), logging.INFO)
+
+    # Reinitialize logging
+    app_logger = setup_logging()
+
+    return app_logger
 
 # ============================================================================
 # END OF LOGGING SETUP
@@ -408,7 +733,64 @@ def export_as_html(session_history: List[Dict[str, str]], session_system_prompt:
     return "\n".join(html_parts)
 
 
-# Load configs
+def show_command_help(command: str):
+    """Display detailed help for a specific command."""
+    # Normalize command to ensure it starts with /
+    if not command.startswith('/'):
+        command = '/' + command
+
+    # Check if command exists
+    if command not in COMMAND_HELP:
+        console.print(f"[bold red]Unknown command: {command}[/]")
+        console.print("[bold yellow]Type /help to see all available commands.[/]")
+        app_logger.warning(f"Help requested for unknown command: {command}")
+        return
+
+    help_data = COMMAND_HELP[command]
+
+    # Create detailed help panel
+    help_content = []
+
+    # Aliases if available
+    if 'aliases' in help_data:
+        aliases_str = ", ".join(help_data['aliases'])
+        help_content.append(f"[bold cyan]Aliases:[/] {aliases_str}")
+        help_content.append("")
+
+    # Description
+    help_content.append(f"[bold cyan]Description:[/]")
+    help_content.append(help_data['description'])
+    help_content.append("")
+
+    # Usage
+    help_content.append(f"[bold cyan]Usage:[/]")
+    help_content.append(f"[yellow]{help_data['usage']}[/]")
+    help_content.append("")
+
+    # Examples
+    if 'examples' in help_data and help_data['examples']:
+        help_content.append(f"[bold cyan]Examples:[/]")
+        for desc, example in help_data['examples']:
+            help_content.append(f"  [dim]{desc}:[/]")
+            help_content.append(f"    [green]{example}[/]")
+        help_content.append("")
+
+    # Notes
+    if 'notes' in help_data:
+        help_content.append(f"[bold cyan]Notes:[/]")
+        help_content.append(f"[dim]{help_data['notes']}[/]")
+
+    console.print(Panel(
+        "\n".join(help_content),
+        title=f"[bold green]Help: {command}[/]",
+        title_align="left",
+        border_style="green",
+        width=console.width - 4
+    ))
+
+    app_logger.info(f"Displayed detailed help for command: {command}")
+
+# Load configs (AFTER logging is set up)
 API_KEY = get_config('api_key')
 OPENROUTER_BASE_URL = get_config('base_url') or "https://openrouter.ai/api/v1"
 STREAM_ENABLED = get_config('stream_enabled') or "on"
@@ -618,7 +1000,7 @@ def display_paginated_table(table: Table, title: str):
 
 @app.command()
 def chat():
-    global API_KEY, OPENROUTER_BASE_URL, STREAM_ENABLED, MAX_TOKEN, COST_WARNING_THRESHOLD, DEFAULT_ONLINE_MODE, LOG_MAX_SIZE_MB, LOG_BACKUP_COUNT
+    global API_KEY, OPENROUTER_BASE_URL, STREAM_ENABLED, MAX_TOKEN, COST_WARNING_THRESHOLD, DEFAULT_ONLINE_MODE, LOG_MAX_SIZE_MB, LOG_BACKUP_COUNT, LOG_LEVEL, LOG_LEVEL_STR, app_logger
     session_max_token = 0
     session_system_prompt = ""
     session_history = []
@@ -1028,9 +1410,13 @@
             console.print("[bold yellow]Usage: /middleout on|off (or /middleout to view status)[/]")
             continue
         elif user_input.lower() == "/reset":
-            confirm = typer.confirm("Reset conversation context? This clears history and prompt.", default=False)
-            if not confirm:
-                console.print("[bold yellow]Reset cancelled.[/]")
+            try:
+                confirm = typer.confirm("Reset conversation context? This clears history and prompt.", default=False)
+                if not confirm:
+                    console.print("[bold yellow]Reset cancelled.[/]")
+                    continue
+            except (EOFError, KeyboardInterrupt):
+                console.print("\n[bold yellow]Reset cancelled.[/]")
                 continue
             session_history = []
             current_index = -1
@@ -1213,11 +1599,28 @@
                     console.print(f"[bold green]Streaming {'enabled' if sub_args == 'on' else 'disabled'}.[/]")
                 else:
                     console.print("[bold yellow]Usage: /config stream on|off[/]")
+            elif args.startswith("loglevel"):
+                sub_args = args[8:].strip()
+                if not sub_args:
+                    console.print(f"[bold blue]Current log level: {LOG_LEVEL_STR.upper()}[/]")
+                    console.print(f"[dim yellow]Valid levels: debug, info, warning, error, critical[/]")
+                    continue
+                if sub_args.lower() in VALID_LOG_LEVELS:
+                    if set_log_level(sub_args):
+                        set_config('log_level', sub_args.lower())
+                        console.print(f"[bold green]Log level set to: {sub_args.upper()}[/]")
+                        app_logger.info(f"Log level changed to {sub_args.upper()}")
+                    else:
+                        console.print(f"[bold red]Failed to set log level.[/]")
+                else:
+                    console.print(f"[bold red]Invalid log level: {sub_args}[/]")
+                    console.print(f"[bold yellow]Valid levels: debug, info, warning, error, critical[/]")
             elif args.startswith("log"):
                 sub_args = args[4:].strip()
                 if not sub_args:
                     console.print(f"[bold blue]Current log file size limit: {LOG_MAX_SIZE_MB} MB[/]")
                     console.print(f"[bold blue]Log backup count: {LOG_BACKUP_COUNT} files[/]")
+                    console.print(f"[bold blue]Log level: {LOG_LEVEL_STR.upper()}[/]")
                    console.print(f"[dim yellow]Total max disk usage: ~{LOG_MAX_SIZE_MB * (LOG_BACKUP_COUNT + 1)} MB[/]")
                     continue
                 try:
@@ -1230,9 +1633,13 @@
                         new_size_mb = 100
                     set_config('log_max_size_mb', str(new_size_mb))
                     LOG_MAX_SIZE_MB = new_size_mb
-                    console.print(f"[bold green]Log size limit set to {new_size_mb} MB.[/]")
-                    console.print("[bold yellow]⚠️ Restart the application for this change to take effect.[/]")
-                    app_logger.info(f"Log size limit updated to {new_size_mb} MB (requires restart)")
+
+                    # Reload logging configuration immediately
+                    app_logger = reload_logging_config()
+
+                    console.print(f"[bold green]Log size limit set to {new_size_mb} MB and applied immediately.[/]")
+                    console.print(f"[dim cyan]Log file rotated if it exceeded the new limit.[/]")
+                    app_logger.info(f"Log size limit updated to {new_size_mb} MB and reloaded")
                 except ValueError:
                     console.print("[bold red]Invalid size. Provide a number in MB.[/]")
             elif args.startswith("online"):
@@ -1315,6 +1722,7 @@
             table.add_row("Logfile", str(log_file) or "[Not set]")
             table.add_row("Log Size Limit", f"{LOG_MAX_SIZE_MB} MB")
             table.add_row("Log Backups", str(LOG_BACKUP_COUNT))
+            table.add_row("Log Level", LOG_LEVEL_STR.upper())
             table.add_row("Streaming", "Enabled" if STREAM_ENABLED == "on" else "Disabled")
             table.add_row("Default Model", DEFAULT_MODEL_ID or "[Not set]")
             table.add_row("Current Model", "[Not set]" if selected_model is None else str(selected_model["name"]))
@@ -1366,7 +1774,15 @@
            console.print("[bold cyan]Online mode: Enabled (web search active)[/]")
             continue
 
-        if user_input.lower() == "/help":
+        if user_input.lower().startswith("/help"):
+            args = user_input[6:].strip()
+
+            # If a specific command is requested
+            if args:
+                show_command_help(args)
+                continue
+
+            # Otherwise show the full help menu
             help_table = Table("Command", "Description", "Example", show_header=True, header_style="bold cyan", width=console.width - 10)
 
             # SESSION COMMANDS
@@ -1381,9 +1797,9 @@
                 "/clear\n/cl"
             )
             help_table.add_row(
-                "/help",
-                "Show this help menu with all available commands.",
-                "/help"
+                "/help [command]",
+                "Show this help menu or get detailed help for a specific command.",
+                "/help\n/help /model"
             )
             help_table.add_row(
                 "/memory [on|off]",
@@ -1461,9 +1877,14 @@
             )
             help_table.add_row(
                 "/config log [size_mb]",
-                "Set log file size limit in MB. Older logs are rotated automatically. Requires restart.",
+                "Set log file size limit in MB. Older logs are rotated automatically. Takes effect immediately.",
                 "/config log 20"
             )
+            help_table.add_row(
+                "/config loglevel [level]",
+                "Set log verbosity level. Valid levels: debug, info, warning, error, critical. Takes effect immediately.",
+                "/config loglevel debug\n/config loglevel warning"
+            )
             help_table.add_row(
                 "/config maxtoken [value]",
                 "Set stored max token limit (persisted in DB). View current if no value provided.",
@@ -1599,7 +2020,7 @@
             help_table,
             title="[bold cyan]oAI Chat Help (Version %s)[/]" % version,
             title_align="center",
-            subtitle="💡 Tip: Commands are case-insensitive • Memory ON by default (toggle with /memory) • Use // to escape / at start of input • Visit: https://iurl.no/oai",
+            subtitle="💡 Tip: Commands are case-insensitive • Use /help for detailed help • Memory ON by default • Use // to escape / • Visit: https://iurl.no/oai",
             subtitle_align="center",
             border_style="cyan"
         ))
@@ -1608,8 +2029,7 @@
         if not selected_model:
            console.print("[bold yellow]Select a model first with '/model'.[/]")
             continue
-
-        # Process file attachments with PDF support
+        # Process file attachments with PDF support
         content_blocks = []
         text_part = user_input
         file_attachments = []