Add the ability to change the stored MAX_TOKEN limit (/config maxtoken) and to set a per-session token limit (/maxtoken) that overrides MAX_TOKEN for the current session. Neither value can exceed 1,000,000.
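How the two limits are meant to interact, as a minimal sketch distilled from the diff below (the helper names here are illustrative only and do not exist in oai.py):

    HARD_CAP = 1_000_000  # /config maxtoken is clamped to this upper bound

    def clamp_stored_max(value: int) -> int:
        # /config maxtoken: values above the hard cap are reduced to it;
        # values below 1 are rejected by the command handler instead.
        return min(value, HARD_CAP)

    def clamp_session_max(value: int, stored_max: int) -> int:
        # /maxtoken: the per-session limit may never exceed the stored MAX_TOKEN,
        # and it is pulled back down whenever the stored limit is lowered.
        return min(value, stored_max)

    # Only the session value is forwarded to the API:
    #     api_params["max_tokens"] = session_max_token   (when session_max_token > 0)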
oai.py (103 changed lines)
@@ -14,7 +14,7 @@ import pyperclip
 import mimetypes
 import base64
 import re
-import sqlite3 # Added for SQLite DB integration
+import sqlite3
 from prompt_toolkit import PromptSession
 
 app = typer.Typer()
@@ -57,6 +57,7 @@ API_KEY = get_config('api_key')
 OPENROUTER_BASE_URL = get_config('base_url') or "https://openrouter.ai/api/v1" # Default if not set
 STREAM_ENABLED = get_config('stream_enabled') or "on" # Default to streaming on
 DEFAULT_MODEL_ID = get_config('default_model') # Load default model ID from DB
+MAX_TOKEN = int(get_config('max_token') or "100000") # Load max token limit, default to 100k
 
 # Fetch models once at module level (with loaded BASE_URL)
 models_data = []
@@ -122,8 +123,8 @@ def clear_screen():
 @app.command()
 def chat():
     """Start the oAI chat app with OpenRouter models."""
-    global API_KEY, OPENROUTER_BASE_URL, STREAM_ENABLED # Allow updates via /config commands
-
+    global API_KEY, OPENROUTER_BASE_URL, STREAM_ENABLED, MAX_TOKEN # Declare globals for updates
+    session_max_token = 0
     if not API_KEY:
         console.print("[bold red]API key not found in database. Please set it with '/config api'.[/]")
         # Prompt for API key on startup if missing
@@ -145,7 +146,6 @@ def chat():
 
     # Initialize selected_model with default if available, else None (session-specific changes via /model)
     selected_model = selected_model_default # Set from DB load, or None
-
    client = OpenRouter(api_key=API_KEY)
    if selected_model:
        console.print("[bold blue]Welcome to oAI! Type your message, use '/help' for examples, or 'exit'/'quit'/'bye' to end.[/] \n[bold red]Active model : %s[/]" %(selected_model["name"]))
@@ -196,6 +196,26 @@ def chat():
                 console.print("[bold red]Invalid input. Enter a number.[/]")
                 continue
 
+        # Handle /maxtoken command (session limit)
+        if user_input.startswith("/maxtoken"):
+            args = user_input[10:].strip()
+            if not args:
+                console.print(f"[bold blue]Current session max tokens: {session_max_token}[/]")
+                continue
+            try:
+                new_limit = int(args)
+                if new_limit < 1:
+                    console.print("[bold red]Session token limit must be at least 1.[/]")
+                    continue
+                if new_limit > MAX_TOKEN:
+                    console.print(f"[bold yellow]Warning: Cannot exceed stored max token limit ({MAX_TOKEN}). Capping to {MAX_TOKEN}.[/]")
+                    new_limit = MAX_TOKEN
+                session_max_token = new_limit
+                console.print(f"[bold green]Session max tokens set to: {session_max_token}[/]")
+            except ValueError:
+                console.print("[bold red]Invalid token limit. Provide a positive integer.[/]")
+            continue
+
         # Handle /config command
         if user_input.startswith("/config"):
             args = user_input[8:].strip().lower() # Get args after "/config"
@@ -230,7 +250,30 @@ def chat():
                     console.print(f"[bold green]Streaming {'enabled' if sub_args == 'on' else 'disabled'}.[/]")
                 else:
                     console.print("[bold yellow]Usage: /config stream on|off[/]")
-            # **UPDATED:** Handle /config model for setting default only
+            # Handle /config maxtoken (global already declared at top)
+            elif args.startswith("maxtoken"):
+                sub_args = args[9:].strip()
+                if not sub_args:
+                    console.print(f"[bold blue]Stored max token limit: {MAX_TOKEN}[/]")
+                    continue
+                try:
+                    new_max = int(sub_args)
+                    if new_max < 1:
+                        console.print("[bold red]Max token limit must be at least 1.[/]")
+                        continue
+                    if new_max > 1000000: # Reasonable upper cap to prevent typos
+                        console.print("[bold yellow]Warning: Max token limit capped at 1M for safety.[/]")
+                        new_max = 1000000
+                    set_config('max_token', str(new_max))
+                    MAX_TOKEN = new_max
+                    console.print(f"[bold green]Stored max token limit updated to: {MAX_TOKEN}[/]")
+                    # Adjust session if needed
+                    if session_max_token > MAX_TOKEN:
+                        session_max_token = MAX_TOKEN
+                        console.print(f"[bold yellow]Session token limit adjusted to match: {session_max_token}[/]")
+                except ValueError:
+                    console.print("[bold red]Invalid token limit. Provide a positive integer.[/]")
+            # Handle /config model (unchanged)
             elif args.startswith("model"):
                 sub_args = args[6:].strip() # After "model" (optional search term)
                 search_term = sub_args if sub_args else ""
@@ -262,7 +305,7 @@ def chat():
                 except ValueError:
                     console.print("[bold red]Invalid input. Enter a number.[/]")
             else:
-                # /config with no args: Display current configs.
+                # Display all configs
                 DEFAULT_MODEL_ID = get_config('default_model') # Load default model ID from DB
                 table = Table("Setting", "Value", show_header=True, header_style="bold magenta")
                 table.add_row("API Key", API_KEY or "[Not set]")
@@ -270,6 +313,8 @@ def chat():
                 table.add_row("Streaming", "Enabled" if STREAM_ENABLED == "on" else "Disabled")
                 table.add_row("Default Model", DEFAULT_MODEL_ID or "[Not set]")
                 table.add_row("Current Model", "[Not set]" if selected_model is None else str(selected_model["name"]))
+                table.add_row("Max Token", str(MAX_TOKEN))
+                table.add_row("Session Token", "[Not set]" if session_max_token == 0 else str(session_max_token))
 
                 # Fetch and display credit info
                 credits = get_credits(API_KEY, OPENROUTER_BASE_URL)
@@ -285,6 +330,7 @@ def chat():
                 console.print(Panel(table, title="[bold green]Current Configurations[/]", title_align="left"))
             continue
 
+
         # **Handle /credits command to display credits left**
         if user_input.lower() == "/credits":
             credits = get_credits(API_KEY, OPENROUTER_BASE_URL)
@@ -296,8 +342,10 @@ def chat():
 
         # Handle /clear command to clear the screen
         if user_input.lower() == "/clear":
-            clear_screen()
+            os.system("clear" if os.name == "posix" else "cls") # Cross-platform clear
             DEFAULT_MODEL_ID = get_config('default_model')
+            token_value = session_max_token if session_max_token != 0 else " Not set"
+            console.print(f"[bold cyan]Token limits: Max= {MAX_TOKEN}, Session={token_value}[/]")
             console.print("[bold blue]Active model[/] [bold red]%s[/]" %(DEFAULT_MODEL_ID))
             continue
 
@@ -309,30 +357,40 @@ def chat():
                 "Select or change the current model for the session. Supports searching by name or ID (not persisted).",
                 "/model gpt\nYou: 1\n(Selects first matching model for this chat only)"
             )
+            help_table.add_row(
+                "/maxtoken [value]",
+                "Set temporary session token limit (≤ stored max). View limit if no value provided.",
+                "/maxtoken 2000\n[bold green]Session max tokens set to: 2000[/bold green]"
+            )
             help_table.add_row(
                 "/config model [search]",
                 "Set a default model that loads on startup; displays models for selection (persisted in DB, doesn't change current session model).",
                 "/config model gpt\n(Shows GPT models; user selects to set as default)"
             )
+            help_table.add_row(
+                "/config maxtoken [value]",
+                "Set stored max token limit (persisted in DB).",
+                "/config maxtoken 50000\n[bold green]Stored max token limit updated to: 50000[/bold green]"
+            )
             help_table.add_row(
                 "/config api",
                 "Set or update the OpenRouter API key.",
-                "/config api\nEnter new API key: sk-...\n[bold green]API key updated and client reinitialized![/bold green]"
+                "/config api\nEnter new API key: sk-...\n[bold green]API key updated and client reinitialized![/]"
             )
             help_table.add_row(
                 "/config url",
                 "Set or update the base URL for OpenRouter.",
-                "/config url\nEnter new base URL: https://api.example.com/v1\n[bold green]Base URL updated![/bold green]"
+                "/config url\nEnter new base URL: https://api.example.com/v1\n[bold green]Base URL updated![/]"
             )
             help_table.add_row(
                 "/config stream on/off",
                 "Enable or disable response streaming.",
-                "/config stream off\n[bold green]Streaming disabled.[/bold green]"
+                "/config stream off\n[bold green]Streaming disabled.[/]"
             )
             help_table.add_row(
                 "/config",
-                "View all current configurations, including credits.",
-                "/config\n(Displays table with credits info)"
+                "View all current configurations, including credits and token limits.",
+                "/config\n(Displays table with token limits & credits)"
             )
             help_table.add_row(
                 "/credits",
@@ -342,7 +400,7 @@ def chat():
             help_table.add_row(
                 "/clear",
                 "Clear the terminal screen for a clean interface.",
-                "/clear\n[bold cyan]Screen cleared. Ready for your next input![/bold cyan]"
+                "/clear\n[bold cyan]Screen cleared. Ready for your next input![/]"
             )
             help_table.add_row(
                 "/help",
@@ -357,7 +415,7 @@ def chat():
             help_table.add_row(
                 "Exiting",
                 "Quit the chat app with either 'exit', 'quit' or 'bye'",
-                "exit\n[bold yellow]Goodbye![/bold yellow]"
+                "exit\n[bold yellow]Goodbye![/]"
             )
             console.print(Panel(help_table, title="[bold cyan]oAI (Version %s) Chat Help - Command Examples[/]" %(version), title_align="center", subtitle="oAI can be found at https://iurl.no/oai", subtitle_align="center"))
             continue
@@ -372,7 +430,7 @@ def chat():
         file_attachments = []
 
         # Regex to find @path (e.g., @/Users/user/file.jpg or @c:\folder\file.txt)
-        file_pattern = r'@([^\s]+)' # @ followed by non spaces
+        file_pattern = r'@([^\s]+)' # @ followed by non-spaces
         for match in re.finditer(file_pattern, user_input):
             file_path = match.group(1)
             expanded_path = os.path.expanduser(os.path.abspath(file_path))
@@ -416,6 +474,15 @@ def chat():
             console.print("[bold red]Prompt cannot be empty.[/]")
             continue
 
+        # Prepare API call params, including max_tokens if session limit is set
+        api_params = {
+            "model": selected_model["id"],
+            "messages": [{"role": "user", "content": message_content}],
+            "stream": STREAM_ENABLED == "on"
+        }
+        if session_max_token > 0:
+            api_params["max_tokens"] = session_max_token
+
         # Send to model with streaming (enable/disable based on config)
         is_streaming = STREAM_ENABLED == "on"
         if is_streaming:
@@ -424,11 +491,7 @@ def chat():
             console.print("[bold green]Thinking...[/]", end="\r")
 
         try:
-            response = client.chat.send(
-                model=selected_model["id"],
-                messages=[{"role": "user", "content": message_content}],
-                stream=is_streaming # Enable/disable streaming
-            )
+            response = client.chat.send(**api_params)
         except Exception as e:
             console.print(f"[bold red]Error sending request: {e}[/]")
             continue
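An illustrative session with the new commands (prompt and messages are the ones added in the handlers above; the numbers are made up):

    You> /config maxtoken 50000
    Stored max token limit updated to: 50000
    You> /maxtoken 2000
    Session max tokens set to: 2000
    You> /maxtoken 60000
    Warning: Cannot exceed stored max token limit (50000). Capping to 50000.
    Session max tokens set to: 50000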
test.txt (new file, 40 lines)
@@ -0,0 +1,40 @@
+Welcome to oAI! Type your message, use '/help' for examples, or 'exit'/'quit'/'bye' to end.
+Active model : Perplexity: Sonar Reasoning Pro
+You>
+
+
+
+
+
+
+You>
+
+
+
+Input interrupted. Continuing...
+You>
+
+
+
+
+
+
+You>
+
+
+
+Input interrupted. Continuing...
+You>
+
+
+
+
+
+
+You>
+
+
+
+Input interrupted. Continuing...
+You>
+