diff --git a/docs/docs/04-command-line-reference/gptscript.md b/docs/docs/04-command-line-reference/gptscript.md
index 0c485603..6cd3feb6 100644
--- a/docs/docs/04-command-line-reference/gptscript.md
+++ b/docs/docs/04-command-line-reference/gptscript.md
@@ -12,37 +12,38 @@ gptscript [flags] PROGRAM_FILE [INPUT...]
 ### Options
 
 ```
-      --cache-dir string                 Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
-      --chat-state string                The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE)
-  -C, --chdir string                     Change current working directory ($GPTSCRIPT_CHDIR)
-      --color                            Use color in output (default true) ($GPTSCRIPT_COLOR)
-      --config string                    Path to GPTScript config file ($GPTSCRIPT_CONFIG)
-      --confirm                          Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
-      --credential-context string        Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
-      --credential-override strings      Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
-      --debug                            Enable debug logging ($GPTSCRIPT_DEBUG)
-      --debug-messages                   Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
-      --default-model string             Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
-      --disable-cache                    Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
-      --disable-tui                      Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI)
-      --dump-state string                Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
-      --events-stream-to string          Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
-      --force-chat                       Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT)
-      --force-sequential                 Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL)
-  -h, --help                             help for gptscript
-  -f, --input string                     Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
-      --list-models                      List the models available and exit ($GPTSCRIPT_LIST_MODELS)
-      --list-tools                       List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS)
-      --no-trunc                         Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
-      --openai-api-key string            OpenAI API KEY ($OPENAI_API_KEY)
-      --openai-base-url string           OpenAI base URL ($OPENAI_BASE_URL)
-      --openai-org-id string             OpenAI organization ID ($OPENAI_ORG_ID)
-  -o, --output string                    Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
-  -q, --quiet                            No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
-      --save-chat-state-file string      A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE)
-      --sub-tool string                  Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL)
-      --ui                               Launch the UI ($GPTSCRIPT_UI)
-      --workspace string                 Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
+      --cache-dir string                   Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
+      --chat-state string                  The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE)
+  -C, --chdir string                       Change current working directory ($GPTSCRIPT_CHDIR)
+      --color                              Use color in output (default true) ($GPTSCRIPT_COLOR)
+      --config string                      Path to GPTScript config file ($GPTSCRIPT_CONFIG)
+      --confirm                            Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
+      --credential-context string          Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
+      --credential-override strings        Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
+      --debug                              Enable debug logging ($GPTSCRIPT_DEBUG)
+      --debug-messages                     Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
+      --default-model string               Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
+      --default-model-provider string      Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER)
+      --disable-cache                      Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
+      --disable-tui                        Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI)
+      --dump-state string                  Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
+      --events-stream-to string            Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
+      --force-chat                         Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT)
+      --force-sequential                   Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL)
+  -h, --help                               help for gptscript
+  -f, --input string                       Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
+      --list-models                        List the models available and exit ($GPTSCRIPT_LIST_MODELS)
+      --list-tools                         List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS)
+      --no-trunc                           Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
+      --openai-api-key string              OpenAI API KEY ($OPENAI_API_KEY)
+      --openai-base-url string             OpenAI base URL ($OPENAI_BASE_URL)
+      --openai-org-id string               OpenAI organization ID ($OPENAI_ORG_ID)
+  -o, --output string                      Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
+  -q, --quiet                              No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
+      --save-chat-state-file string        A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE)
+      --sub-tool string                    Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL)
+      --ui                                 Launch the UI ($GPTSCRIPT_UI)
+      --workspace string                   Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
 ```
 
 ### SEE ALSO
diff --git a/docs/docs/04-command-line-reference/gptscript_eval.md b/docs/docs/04-command-line-reference/gptscript_eval.md
index 0fdd0249..ff9e6446 100644
--- a/docs/docs/04-command-line-reference/gptscript_eval.md
+++ b/docs/docs/04-command-line-reference/gptscript_eval.md
@@ -25,27 +25,28 @@ gptscript eval [flags]
 ### Options inherited from parent commands
 
 ```
-      --cache-dir string                 Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
-  -C, --chdir string                     Change current working directory ($GPTSCRIPT_CHDIR)
-      --color                            Use color in output (default true) ($GPTSCRIPT_COLOR)
-      --config string                    Path to GPTScript config file ($GPTSCRIPT_CONFIG)
-      --confirm                          Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
-      --credential-context string        Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
-      --credential-override strings      Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
-      --debug                            Enable debug logging ($GPTSCRIPT_DEBUG)
-      --debug-messages                   Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
-      --default-model string             Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
-      --disable-cache                    Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
-      --dump-state string                Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
-      --events-stream-to string          Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
-  -f, --input string                     Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
-      --no-trunc                         Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
-      --openai-api-key string            OpenAI API KEY ($OPENAI_API_KEY)
-      --openai-base-url string           OpenAI base URL ($OPENAI_BASE_URL)
-      --openai-org-id string             OpenAI organization ID ($OPENAI_ORG_ID)
-  -o, --output string                    Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
-  -q, --quiet                            No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
-      --workspace string                 Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
+      --cache-dir string                   Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
+  -C, --chdir string                       Change current working directory ($GPTSCRIPT_CHDIR)
+      --color                              Use color in output (default true) ($GPTSCRIPT_COLOR)
+      --config string                      Path to GPTScript config file ($GPTSCRIPT_CONFIG)
+      --confirm                            Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
+      --credential-context string          Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
+      --credential-override strings        Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
+      --debug                              Enable debug logging ($GPTSCRIPT_DEBUG)
+      --debug-messages                     Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
+      --default-model string               Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
+      --default-model-provider string      Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER)
+      --disable-cache                      Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
+      --dump-state string                  Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
+      --events-stream-to string            Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
+  -f, --input string                       Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
+      --no-trunc                           Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
+      --openai-api-key string              OpenAI API KEY ($OPENAI_API_KEY)
+      --openai-base-url string             OpenAI base URL ($OPENAI_BASE_URL)
+      --openai-org-id string               OpenAI organization ID ($OPENAI_ORG_ID)
+  -o, --output string                      Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
+  -q, --quiet                              No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
+      --workspace string                   Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
 ```
 
 ### SEE ALSO
diff --git a/docs/docs/04-command-line-reference/gptscript_fmt.md b/docs/docs/04-command-line-reference/gptscript_fmt.md
index 5780c838..7aceb957 100644
--- a/docs/docs/04-command-line-reference/gptscript_fmt.md
+++ b/docs/docs/04-command-line-reference/gptscript_fmt.md
@@ -19,27 +19,28 @@ gptscript fmt [flags]
 ### Options inherited from parent commands
 
 ```
-      --cache-dir string                 Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
-  -C, --chdir string                     Change current working directory ($GPTSCRIPT_CHDIR)
-      --color                            Use color in output (default true) ($GPTSCRIPT_COLOR)
-      --config string                    Path to GPTScript config file ($GPTSCRIPT_CONFIG)
-      --confirm                          Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
-      --credential-context string        Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
-      --credential-override strings      Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
-      --debug                            Enable debug logging ($GPTSCRIPT_DEBUG)
-      --debug-messages                   Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
-      --default-model string             Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
-      --disable-cache                    Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
-      --dump-state string                Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
-      --events-stream-to string          Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
-  -f, --input string                     Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
-      --no-trunc                         Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
-      --openai-api-key string            OpenAI API KEY ($OPENAI_API_KEY)
-      --openai-base-url string           OpenAI base URL ($OPENAI_BASE_URL)
-      --openai-org-id string             OpenAI organization ID ($OPENAI_ORG_ID)
-  -o, --output string                    Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
-  -q, --quiet                            No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
-      --workspace string                 Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
+      --cache-dir string                   Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
+  -C, --chdir string                       Change current working directory ($GPTSCRIPT_CHDIR)
+      --color                              Use color in output (default true) ($GPTSCRIPT_COLOR)
+      --config string                      Path to GPTScript config file ($GPTSCRIPT_CONFIG)
+      --confirm                            Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
+      --credential-context string          Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
+      --credential-override strings        Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
+      --debug                              Enable debug logging ($GPTSCRIPT_DEBUG)
+      --debug-messages                     Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
+      --default-model string               Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
+      --default-model-provider string      Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER)
+      --disable-cache                      Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
+      --dump-state string                  Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
+      --events-stream-to string            Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
+  -f, --input string                       Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
+      --no-trunc                           Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
+      --openai-api-key string              OpenAI API KEY ($OPENAI_API_KEY)
+      --openai-base-url string             OpenAI base URL ($OPENAI_BASE_URL)
+      --openai-org-id string               OpenAI organization ID ($OPENAI_ORG_ID)
+  -o, --output string                      Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
+  -q, --quiet                              No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
+      --workspace string                   Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
 ```
 
 ### SEE ALSO
diff --git a/docs/docs/04-command-line-reference/gptscript_parse.md b/docs/docs/04-command-line-reference/gptscript_parse.md
index 680aebf6..3d84622b 100644
--- a/docs/docs/04-command-line-reference/gptscript_parse.md
+++ b/docs/docs/04-command-line-reference/gptscript_parse.md
@@ -19,27 +19,28 @@ gptscript parse [flags]
 ### Options inherited from parent commands
 
 ```
-      --cache-dir string                 Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
-  -C, --chdir string                     Change current working directory ($GPTSCRIPT_CHDIR)
-      --color                            Use color in output (default true) ($GPTSCRIPT_COLOR)
-      --config string                    Path to GPTScript config file ($GPTSCRIPT_CONFIG)
-      --confirm                          Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
-      --credential-context string        Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
-      --credential-override strings      Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
-      --debug                            Enable debug logging ($GPTSCRIPT_DEBUG)
-      --debug-messages                   Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
-      --default-model string             Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
-      --disable-cache                    Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
-      --dump-state string                Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
-      --events-stream-to string          Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
-  -f, --input string                     Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
-      --no-trunc                         Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
-      --openai-api-key string            OpenAI API KEY ($OPENAI_API_KEY)
-      --openai-base-url string           OpenAI base URL ($OPENAI_BASE_URL)
-      --openai-org-id string             OpenAI organization ID ($OPENAI_ORG_ID)
-  -o, --output string                    Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
-  -q, --quiet                            No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
-      --workspace string                 Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
+      --cache-dir string                   Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
+  -C, --chdir string                       Change current working directory ($GPTSCRIPT_CHDIR)
+      --color                              Use color in output (default true) ($GPTSCRIPT_COLOR)
+      --config string                      Path to GPTScript config file ($GPTSCRIPT_CONFIG)
+      --confirm                            Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
+      --credential-context string          Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
+      --credential-override strings        Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
+      --debug                              Enable debug logging ($GPTSCRIPT_DEBUG)
+      --debug-messages                     Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
+      --default-model string               Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
+      --default-model-provider string      Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER)
+      --disable-cache                      Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
+      --dump-state string                  Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
+      --events-stream-to string            Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
+  -f, --input string                       Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
+      --no-trunc                           Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
+      --openai-api-key string              OpenAI API KEY ($OPENAI_API_KEY)
+      --openai-base-url string             OpenAI base URL ($OPENAI_BASE_URL)
+      --openai-org-id string               OpenAI organization ID ($OPENAI_ORG_ID)
+  -o, --output string                      Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
+  -q, --quiet                              No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
+      --workspace string                   Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
 ```
 
 ### SEE ALSO
diff --git a/pkg/cli/gptscript.go b/pkg/cli/gptscript.go
index c22a25d2..84d7de71 100644
--- a/pkg/cli/gptscript.go
+++ b/pkg/cli/gptscript.go
@@ -54,24 +54,25 @@ type GPTScript struct {
 	Output         string `usage:"Save output to a file, or - for stdout" short:"o"`
 	EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\\\.\\pipe\\my-pipe)" name:"events-stream-to"`
 	// Input should not be using GPTSCRIPT_INPUT env var because that is the same value that is set in tool executions
-	Input              string   `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"`
-	SubTool            string   `usage:"Use tool of this name, not the first tool in file" local:"true"`
-	Assemble           bool     `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"`
-	ListModels         bool     `usage:"List the models available and exit" local:"true"`
-	ListTools          bool     `usage:"List built-in tools and exit" local:"true"`
-	ListenAddress      string   `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"`
-	Chdir              string   `usage:"Change current working directory" short:"C"`
-	Daemon             bool     `usage:"Run tool as a daemon" local:"true" hidden:"true"`
-	Ports              string   `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"`
-	CredentialContext  string   `usage:"Context name in which to store credentials" default:"default"`
-	CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"`
-	ChatState          string   `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"`
-	ForceChat          bool     `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"`
-	ForceSequential    bool     `usage:"Force parallel calls to run sequentially" local:"true"`
-	Workspace          string   `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"`
-	UI                 bool     `usage:"Launch the UI" local:"true" name:"ui"`
-	DisableTUI         bool     `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"`
-	SaveChatStateFile  string   `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"`
+	Input                string   `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"`
+	SubTool              string   `usage:"Use tool of this name, not the first tool in file" local:"true"`
+	Assemble             bool     `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"`
+	ListModels           bool     `usage:"List the models available and exit" local:"true"`
+	ListTools            bool     `usage:"List built-in tools and exit" local:"true"`
+	ListenAddress        string   `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"`
+	Chdir                string   `usage:"Change current working directory" short:"C"`
+	Daemon               bool     `usage:"Run tool as a daemon" local:"true" hidden:"true"`
+	Ports                string   `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"`
+	CredentialContext    string   `usage:"Context name in which to store credentials" default:"default"`
+	CredentialOverride   []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"`
+	ChatState            string   `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"`
+	ForceChat            bool     `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"`
+	ForceSequential      bool     `usage:"Force parallel calls to run sequentially" local:"true"`
+	Workspace            string   `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"`
+	UI                   bool     `usage:"Launch the UI" local:"true" name:"ui"`
+	DisableTUI           bool     `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"`
+	SaveChatStateFile    string   `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"`
+	DefaultModelProvider string   `usage:"Default LLM model provider to use, this will override OpenAI settings"`
 
 	readData []byte
 }
@@ -136,11 +137,12 @@ func (r *GPTScript) NewGPTScriptOpts() (gptscript.Options, error) {
 			CredentialOverrides: r.CredentialOverride,
 			Sequential:          r.ForceSequential,
 		},
-		Quiet:               r.Quiet,
-		Env:                 os.Environ(),
-		CredentialContext:   r.CredentialContext,
-		Workspace:           r.Workspace,
-		DisablePromptServer: r.UI,
+		Quiet:                r.Quiet,
+		Env:                  os.Environ(),
+		CredentialContext:    r.CredentialContext,
+		Workspace:            r.Workspace,
+		DisablePromptServer:  r.UI,
+		DefaultModelProvider: r.DefaultModelProvider,
 	}
 
 	if r.Confirm {
diff --git a/pkg/gptscript/gptscript.go b/pkg/gptscript/gptscript.go
index 462ee8b5..43f429fc 100644
--- a/pkg/gptscript/gptscript.go
+++ b/pkg/gptscript/gptscript.go
@@ -40,15 +40,16 @@ type GPTScript struct {
 }
 
 type Options struct {
-	Cache               cache.Options
-	OpenAI              openai.Options
-	Monitor             monitor.Options
-	Runner              runner.Options
-	CredentialContext   string
-	Quiet               *bool
-	Workspace           string
-	DisablePromptServer bool
-	Env                 []string
+	Cache                cache.Options
+	OpenAI               openai.Options
+	Monitor              monitor.Options
+	Runner               runner.Options
+	DefaultModelProvider string
+	CredentialContext    string
+	Quiet                *bool
+	Workspace            string
+	DisablePromptServer  bool
+	Env                  []string
 }
 
 func Complete(opts ...Options) Options {
@@ -64,6 +65,7 @@ func Complete(opts ...Options) Options {
 		result.Workspace = types.FirstSet(opt.Workspace, result.Workspace)
 		result.Env = append(result.Env, opt.Env...)
 		result.DisablePromptServer = types.FirstSet(opt.DisablePromptServer, result.DisablePromptServer)
+		result.DefaultModelProvider = types.FirstSet(opt.DefaultModelProvider, result.DefaultModelProvider)
 	}
 
 	if result.Quiet == nil {
@@ -106,16 +108,18 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) {
 		return nil, err
 	}
 
-	oaiClient, err := openai.NewClient(ctx, credStore, opts.OpenAI, openai.Options{
-		Cache:   cacheClient,
-		SetSeed: true,
-	})
-	if err != nil {
-		return nil, err
-	}
+	if opts.DefaultModelProvider == "" {
+		oaiClient, err := openai.NewClient(ctx, credStore, opts.OpenAI, openai.Options{
+			Cache:   cacheClient,
+			SetSeed: true,
+		})
+		if err != nil {
+			return nil, err
+		}
 
-	if err := registry.AddClient(oaiClient); err != nil {
-		return nil, err
+		if err := registry.AddClient(oaiClient); err != nil {
+			return nil, err
+		}
 	}
 
 	if opts.Runner.MonitorFactory == nil {
@@ -143,7 +147,7 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) {
 
 	fullEnv := append(opts.Env, extraEnv...)
 
-	remoteClient := remote.New(runner, fullEnv, cacheClient, credStore)
+	remoteClient := remote.New(runner, fullEnv, cacheClient, credStore, opts.DefaultModelProvider)
 	if err := registry.AddClient(remoteClient); err != nil {
 		closeServer()
 		return nil, err
diff --git a/pkg/remote/remote.go b/pkg/remote/remote.go
index 6cb3644e..8b9d2162 100644
--- a/pkg/remote/remote.go
+++ b/pkg/remote/remote.go
@@ -22,21 +22,23 @@ import (
 )
 
 type Client struct {
-	clientsLock sync.Mutex
-	cache       *cache.Client
-	clients     map[string]*openai.Client
-	models      map[string]*openai.Client
-	runner      *runner.Runner
-	envs        []string
-	credStore   credentials.CredentialStore
+	clientsLock     sync.Mutex
+	cache           *cache.Client
+	clients         map[string]*openai.Client
+	models          map[string]*openai.Client
+	runner          *runner.Runner
+	envs            []string
+	credStore       credentials.CredentialStore
+	defaultProvider string
 }
 
-func New(r *runner.Runner, envs []string, cache *cache.Client, credStore credentials.CredentialStore) *Client {
+func New(r *runner.Runner, envs []string, cache *cache.Client, credStore credentials.CredentialStore, defaultProvider string) *Client {
 	return &Client{
-		cache:     cache,
-		runner:    r,
-		envs:      envs,
-		credStore: credStore,
+		cache:           cache,
+		runner:          r,
+		envs:            envs,
+		credStore:       credStore,
+		defaultProvider: defaultProvider,
 	}
 }
 
@@ -73,13 +75,23 @@ func (c *Client) ListModels(ctx context.Context, providers ...string) (result []
 	return
 }
 
-func (c *Client) Supports(ctx context.Context, modelName string) (bool, error) {
-	toolName, modelNameSuffix := types.SplitToolRef(modelName)
-	if modelNameSuffix == "" {
+func (c *Client) parseModel(modelString string) (modelName, providerName string) {
+	toolName, subTool := types.SplitToolRef(modelString)
+	if subTool == "" {
+		// This is just a plain model string "gpt4o"
+		return toolName, c.defaultProvider
+	}
+	// This is a provider string "modelName from provider"
+	return subTool, toolName
+}
+
+func (c *Client) Supports(ctx context.Context, modelString string) (bool, error) {
+	_, providerName := c.parseModel(modelString)
+	if providerName == "" {
 		return false, nil
 	}
 
-	client, err := c.load(ctx, toolName)
+	client, err := c.load(ctx, providerName)
 	if err != nil {
 		return false, err
 	}
@@ -91,7 +103,7 @@ func (c *Client) Supports(ctx context.Context, modelName string) (bool, error) {
 		c.models = map[string]*openai.Client{}
 	}
 
-	c.models[modelName] = client
+	c.models[modelString] = client
 
 	return true, nil
 }
diff --git a/pkg/sdkserver/routes.go b/pkg/sdkserver/routes.go
index e0977c9e..4bed2b37 100644
--- a/pkg/sdkserver/routes.go
+++ b/pkg/sdkserver/routes.go
@@ -204,6 +204,7 @@ func (s *server) execHandler(w http.ResponseWriter, r *http.Request) {
 			CredentialOverrides: reqObject.CredentialOverrides,
 			Sequential:          reqObject.ForceSequential,
 		},
+		DefaultModelProvider: reqObject.DefaultModelProvider,
 	}
 
 	if reqObject.Confirm {
diff --git a/pkg/sdkserver/types.go b/pkg/sdkserver/types.go
index 06119c35..a07cfb9e 100644
--- a/pkg/sdkserver/types.go
+++ b/pkg/sdkserver/types.go
@@ -52,17 +52,18 @@ type toolOrFileRequest struct {
 	cacheOptions  `json:",inline"`
 	openAIOptions `json:",inline"`
 
-	ToolDefs            toolDefs `json:"toolDefs,inline"`
-	SubTool             string   `json:"subTool"`
-	Input               string   `json:"input"`
-	ChatState           string   `json:"chatState"`
-	Workspace           string   `json:"workspace"`
-	Env                 []string `json:"env"`
-	CredentialContext   string   `json:"credentialContext"`
-	CredentialOverrides []string `json:"credentialOverrides"`
-	Confirm             bool     `json:"confirm"`
-	Location            string   `json:"location,omitempty"`
-	ForceSequential     bool     `json:"forceSequential"`
+	ToolDefs             toolDefs `json:"toolDefs,inline"`
+	SubTool              string   `json:"subTool"`
+	Input                string   `json:"input"`
+	ChatState            string   `json:"chatState"`
+	Workspace            string   `json:"workspace"`
+	Env                  []string `json:"env"`
+	CredentialContext    string   `json:"credentialContext"`
+	CredentialOverrides  []string `json:"credentialOverrides"`
+	Confirm              bool     `json:"confirm"`
+	Location             string   `json:"location,omitempty"`
+	ForceSequential      bool     `json:"forceSequential"`
+	DefaultModelProvider string   `json:"defaultModelProvider,omitempty"`
 }
 
 type content struct {
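The core behavior change here is the model-string resolution in pkg/remote/remote.go: a bare model name now falls back to the configured default provider instead of implicitly requiring the OpenAI client. Below is a minimal, self-contained sketch of that rule; splitToolRef is a simplified stand-in for types.SplitToolRef, and the provider reference github.com/example/provider is hypothetical:

```go
package main

import (
	"fmt"
	"strings"
)

// splitToolRef is a simplified stand-in for types.SplitToolRef, assuming the
// "model from provider" form: it returns (provider, model) for that form and
// (ref, "") for a bare model name.
func splitToolRef(ref string) (toolName, subTool string) {
	fields := strings.Fields(ref)
	if len(fields) >= 3 && strings.EqualFold(fields[1], "from") {
		return strings.Join(fields[2:], " "), fields[0]
	}
	return ref, ""
}

// parseModel mirrors the new Client.parseModel: a bare model name resolves to
// the configured default provider; "model from provider" is split apart.
func parseModel(modelString, defaultProvider string) (modelName, providerName string) {
	toolName, subTool := splitToolRef(modelString)
	if subTool == "" {
		return toolName, defaultProvider
	}
	return subTool, toolName
}

func main() {
	// With a default provider set, a plain model name routes to it.
	fmt.Println(parseModel("gpt-4o", "github.com/example/provider"))
	// Explicit "model from provider" references are unaffected.
	fmt.Println(parseModel("mistral-large from github.com/example/provider", ""))
}
```

With no default provider configured, Supports still returns false for bare model names, so those continue to be handled by the registered OpenAI client; this is also why New in pkg/gptscript/gptscript.go only skips constructing the OpenAI client when DefaultModelProvider is set.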