smartsh/smartsh.py

55 lines
1.7 KiB
Python
Raw Normal View History

2023-04-09 15:43:18 +00:00
"""smartsh: ask an OpenAI model to suggest a Linux shell command for a task.

Usage: smartsh.py <description of what you want to do>

Environment variables:
    OPENAI_API_KEY   required; your OpenAI API key.
    OPENAI_MODEL_ID  optional; "text-davinci-003" or "gpt-3.5-turbo"
                     (default: "gpt-3.5-turbo").
    SMARTSH_DEBUG    optional; "1" or "true" enables debug output.
"""
import os
import sys

import openai


def main() -> None:
    """Read configuration from the environment, query the model, and print
    the suggested shell command to stdout. Exits with status 1 on any
    configuration error (missing key, no arguments, unsupported model)."""
    openai.api_key = os.environ.get("OPENAI_API_KEY")
    if openai.api_key is None:
        print("Please set the OPENAI_API_KEY environment variable")
        sys.exit(1)

    api_model = os.environ.get("OPENAI_MODEL_ID")
    if api_model is None:
        api_model = "gpt-3.5-turbo"
        print("Warning: OPENAI_MODEL_ID not set. Supported models are text-davinci-003, gpt-3.5-turbo")
        print("Using default model " + api_model)

    debug_mode = os.environ.get("SMARTSH_DEBUG")
    if debug_mode in ("1", "true"):
        print("Debug mode enabled")
        # Never echo the full secret; a short prefix is enough to identify the key.
        print("OpenAI API key: " + openai.api_key[:8] + "...")
        print("OpenAI model: " + api_model)

    # Bug fix: sys.argv[0] is the script path itself — only the user's words
    # after it describe the task.
    argcmd = " ".join(sys.argv[1:])
    if not argcmd:
        print("Usage: smartsh <description of the command you want>")
        sys.exit(1)

    prompttxt = "Suggest a linux shell command to accomplish the following: " + argcmd

    if api_model == "text-davinci-003":
        print("Using model " + api_model)
        # Legacy completions endpoint for the davinci model.
        completion = openai.Completion.create(
            model="text-davinci-003",
            prompt=prompttxt,
            temperature=0,  # deterministic suggestions
            max_tokens=50,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0,
        )
        print(completion['choices'][0]['text'].strip())
    elif api_model == "gpt-3.5-turbo":
        # Chat completions endpoint for chat models.
        completion = openai.ChatCompletion.create(
            model=api_model,
            messages=[
                {'role': 'user', 'content': prompttxt}
            ],
            temperature=0,
        )
        print(completion['choices'][0]['message']['content'])
    else:
        # Bug fix: an unrecognized model id previously fell through both
        # branches and the script silently printed nothing.
        print("Unsupported model " + api_model + ". Supported models are text-davinci-003, gpt-3.5-turbo")
        sys.exit(1)


if __name__ == "__main__":
    main()