minor improvements and provision to execute the command

Ashik K 2023-04-10 10:34:10 +02:00
parent a81d229399
commit 7e93eb08c4
1 changed file with 19 additions and 6 deletions


@@ -10,8 +10,10 @@ if openai.api_key is None:
     sys.exit(1)
 api_model = os.environ.get("OPENAI_MODEL_ID")
+smarsh_dont_warn = os.environ.get("SMARTSH_DONT_WARN")
 if api_model is None:
     api_model = "gpt-3.5-turbo"
-    print("Warning: OPENAI_MODEL_ID not set. Supported models are text-davinci-003, gpt-3.5-turbo")
-    print("Using default model " + api_model)
+    if smarsh_dont_warn is None:
+        print("Warning: OPENAI_MODEL_ID not set. Supported models are text-davinci-003, gpt-3.5-turbo")
+        print("Using default model " + api_model)
@@ -25,8 +27,9 @@ if smarsh_debug_mode == "1" or smarsh_debug_mode == "true":
 # argcmd contains the entire command line arguments as a space separated string
 argcmd = " ".join(sys.argv)
-prompttxt = "Suggest a linux shell command to accomplish the following: " + argcmd
+prompttxt = f"You suggest a valid and correct {os.environ.get('SHELL')} command to accomplish the following, without any further explanation or additional text: " + argcmd
 completion = None
+shellcmd = None
 if api_model == "text-davinci-003":
     print("Using model " + api_model)
     # Get the completion from OpenAI
@@ -40,15 +43,25 @@ if api_model == "text-davinci-003":
         presence_penalty=0,
     )
     # print the output from davinci model to stdout
-    print(completion['choices'][0]['text'].strip())
+    shellcmd = completion['choices'][0]['text'].strip()
 elif api_model == "gpt-3.5-turbo":
     completion = openai.ChatCompletion.create(
         model = api_model,
         messages = [
-            {'role': 'user', 'content': prompttxt}
+            {'role': 'system', 'content': prompttxt}
         ],
         temperature = 0
     )
     # print the response to stdout
-    print(completion['choices'][0]['message']['content'])
+    shellcmd = completion['choices'][0]['message']['content'].strip()
+# Ask the user if the suggested command shall be executed
+if shellcmd is not None:
+    print("Suggested command: " + shellcmd)
+    print("Do you want to execute this command? (y/n)")
+    user_input = input()
+    if user_input == 'y':
+        print("Executing command: " + shellcmd)
+        os.system(shellcmd)
+    else:
+        print("Command not executed")
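For context, the confirm-and-execute flow this commit adds can be read as a standalone snippet. The sketch below is illustrative only: the helper name confirm_and_run and the use of subprocess.run instead of os.system are assumptions, not part of the commit, but the y/n confirmation mirrors the added code.

# Minimal sketch of the confirmation flow added in this commit.
# Assumptions: helper name and subprocess.run are illustrative, not from the commit.
import subprocess

def confirm_and_run(shellcmd: str) -> None:
    """Show the suggested command and run it only after explicit confirmation."""
    print("Suggested command: " + shellcmd)
    answer = input("Do you want to execute this command? (y/n) ").strip().lower()
    if answer == "y":
        print("Executing command: " + shellcmd)
        # shell=True because the suggestion is a full shell command line
        subprocess.run(shellcmd, shell=True)
    else:
        print("Command not executed")

# Example usage with a hypothetical suggestion:
# confirm_and_run("ls -lh")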