mirror of https://github.com/ashikslab/smartsh.git
Compare commits
No commits in common. "f8d599bfa20f258b61f4a44d2a67e856b2b2fb67" and "a81d2293991af30567e1f27d30fa6d752e7b1580" have entirely different histories.
f8d599bfa2
...
a81d229399
|
@ -46,7 +46,3 @@ You can set the following additional environment variables which smartsh can mak
|
|||
If the variable is not set, we use "gpt-3.5-turbo" by default
|
||||
|
||||
`SMARTSH_DEBUG` : Will print additional debug info when running smartsh
|
||||
|
||||
`SMARTSH_SILENT_MODE` When set to 1, smartsh will try not to print warnings about missing environment variables, current mode etc. Off by default.
|
||||
|
||||
`SMARTSH_TEACHER_MODE` When set to 1, smartsh will provide you with an explanation of the command it synthesized. Note that this will disable the prompt to execute the synthesized command. Off by default.
|
||||
|
|
38
smartsh.py
38
smartsh.py
|
@ -10,23 +10,13 @@ if openai.api_key is None:
|
|||
sys.exit(1)
|
||||
|
||||
# Select the OpenAI model from the environment.
# OPENAI_MODEL_ID  : model name (supported: text-davinci-003, gpt-3.5-turbo)
# SMARTSH_SILENT_MODE=1 suppresses the informational warnings below.
api_model = os.environ.get("OPENAI_MODEL_ID")
smarsh_dont_warn = os.environ.get("SMARTSH_SILENT_MODE") == "1"
if api_model is None:
    # Fall back to the documented default model.
    api_model = "gpt-3.5-turbo"
    if not smarsh_dont_warn:  # idiomatic boolean test (was: `!= True`)
        print("Warning: OPENAI_MODEL_ID not set. Supported models are text-davinci-003, gpt-3.5-turbo")
        print("Using default model " + api_model)
|
||||
|
||||
# Read debug / teacher-mode flags from the environment.
smarsh_debug_mode = os.environ.get("SMARTSH_DEBUG")

is_in_teacher_mode = False
smartsh_teacher_mode = os.environ.get("SMARTSH_TEACHER_MODE")
# Either "1" or "true" enables the flag; membership test replaces the
# original `== "1" or == "true"` chain (None is not in the tuple, so an
# unset variable stays disabled, exactly as before).
if smartsh_teacher_mode in ("1", "true"):
    if not smarsh_dont_warn:
        print("Teacher mode enabled")
    is_in_teacher_mode = True

if smarsh_debug_mode in ("1", "true"):
    print("Debug mode enabled")
    # NOTE(review): this prints the full API key, leaking a secret into the
    # terminal scrollback / logs — consider masking all but a prefix.
    # Kept as-is here to preserve behavior.
    print("OpenAI API key: " + openai.api_key)
|
||||
|
@ -34,15 +24,10 @@ if smarsh_debug_mode == "1" or smarsh_debug_mode == "true":
|
|||
|
||||
# Build the natural-language prompt from the command-line arguments.
# BUG FIX: a trailing unconditional assignment ("Suggest a linux shell
# command to accomplish the following: ...") clobbered this if/else and
# made both branches dead code; it has been removed so the mode-specific
# prompts actually take effect.
# BUG FIX: the non-teacher prompt was missing the f prefix, so the literal
# text "{os.environ.get('SHELL')}" was sent to the model instead of the
# user's shell name.
# NOTE(review): sys.argv[1:] now excludes argv[0]; the original
# " ".join(sys.argv) embedded the script path into the prompt.
argcmd = " ".join(sys.argv[1:])
if is_in_teacher_mode:
    prompttxt = "You suggest a valid shell command to accomplish the following, together with an explanation: " + argcmd
else:
    prompttxt = f"You suggest a valid and correct {os.environ.get('SHELL')} command to accomplish the following, without any further explanation or additional text: " + argcmd
|
||||
completion = None
|
||||
apioutput = None
|
||||
if api_model == "text-davinci-003":
|
||||
if smarsh_dont_warn != True:
|
||||
print("Using model " + api_model)
|
||||
# Get the completion from OpenAI
|
||||
completion = openai.Completion.create(
|
||||
|
@ -54,27 +39,16 @@ if api_model == "text-davinci-003":
|
|||
frequency_penalty=0,
|
||||
presence_penalty=0,
|
||||
)
|
||||
apioutput = completion['choices'][0]['text'].strip()
|
||||
# print the output from davinci model to stdout
|
||||
print(completion['choices'][0]['text'].strip())
|
||||
elif api_model == "gpt-3.5-turbo":
|
||||
completion = openai.ChatCompletion.create(
|
||||
model = api_model,
|
||||
messages = [
|
||||
{'role': 'system', 'content': prompttxt}
|
||||
{'role': 'user', 'content': prompttxt}
|
||||
],
|
||||
temperature = 0
|
||||
)
|
||||
# print the response to stdout
|
||||
apioutput = completion['choices'][0]['message']['content'].strip()
|
||||
print(completion['choices'][0]['message']['content'])
|
||||
|
||||
# Ask the user if the suggested command shall be executed.
# In teacher mode the answer (which already includes an explanation) is
# only displayed; otherwise we offer to run the synthesized command.
if not is_in_teacher_mode and apioutput is not None:  # idiomatic (was `== False`)
    print("Suggested command: " + apioutput)
    print("Do you want to execute this command? (y/n)")
    user_input = input()
    if user_input == 'y':
        print("Executing command: " + apioutput)
        # NOTE(review): runs model output verbatim through the shell —
        # that is the tool's purpose, but the y/n confirmation above is
        # the only guard; keep it that way.
        os.system(apioutput)
    else:
        print("Command not executed")
else:
    # Teacher mode, or no output was produced (apioutput may be None here,
    # in which case this prints the literal "None" — behavior preserved).
    print(apioutput)
|
||||
|
|
Loading…
Reference in New Issue