From 7594d8bbfa51e94a0fbfd5fb0649af3be0181ccc Mon Sep 17 00:00:00 2001
From: Hsieh Chin Fan
Date: Thu, 23 Mar 2023 12:38:20 +0800
Subject: Big improvement

---
 bin/gpt/gpt | 89 +++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 63 insertions(+), 26 deletions(-)

diff --git a/bin/gpt/gpt b/bin/gpt/gpt
index 4fbbc5b..63241ac 100755
--- a/bin/gpt/gpt
+++ b/bin/gpt/gpt
@@ -1,9 +1,12 @@
 #! /bin/bash
 
 # TODO
-# - Put previous response into tokens
 # - Use suggested block to wrap data:
 #   https://help.openai.com/en/articles/6654000-best-practices-for-prompt-engineering-with-openai-api
+# - Add signal trap for API parameters (like stop)
+
+# If the script is interrupted by SIGINT, simply print a leave message
+trap 'echo -e "\nChat Finished, cached file: $cache"; exit 0' INT
 
 # Function for printing helper message
 _print_helper_message() {
@@ -86,36 +89,70 @@ model=${model:-gpt-3.5-turbo}
 behavior="${behavior:-You are a helpful programming assistant}"
 temperature=${temperature:-0.7}
 n=${n:-1}
+cache=`mktemp` && touch $cache # && trap "rm $cache" EXIT
+session=()
+count=0
+
+# Use while to keep the chat session
+while true; do
+  # Read prompt from terminal
+  # If content is not specified by argument, read it from the terminal
+  [[ -z "$content" || $count -ge 1 ]] && read -e -r -p "Let's Chat: " content >$cache
+
+  # Add an empty line between prompt and response
+  echo -e '\n------\n'
+
+  # API call
+  # Save the original response into the cache file
+  # and only print the content of the message
+  response="`
+    curl https://api.openai.com/$ROUTE \
+      --silent \
+      -H "Content-Type: application/json" \
+      -H "Authorization: Bearer $OPENAI_API_KEY" \
+      -d "$body" | \
+      jq . | tee -a $cache | \
+      jq -r .choices[0].message.content
+  `"
+  echo -e "${response}\n\n------\n"
+
+  # Append newest message into session
+  assistant_message="$(cat <
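
Note on the session handling: the patch is cut off right where the newest prompt and assistant reply get appended to the session array, and the "$body" payload passed to curl is assembled elsewhere in the script. The sketch below is purely illustrative and is not part of the commit; the build_body helper, the message layout, and the choice to store session entries as compact JSON objects are all assumptions about how such a body could be rebuilt on every loop iteration.

#!/bin/bash
# Hypothetical sketch, not the commit's actual code: one way the request
# body ("$body") could be rebuilt from a session array of JSON messages.

model="gpt-3.5-turbo"
behavior="You are a helpful programming assistant"
session=()    # grows with entries like '{"role":"user","content":"..."}'

build_body() {
  local prompt="$1"
  # System message first, then the cached history, then the newest prompt;
  # jq -s slurps the stream of objects into the "messages" array.
  printf '%s\n' \
    "$(jq -cn --arg c "$behavior" '{role:"system", content:$c}')" \
    "${session[@]}" \
    "$(jq -cn --arg c "$prompt" '{role:"user", content:$c}')" |
    jq -s --arg m "$model" '{model:$m, messages:.}'
}

# After each reply, both sides of the exchange would be pushed onto the
# array so the next request carries the whole conversation:
#   session+=("$(jq -cn --arg c "$content"  '{role:"user", content:$c}')")
#   session+=("$(jq -cn --arg c "$response" '{role:"assistant", content:$c}')")

body=$(build_body "How do I list hidden files?")
echo "$body" | jq .

Folding the history into the messages array this way is what lets the removed TODO ("Put previous response into tokens") be dropped: every call sees the prior turns, at the cost of a payload that grows with each exchange.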