author     Hsieh Chin Fan <pham@topo.tw>  2023-03-23 12:38:20 +0800
committer  Hsieh Chin Fan <pham@topo.tw>  2023-03-23 12:38:20 +0800
commit     7594d8bbfa51e94a0fbfd5fb0649af3be0181ccc (patch)
tree       f7de6ee352e6b73caa06e5afd78beca1a2db100e
parent     ca4f040ab9e46ce4e900a5e5a6dc57bb94c41393 (diff)
Big improvement
-rwxr-xr-x  bin/gpt/gpt  89
1 file changed, 63 insertions(+), 26 deletions(-)
diff --git a/bin/gpt/gpt b/bin/gpt/gpt
index 4fbbc5b..63241ac 100755
--- a/bin/gpt/gpt
+++ b/bin/gpt/gpt
@@ -1,9 +1,12 @@
 #! /bin/bash
 
 # TODO
-# - Put previous response into tokens
 # - Use suggested block to wrap data:
 #   https://help.openai.com/en/articles/6654000-best-practices-for-prompt-engineering-with-openai-api
+# - Add signal trap for API parameters (like stop)
+
+# If script is interupt by SIGINT, simply print leave message
+trap 'echo -e "\nChat Finished, cached file: $cache"; exit 0' INT
 
 # Function for printing helper message
 _print_helper_message() {
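The first hunk installs a SIGINT handler so that Ctrl-C ends the chat with a farewell naming the cache file instead of aborting the script abruptly. A minimal standalone sketch of the same pattern, assuming nothing beyond what the hunk shows (the prompt loop below is illustrative, not the script's own):

#! /bin/bash
# Leave gracefully on Ctrl-C: report where the transcript was cached, then exit 0
cache=$(mktemp)
trap 'echo -e "\nChat Finished, cached file: $cache"; exit 0' INT

while true; do
  read -r -p "Let's Chat: " line   # Ctrl-C here fires the trap instead of killing the loop
  echo "you said: $line" >> "$cache"
done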
@@ -86,36 +89,70 @@ model=${model:-gpt-3.5-turbo}
 behavior="${behavior:-You are a helpful programming assistant}"
 temperature=${temperature:-0.7}
 n=${n:-1}
+cache=`mktemp` && touch $cache # && trap "rm $cache" EXIT
+session=()
+count=0
+
+# Use while to keep chat session
+while true; do
+  # Read prompt from terminal
+  # If content is not specified by argumment, read it from terminal
+  [[ -z "$content" || $count >=1 ]] && read -e -r -p "Let's Chat: " content </dev/tty
+  # If no user input, simple print helper message and exit with code 1
+  [ -z "$content" ] && { echo -e "No message is given\n"; _print_helper_message; exit 1; }
+  # Read data from STDIN
+  [ ! -t 0 ] && data="$(cat)"
+  # Append data to the end of content
+  [ -n "$data" ] && content="$(printf "%s\\n\\n%s" "$content" "$data")"
+
+  # Put user message into session
+  user_message="$(cat <<EOF
+{"role": "user", "content": $(<<<"$content" jq -sR .)}
+EOF
+)"
+  session+=("$user_message")
 
-# Read prompt from terminal
-[ -z "$content" ] && read -e -r -p "Let's Chat: " content </dev/tty
-[ -z "$content" ] && { echo -e "No message is given\n"; _print_helper_message; exit 1; }
-[ ! -t 0 ] && data="$(cat)"
-[ -n "$data" ] && content="$(printf "%s\\n\\n%s" "$content" "$data")"
-
-# Create request body
-# Consider quotes, back slashes, use jq to ensure content texts are OK to put in JSON
-body="$(cat <<EOF
+  # Create request body
+  # Consider quotes, back slashes, use jq to ensure content texts are OK to put in JSON
+  body="$(cat <<EOF
 {
   "model": "$model",
   "messages": [
-    {"role": "system", "content": $(echo $behavior | jq -R .)},
-    {"role": "user", "content": $(echo $content | jq -R .)}
+    {"role": "system", "content": $(echo $behavior | jq -sR .)},
+    `IFS=',\n'; echo "${session[*]}"`
   ],
   "temperature": $temperature,
-  "n": $n
+  "n": $n,
+  "max_tokens": 50
 }
 EOF
 )"
-
-# Add an empty line between prompt and response
-echo
-
-# API call
-curl https://api.openai.com/$ROUTE \
-  --silent \
-  -H "Content-Type: application/json" \
-  -H "Authorization: Bearer $OPENAI_API_KEY" \
-  -d "$body" | \
-  jq . | tee .gpt | \
-  jq -r .choices[0].message.content
+  # Append request body into cache
+  echo "$body" >>$cache
+
+  # Add an empty line between prompt and response
+  echo -e '\n------\n'
+
+  # API call
+  # Save original response into cache file
+  # And only print content of message
+  response="`
+    curl https://api.openai.com/$ROUTE \
+      --silent \
+      -H "Content-Type: application/json" \
+      -H "Authorization: Bearer $OPENAI_API_KEY" \
+      -d "$body" | \
+      jq . | tee -a $cache | \
+      jq -r .choices[0].message.content
+  `"
+  echo -e "${response}\n\n------\n"
+
+  # Append newest message into session
+  assistant_message="$(cat <<EOF
+{"role": "assistant", "content": $(<<<"$response" jq -sR .)}
+EOF
+)"
+  session+=("$assistant_message")
+
+  (( count+=1 ))
+done
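The second hunk keeps the whole conversation in a session array and splices it into the request's "messages" field: in bash, "${session[*]}" expands to the array elements joined by the first character of IFS, which the backquoted IFS assignment in the hunk effectively sets to a comma (the "\n" in that assignment is two literal characters, not a newline; only the leading comma matters for the join). A small sketch of just that join, with hypothetical message values and jq used only to pretty-print the result:

#! /bin/bash
# Each element is already a complete JSON object, as the script builds for user/assistant turns
session=()
session+=('{"role": "user", "content": "hello"}')
session+=('{"role": "assistant", "content": "hi there"}')
session+=('{"role": "user", "content": "and the chat history?"}')

# "${session[*]}" joins the elements with the first character of IFS, here a comma
messages="$(IFS=','; echo "${session[*]}")"

# Wrap in brackets to get the JSON array the request body expects
echo "[ $messages ]" | jq .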