diff options
| -rwxr-xr-x | bin/gpt/gpt | 52 |
1 files changed, 39 insertions, 13 deletions
diff --git a/bin/gpt/gpt b/bin/gpt/gpt index 6d34075..c3941a3 100755 --- a/bin/gpt/gpt +++ b/bin/gpt/gpt | |||
| @@ -7,10 +7,10 @@ | |||
| 7 | # - History for prompt | 7 | # - History for prompt |
| 8 | 8 | ||
| 9 | # Necessary commands | 9 | # Necessary commands |
| 10 | stacks=( curl jq ) | 10 | stacks=( curl jq sed ) |
| 11 | 11 | ||
| 12 | # User can dynamically change these options for API call | 12 | # User can dynamically change these options for API call |
| 13 | configurable_options=( behavior temperature max_tokens ) | 13 | configurable_options=( model behavior temperature max_tokens stream ) |
| 14 | 14 | ||
| 15 | # If script is interrupted by SIGINT, simply print leave message | 15 | # If script is interrupted by SIGINT, simply print leave message |
| 16 | trap _print_leave_message INT | 16 | trap _print_leave_message INT |
| @@ -58,6 +58,8 @@ Options: | |||
| 58 | 58 | ||
| 59 | -s|--skip Skip message, STDIN would be treated as your message | 59 | -s|--skip Skip message, STDIN would be treated as your message |
| 60 | 60 | ||
| 61 | -S|--stream Use stream mode. If this is set, token usage would not be shown | ||
| 62 | |||
| 61 | * The other arguments would be treated as message content. | 63 | * The other arguments would be treated as message content. |
| 62 | If no message is specified, user should input content by hand. | 64 | If no message is specified, user should input content by hand. |
| 63 | If STDIN is given, it would be appended to the end of message. | 65 | If STDIN is given, it would be appended to the end of message. |
| @@ -70,9 +72,14 @@ Reference: https://platform.openai.com/docs/api-reference/completions | |||
| 70 | EOF | 72 | EOF |
| 71 | } | 73 | } |
| 72 | 74 | ||
| 75 | # Message when user exit prompt | ||
| 73 | _print_leave_message(){ | 76 | _print_leave_message(){ |
| 74 | tokens=$(jq '.usage? // empty | .total_tokens' "$cache" | paste -sd+ | bc) | 77 | echo -en "\nChat ends. " |
| 75 | echo -e "\nChat ends, ${tokens:-0} tokens used. Session is cached in: $cache" | 78 | if [ "$stream" = false ]; then |
| 79 | tokens=$(jq '.usage? // empty | .total_tokens' "$cache" | paste -sd+ | bc) | ||
| 80 | echo -en "${tokens:-0} tokens used. " | ||
| 81 | fi | ||
| 82 | echo Session is cached in: "$cache" | ||
| 76 | exit 0 | 83 | exit 0 |
| 77 | } | 84 | } |
| 78 | 85 | ||
| @@ -133,6 +140,25 @@ _get_content() { | |||
| 133 | fi | 140 | fi |
| 134 | } | 141 | } |
| 135 | 142 | ||
| 143 | _API_call() { | ||
| 144 | curl https://api.openai.com/$ROUTE \ | ||
| 145 | --silent \ | ||
| 146 | -H "Content-Type: application/json" \ | ||
| 147 | -H "Authorization: Bearer $OPENAI_API_KEY" \ | ||
| 148 | -d "$body" | ||
| 149 | } | ||
| 150 | |||
| 151 | _process_completion() { | ||
| 152 | if [ "$stream" = true ]; then | ||
| 153 | sed -E 's/data: (\[DONE\])?//g;/^$/d' | \ | ||
| 154 | jq . | tee -a "$cache" | \ | ||
| 155 | jq -jr '.choices?[0]?.delta?.content? // empty' | ||
| 156 | else | ||
| 157 | jq . | tee -a "$cache" | \ | ||
| 158 | jq -r .choices[0].message.content | ||
| 159 | fi | ||
| 160 | } | ||
| 161 | |||
| 136 | # Check OPENAI API KEY in env | 162 | # Check OPENAI API KEY in env |
| 137 | # Exit with 6 (configuration issue) if it is not set | 163 | # Exit with 6 (configuration issue) if it is not set |
| 138 | [ -z "$OPENAI_API_KEY" ] && which token &>/dev/null && OPENAI_API_KEY=$(token openai) | 164 | [ -z "$OPENAI_API_KEY" ] && which token &>/dev/null && OPENAI_API_KEY=$(token openai) |
| @@ -165,6 +191,10 @@ while [ "$#" -gt 0 ]; do | |||
| 165 | SKIP_INPUT=true | 191 | SKIP_INPUT=true |
| 166 | shift 1 | 192 | shift 1 |
| 167 | ;; | 193 | ;; |
| 194 | -S|--stream) | ||
| 195 | stream=true | ||
| 196 | shift 1 | ||
| 197 | ;; | ||
| 168 | -h|--help) | 198 | -h|--help) |
| 169 | _print_helper_message | 199 | _print_helper_message |
| 170 | exit 0 | 200 | exit 0 |
| @@ -186,6 +216,7 @@ model=${model:-gpt-3.5-turbo} | |||
| 186 | behavior="${behavior:-You are a helpful programming assistant}" | 216 | behavior="${behavior:-You are a helpful programming assistant}" |
| 187 | temperature=${temperature:-0.7} | 217 | temperature=${temperature:-0.7} |
| 188 | max_tokens=${max_tokens:-1024} | 218 | max_tokens=${max_tokens:-1024} |
| 219 | stream=${stream:-false} | ||
| 189 | 220 | ||
| 190 | # Prepare for chat session | 221 | # Prepare for chat session |
| 191 | cache=$(mktemp) && touch "$cache" | 222 | cache=$(mktemp) && touch "$cache" |
| @@ -217,7 +248,8 @@ EOF | |||
| 217 | $(IFS=','; echo "${session[*]}") | 248 | $(IFS=','; echo "${session[*]}") |
| 218 | ], | 249 | ], |
| 219 | "temperature": $temperature, | 250 | "temperature": $temperature, |
| 220 | "max_tokens": $max_tokens | 251 | "max_tokens": $max_tokens, |
| 252 | "stream": $stream | ||
| 221 | } | 253 | } |
| 222 | EOF | 254 | EOF |
| 223 | )" | 255 | )" |
| @@ -230,14 +262,8 @@ EOF | |||
| 230 | # API call | 262 | # API call |
| 231 | # Save original response into cache file | 263 | # Save original response into cache file |
| 232 | # And only print content of message | 264 | # And only print content of message |
| 233 | curl https://api.openai.com/$ROUTE \ | 265 | _API_call | _process_completion |
| 234 | --silent \ | 266 | echo -e "\n\n======\n" |
| 235 | -H "Content-Type: application/json" \ | ||
| 236 | -H "Authorization: Bearer $OPENAI_API_KEY" \ | ||
| 237 | -d "$body" | \ | ||
| 238 | jq . | tee -a "$cache" | \ | ||
| 239 | jq -r .choices[0].message.content | ||
| 240 | echo -e "${response}\n\n======\n" | ||
| 241 | 267 | ||
| 242 | # Append newest message into session | 268 | # Append newest message into session |
| 243 | assistant_message="$(cat <<EOF | 269 | assistant_message="$(cat <<EOF |