Diffstat (limited to 'bin')
-rwxr-xr-x  bin/gpt/gpt  28
1 file changed, 14 insertions, 14 deletions
diff --git a/bin/gpt/gpt b/bin/gpt/gpt
index 7dc55dd..1c24d7e 100755
--- a/bin/gpt/gpt
+++ b/bin/gpt/gpt
@@ -44,12 +44,12 @@ Options:
  -h, --help          show this help message and exit

  -m|--model          specify model, available:
                      gpt-4, gpt-4-0314, gpt-4-32k, gpt-4-32k-0314,
                      gpt-3.5-turbo, gpt-3.5-turbo-0301
                      (Defaults to gpt-3.5-turbo)

  -m4                 Use model gpt-4

  -b|--behavior       How the model behaves in its responses, for example:
                      "You are a helpful assistant."
                      (Defaults to "You are a helpful programming assistant")
@@ -57,8 +57,8 @@ Options:
  -B|--sys-message    Use the prompt to set the system message (how the model behaves).
                      The user can still modify it via the special prompt '.c'

  -t|--temperature    Value between 0 and 2. Higher values like 0.8 will make the
                      output more random, while lower values like 0.2 will make it
                      more focused and deterministic.
                      (Defaults to 0.7)

@@ -69,9 +69,9 @@ Options:

  -S|--stream         Use stream mode. If this is set, token usage will not be shown

  -c|--context        Number of messages kept in the session. If it is set, API calls
                      only contain a limited number of previous chats. If it is 1, GPT
                      only gets your latest prompt. (By default, API calls include the
                      whole chat history, which is not friendly to token usage)

  -v|--verbose        If set, print token usage after each completion.
@@ -81,7 +81,7 @@ Options:
                      If STDIN is given, it will be appended to the end of the message.

Special Input:
  .c                  A special options prompt shows up. The user can dynamically
                      modify option values for API calls.

  .r                  Rewind previous chats. This is useful when the terminal is occupied by
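Putting the documented options together, an invocation might look like the sketch below. The flag combination and the prompt text are illustrative assumptions, not something taken from the commit.

    # Hypothetical usage sketch: force gpt-4, lower the temperature, keep only
    # the 3 most recent messages per API call, and stream the reply. Text piped
    # in on STDIN is appended to the end of the message.
    printf 'Explain what "set -euo pipefail" does\n' | gpt -m4 -t 0.2 -c 3 -S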
@@ -98,7 +98,7 @@ EOF

# Message shown when the user exits the prompt
_print_leave_message(){
  echo -en "\n\nChat ends. "
  if [ "$stream" = false ]; then
    tokens=$(jq '.usage? // empty | .total_tokens' "$cache" | paste -sd+ | bc)
    echo -en "${tokens:-0} tokens used. "
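The token total printed on exit is computed by pulling .usage.total_tokens out of every API response stored in the cache file, joining the numbers with '+', and handing the expression to bc. A self-contained rerun of that pipeline on made-up cache records (the file name and values are illustrative only):

    cat > /tmp/gpt-cache-demo <<'EOF'
    {"messages":[{"role":"user","content":"hi"}]}
    {"id":"a","usage":{"total_tokens":12}}
    {"id":"b","usage":{"total_tokens":30}}
    EOF
    # Records without a .usage field are skipped by `// empty`; the rest sum to 42.
    jq '.usage? // empty | .total_tokens' /tmp/gpt-cache-demo | paste -sd+ | bc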
@@ -138,9 +138,9 @@ _configure_options() {
_print_previous_chats() {
  echo
  jq -r '
    def colors: {
      "white": "\u001b[37m",
      "yellow": "\u001b[33m",
    };
    if .id|not then .messages[-1].content + "\n" else colors.yellow + .choices[0].message.content + colors.white + "\n" end
  ' "$cache"
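_print_previous_chats replays the cache: records without an .id are the user's own prompts and are printed as-is, while API responses (which carry an .id) are wrapped in yellow/white ANSI escapes. The snippet below reruns essentially the same filter on made-up records; the trailing comma in the original colors object is dropped here, since the jq builds I know of reject it as a syntax error.

    cat > /tmp/gpt-chats-demo <<'EOF'
    {"messages":[{"role":"user","content":"What does set -e do?"}]}
    {"id":"chatcmpl-1","choices":[{"message":{"content":"It makes the shell exit on errors."}}]}
    EOF
    jq -r '
      def colors: {"white": "\u001b[37m", "yellow": "\u001b[33m"};
      if .id|not then .messages[-1].content + "\n"
      else colors.yellow + .choices[0].message.content + colors.white + "\n" end
    ' /tmp/gpt-chats-demo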
@@ -308,7 +308,7 @@ stream=${stream:-false}
INDEX=

# Prepare for chat session
cache=$(mktemp -t gpt.XXXXXX -p ${CACHE_DIR}) && touch "$cache"
#trap "rm $cache" EXIT
session=()

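The session cache is a throwaway file created under ${CACHE_DIR}; with the trap commented out, the transcript survives after the chat ends. A sketch of the same setup with cleanup re-enabled and the directory variable quoted (the default path below is an assumption, not something the commit defines):

    CACHE_DIR=${CACHE_DIR:-"$HOME/.cache/gpt"}      # hypothetical default
    mkdir -p "$CACHE_DIR"
    cache=$(mktemp -t gpt.XXXXXX -p "$CACHE_DIR")   # same template as the script
    trap 'rm -f "$cache"' EXIT                      # remove the transcript on exit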
@@ -369,7 +369,7 @@ EOF
# Append newest message into session
assistant_message="$(cat <<EOF
{
  "role": "assistant",
  "content": $(jq -sr '.[-1].choices[0].message.content' "$cache" | jq -sR .)
}
EOF
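The heredoc above splices the assistant's reply back into a JSON object: the inner jq extracts the raw text of the last completion, and the outer `jq -sR .` re-encodes that text as a single JSON string so embedded quotes and newlines stay valid. A standalone illustration of the quoting step (the sample text is made up):

    reply=$(printf 'line one with "quotes"\nline two')
    content=$(printf '%s' "$reply" | jq -sR .)      # -> "line one with \"quotes\"\nline two"
    printf '{ "role": "assistant", "content": %s }\n' "$content" | jq .   # validates as JSON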