diff --git a/.pnp.cjs b/.pnp.cjs
index 8a4d600..0bd297d 100755
--- a/.pnp.cjs
+++ b/.pnp.cjs
@@ -67,7 +67,7 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
["mime-db", "npm:1.52.0"],\
["mime-type", "npm:4.0.0"],\
["next", "virtual:122228a85041d9011382076e3461d3732767a4d968795f3a5cb35471da2a790895a6d3bc882a29d504f73e089535a8a2db9819540334928ded52ebc8fb1c60ce#npm:12.3.4"],\
- ["openai", "npm:3.1.0"],\
+ ["openai", "npm:3.2.1"],\
["orbit-db", "npm:0.28.7"],\
["postcss", "npm:8.4.20"],\
["rc-tabs", "virtual:122228a85041d9011382076e3461d3732767a4d968795f3a5cb35471da2a790895a6d3bc882a29d504f73e089535a8a2db9819540334928ded52ebc8fb1c60ce#npm:12.5.5"],\
@@ -22346,10 +22346,10 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
}]\
]],\
["openai", [\
- ["npm:3.1.0", {\
- "packageLocation": "./.yarn/cache/openai-npm-3.1.0-64264fb99a-2277d9e2b4.zip/node_modules/openai/",\
+ ["npm:3.2.1", {\
+ "packageLocation": "./.yarn/cache/openai-npm-3.2.1-8bb9cccf51-ef3942e9b5.zip/node_modules/openai/",\
"packageDependencies": [\
- ["openai", "npm:3.1.0"],\
+ ["openai", "npm:3.2.1"],\
["axios", "npm:0.26.1"],\
["form-data", "npm:4.0.0"]\
],\
@@ -28916,7 +28916,7 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) {
["mime-db", "npm:1.52.0"],\
["mime-type", "npm:4.0.0"],\
["next", "virtual:122228a85041d9011382076e3461d3732767a4d968795f3a5cb35471da2a790895a6d3bc882a29d504f73e089535a8a2db9819540334928ded52ebc8fb1c60ce#npm:12.3.4"],\
- ["openai", "npm:3.1.0"],\
+ ["openai", "npm:3.2.1"],\
["orbit-db", "npm:0.28.7"],\
["postcss", "npm:8.4.20"],\
["rc-tabs", "virtual:122228a85041d9011382076e3461d3732767a4d968795f3a5cb35471da2a790895a6d3bc882a29d504f73e089535a8a2db9819540334928ded52ebc8fb1c60ce#npm:12.5.5"],\
diff --git a/README.md b/README.md
index d428c7b..3e63acb 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ See the latest demo here! https://twitter.com/flngr/status/1609616068057698304
## Presentation
-this project is free but experimental, you will have to configure your own API credentials to access OpenAI (for GPT-3 / text-davinci-003, and dall-e 2).
+this project is free but experimental, you will have to configure your own API credentials to access OpenAI (for GPT-3 / text-davinci-003 / gpt-3.5-turbo / gpt-4, and dall-e 2).
Currently Replicate (stable diffusion) is disabled, as I had some latency issues with it (maybe I will put it back in the future!)
diff --git a/package.json b/package.json
index 5c586f6..5ad0284 100644
--- a/package.json
+++ b/package.json
@@ -31,7 +31,7 @@
"mime-db": "^1.52.0",
"mime-type": "^4.0.0",
"next": "^12.2.5",
- "openai": "^3.1.0",
+ "openai": "^3.2.1",
"orbit-db": "^0.28.7",
"postcss": "^8.4.19",
"rc-tabs": "^12.5.5",
diff --git a/src/components/loaders/ModelProgressBar.tsx b/src/components/loaders/ModelProgressBar.tsx
index 6d2aca1..f84961e 100644
--- a/src/components/loaders/ModelProgressBar.tsx
+++ b/src/components/loaders/ModelProgressBar.tsx
@@ -14,6 +14,7 @@ export const ModelProgressBar = ({
stage?: string
}) => {
const elapsedTimeSec = elapsedTimeMs / 1000
+ const actualEstimatedTime = model === 'gpt-4' ? estimatedTimeSec * 2.75 : estimatedTimeSec
return (
Generating {stage} using {model} -{' '}
- {elapsedTimeSec < estimatedTimeSec
- ? `${Math.round(estimatedTimeSec - elapsedTimeSec)}s remaining`
- : elapsedTimeSec > estimatedTimeSec + 15
+ {elapsedTimeSec < actualEstimatedTime
+ ? `${Math.round(actualEstimatedTime - elapsedTimeSec)}s remaining`
+ : elapsedTimeSec > actualEstimatedTime + 15
? 'Sigh.. how is your day going anyway?'
- : elapsedTimeSec > estimatedTimeSec + 5
+ : elapsedTimeSec > actualEstimatedTime + 5
? 'Sorry, this is taking longer than expected..'
: 'Hold on tight!'}{' '}
- {elapsedTimeSec < estimatedTimeSec
- ? ` (${Math.round((elapsedTimeSec / estimatedTimeSec) * 100)}%)`
+ {elapsedTimeSec < actualEstimatedTime
+ ? ` (${Math.round((elapsedTimeSec / actualEstimatedTime) * 100)}%)`
: ''}
)
diff --git a/src/providers/openai/index.ts b/src/providers/openai/index.ts
index 291247a..e214b27 100644
--- a/src/providers/openai/index.ts
+++ b/src/providers/openai/index.ts
@@ -40,7 +40,24 @@ export const imagineString = async (
}
persisted.model = model || 'text-davinci-003'
- const tokenHardLimit = 4097
+ let tokenHardLimit
+ switch (model) {
+ case 'gpt-4':
+ tokenHardLimit = 8192
+ break;
+ case 'code-davinci-002':
+ tokenHardLimit = 8000
+ break;
+ case 'gpt-3.5-turbo':
+ case 'text-davinci-003':
+ case 'text-davinci-002':
+ tokenHardLimit = 4096
+ break;
+ case 'text-davinci-001':
+ default:
+ tokenHardLimit = 2049
+ break;
+ }
// https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
// 1 token ~= 4 chars in English
@@ -55,22 +72,46 @@ export const imagineString = async (
)
const openai = await getOpenAI(apiKey)
- const response = await openai.createCompletion({
- model: persisted.model,
- prompt,
- user: 'default_user',
- temperature: settings.temperature,
- max_tokens: maxTokens,
- n: settings.n,
- // top_p: 1,
- best_of: settings.bestOf,
- frequency_penalty: settings.frequencyPenalty,
- presence_penalty: settings.presencePenalty,
- logit_bias: settings.gptLogitBias,
- stop: settings.stop?.length ? settings.stop : undefined,
- })
+ if (isChatModel(model)) {
+ const response = await openai.createChatCompletion({
+      model: persisted.model,
+ messages: [
+ {"role": "user", "content": prompt}
+ ],
+ user: 'default_user',
+ temperature: settings.temperature,
+ max_tokens: maxTokens,
+ n: settings.n,
+ // top_p: 1,
+ // best_of: settings.bestOf,
+ frequency_penalty: settings.frequencyPenalty,
+ presence_penalty: settings.presencePenalty,
+ logit_bias: settings.gptLogitBias,
+ stop: settings.stop?.length ? settings.stop : undefined,
+ })
+
+ return response?.data?.choices?.[0]?.message?.content?.trim() || ''
+ } else {
+ const response = await openai.createCompletion({
+ model: persisted.model,
+ prompt,
+ user: 'default_user',
+ temperature: settings.temperature,
+ max_tokens: maxTokens,
+ n: settings.n,
+ // top_p: 1,
+ best_of: settings.bestOf,
+ frequency_penalty: settings.frequencyPenalty,
+ presence_penalty: settings.presencePenalty,
+ logit_bias: settings.gptLogitBias,
+ stop: settings.stop?.length ? settings.stop : undefined,
+ })
+ return response?.data?.choices?.[0]?.text?.trim() || ''
+ }
+}
- return response?.data?.choices?.[0]?.text?.trim() || ''
+function isChatModel(model?: string) {
+ return ['gpt-4', 'gpt-3.5-turbo'].includes(model)
}
export const imagineHTML = async (
@@ -200,9 +241,15 @@ export const imagineJSON = async (
// try to fix GPT-3 adding commas at the end of each line
const regex = /\,(?!\s*?[\{\[\"\'\w])/g
- const input = raw.replace(regex, '')
+ let input = raw.replace(regex, '')
console.log(`input: ${input}`)
+ if (input.trimStart().startsWith('{') && !input.trimEnd().endsWith('}')) {
+ input += '}'
+ }
+ if (input.trimStart().startsWith('[') && !input.trimEnd().endsWith(']')) {
+ input += ']'
+ }
const json = JSON.parse(input) as T
// remove all trailing commas (`input` variable holds the erroneous JSON)
diff --git a/yarn.lock b/yarn.lock
index 92bbf06..6fced07 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -17056,13 +17056,13 @@ __metadata:
languageName: node
linkType: hard
-"openai@npm:^3.1.0":
- version: 3.1.0
- resolution: "openai@npm:3.1.0"
+"openai@npm:^3.2.1":
+ version: 3.2.1
+ resolution: "openai@npm:3.2.1"
dependencies:
axios: ^0.26.0
form-data: ^4.0.0
- checksum: 2277d9e2b419b0ba17fb5ee7187bcff285a2a90120990c8da9658b8a8c8d4a927c95cec6cdc5503aa33108b4933288ef1f3756cc09033c33700d4bb2025138c0
+ checksum: ef3942e9b527cf27273c4355bb8fb9ebd94ae3a88c12eec0ac51c4ef0ad8c18864683759471597390816bcd822bdc9f2f1cea7a3eb1e432c9101f568f7c6d19a
languageName: node
linkType: hard
@@ -22514,7 +22514,7 @@ __metadata:
mime-db: ^1.52.0
mime-type: ^4.0.0
next: ^12.2.5
- openai: ^3.1.0
+ openai: ^3.2.1
orbit-db: ^0.28.7
postcss: ^8.4.19
rc-tabs: ^12.5.5