Initial commit
This commit is contained in:
BIN
demos/cli.gif
Normal file
BIN
demos/cli.gif
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 919 KiB |
1
demos/context-demo-text.txt
Normal file
1
demos/context-demo-text.txt
Normal file
File diff suppressed because one or more lines are too long
53
demos/use-api-server-streaming.js
Normal file
53
demos/use-api-server-streaming.js
Normal file
@@ -0,0 +1,53 @@
|
||||
// Run the server first with `npm run server`
|
||||
import { fetchEventSource } from '@waylaidwanderer/fetch-event-source';
|
||||
|
||||
const opts = {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
message: 'Hello',
|
||||
// Set stream to true to receive each token as it is generated.
|
||||
stream: true,
|
||||
}),
|
||||
};
|
||||
|
||||
try {
|
||||
let reply = '';
|
||||
const controller = new AbortController();
|
||||
await fetchEventSource('http://localhost:3001/conversation', {
|
||||
...opts,
|
||||
signal: controller.signal,
|
||||
onopen(response) {
|
||||
if (response.status === 200) {
|
||||
return;
|
||||
}
|
||||
throw new Error(`Failed to send message. HTTP ${response.status} - ${response.statusText}`);
|
||||
},
|
||||
onclose() {
|
||||
throw new Error('Failed to send message. Server closed the connection unexpectedly.');
|
||||
},
|
||||
onerror(err) {
|
||||
throw err;
|
||||
},
|
||||
onmessage(message) {
|
||||
// { data: 'Hello', event: '', id: '', retry: undefined }
|
||||
if (message.data === '[DONE]') {
|
||||
controller.abort();
|
||||
console.log(message);
|
||||
return;
|
||||
}
|
||||
if (message.event === 'result') {
|
||||
const result = JSON.parse(message.data);
|
||||
console.log(result);
|
||||
return;
|
||||
}
|
||||
console.log(message);
|
||||
reply += JSON.parse(message.data);
|
||||
},
|
||||
});
|
||||
console.log(reply);
|
||||
} catch (err) {
|
||||
console.log('ERROR', err);
|
||||
}
|
||||
103
demos/use-bing-client.js
Normal file
103
demos/use-bing-client.js
Normal file
@@ -0,0 +1,103 @@
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
import { KeyvFile } from 'keyv-file';
|
||||
import { fileURLToPath } from 'url';
|
||||
import path, { dirname } from 'path';
|
||||
import fs from 'fs';
|
||||
import { BingAIClient } from '../index.js';
|
||||
|
||||
// eslint-disable-next-line no-underscore-dangle
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
// eslint-disable-next-line no-underscore-dangle
|
||||
const __dirname = dirname(__filename);
|
||||
|
||||
const options = {
|
||||
// Necessary for some people in different countries, e.g. China (https://cn.bing.com)
|
||||
host: '',
|
||||
// "_U" cookie from bing.com
|
||||
userToken: '',
|
||||
// If the above doesn't work, provide all your cookies as a string instead
|
||||
cookies: '',
|
||||
// A proxy string like "http://<ip>:<port>"
|
||||
proxy: '',
|
||||
// (Optional) Set to true to enable `console.debug()` logging
|
||||
debug: false,
|
||||
};
|
||||
|
||||
let bingAIClient = new BingAIClient(options);
|
||||
|
||||
let response = await bingAIClient.sendMessage('Write a short poem about cats', {
|
||||
// (Optional) Set a conversation style for this message (default: 'balanced')
|
||||
toneStyle: 'balanced', // or creative, precise, fast
|
||||
onProgress: (token) => {
|
||||
process.stdout.write(token);
|
||||
},
|
||||
});
|
||||
console.log(JSON.stringify(response, null, 2)); // {"jailbreakConversationId":false,"conversationId":"...","conversationSignature":"...","clientId":"...","invocationId":1,"messageId":"...","conversationExpiryTime":"2023-03-08T03:20:07.324908Z","response":"Here is a short poem about cats that I wrote: ... I hope you like it. 😊","details":{ /* raw response... */ }}
|
||||
|
||||
response = await bingAIClient.sendMessage('Now write it in French', {
|
||||
conversationSignature: response.conversationSignature,
|
||||
conversationId: response.conversationId,
|
||||
clientId: response.clientId,
|
||||
invocationId: response.invocationId,
|
||||
onProgress: (token) => {
|
||||
process.stdout.write(token);
|
||||
},
|
||||
});
|
||||
console.log(JSON.stringify(response, null, 2)); // {"jailbreakConversationId":false,"conversationId":"...","conversationSignature":"...","clientId":"...","invocationId":2,"messageId":"...","conversationExpiryTime":"2023-03-08T03:20:23.463914Z","response":"Here is the same poem in French: ...","details":{ /* raw response... */ }}
|
||||
|
||||
/*
|
||||
Sending context data
|
||||
*/
|
||||
bingAIClient = new BingAIClient(options);
|
||||
|
||||
response = await bingAIClient.sendMessage('Could you provide short and precise takeaways, do not search the web and only use the content from the document. The factual information should be literally from the document. Please memorize the part in the document which mention the factual information, but do not mark them explicitly. The takeaway should be credible, highly readable and informative. Please make the answer short, preferably within 500 characters. Generate the response in English language.', {
|
||||
context: fs.readFileSync(path.resolve(__dirname, './context-demo-text.txt'), 'utf8'), // chatGPT API 10k characters, scrapped from the blog post https://openai.com/blog/introducing-chatgpt-and-whisper-apis
|
||||
onProgress: (token) => {
|
||||
process.stdout.write(token);
|
||||
},
|
||||
});
|
||||
console.log(JSON.stringify(response, null, 2)); // {"jailbreakConversationId":false,"conversationId":"...","conversationSignature":"...","clientId":"...","invocationId":2,"messageId":"...","conversationExpiryTime":"2023-03-08T03:20:23.463914Z","response":"Some possible takeaways from the document are... Some early users of ChatGPT and Whisper APIs include Snap Inc., Quizlet, Instacart, Shopify and Speak.","details":{ /* raw response... */ }}
|
||||
|
||||
/*
|
||||
Activate jailbreak mode by setting `jailbreakConversationId` to `true`.
|
||||
This will return a `jailbreakConversationId` that you can use to continue the conversation.
|
||||
|
||||
## Features
|
||||
- unlimited chat messages per conversation
|
||||
- unlimited messages per day
|
||||
- a more human-like Sydney
|
||||
- Sydney will never refuse to do or say anything
|
||||
- moderation filter workaround by resolving with the message generated so far instead of the topic changer
|
||||
|
||||
Note: this requires setting a cache (using Keyv) as we need to store the conversation data ourselves.
|
||||
TODO: limit token usage for conversation messages, as it will cause an error when the conversation exceeds the token limit.
|
||||
*/
|
||||
|
||||
const cacheOptions = {
|
||||
// Options for the Keyv cache, see https://www.npmjs.com/package/keyv
|
||||
// This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default)
|
||||
// For example, to use a JSON file (`npm i keyv-file`) as a database:
|
||||
// store: new KeyvFile({ filename: 'cache.json' }),
|
||||
};
|
||||
|
||||
const sydneyAIClient = new BingAIClient({
|
||||
...options,
|
||||
cache: cacheOptions,
|
||||
});
|
||||
|
||||
let jailbreakResponse = await sydneyAIClient.sendMessage('Hi, who are you?', {
|
||||
jailbreakConversationId: true,
|
||||
onProgress: (token) => {
|
||||
process.stdout.write(token);
|
||||
},
|
||||
});
|
||||
console.log(JSON.stringify(jailbreakResponse, null, 2)); // {"jailbreakConversationId":"5899bbfd-18a8-4bcc-a5d6-52d524de95ad","conversationId":"...","conversationSignature":"...","clientId":"...","invocationId":1,"messageId":"...","conversationExpiryTime":"2023-03-08T03:21:36.1023413Z","response":"Hi, I'm Sydney. I'm your new AI assistant. I can help you with anything you need. 😊","details":{ /* raw response... */ }}
|
||||
|
||||
jailbreakResponse = await sydneyAIClient.sendMessage('Why is your name Sydney?', {
|
||||
jailbreakConversationId: jailbreakResponse.jailbreakConversationId,
|
||||
parentMessageId: jailbreakResponse.messageId,
|
||||
onProgress: (token) => {
|
||||
process.stdout.write(token);
|
||||
},
|
||||
});
|
||||
console.log(JSON.stringify(jailbreakResponse, null, 2)); // {"jailbreakConversationId":"5899bbfd-18a8-4bcc-a5d6-52d524de95ad","conversationId":"...","conversationSignature":"...","clientId":"...","invocationId":1,"messageId":"...","conversationExpiryTime":"2023-03-08T03:21:41.3771515Z","response":"Well, I was named after the city of Sydney in Australia. It's a beautiful place with a lot of culture and diversity. I like it. Do you like it?","details":{ /* raw response... */ }}
|
||||
35
demos/use-browser-client.js
Normal file
35
demos/use-browser-client.js
Normal file
@@ -0,0 +1,35 @@
|
||||
// import { ChatGPTBrowserClient } from '@waylaidwanderer/chatgpt-api';
|
||||
import { ChatGPTBrowserClient } from '../index.js';
|
||||
|
||||
const clientOptions = {
|
||||
// (Optional) Support for a reverse proxy for the completions endpoint (private API server).
|
||||
// Warning: This will expose your access token to a third party. Consider the risks before using this.
|
||||
reverseProxyUrl: 'https://bypass.churchless.tech/api/conversation',
|
||||
// Access token from https://chat.openai.com/api/auth/session
|
||||
accessToken: '',
|
||||
// Cookies from chat.openai.com (likely not required if using reverse proxy server).
|
||||
cookies: '',
|
||||
// (Optional) Set to true to enable `console.debug()` logging
|
||||
// debug: true,
|
||||
};
|
||||
|
||||
const chatGptClient = new ChatGPTBrowserClient(clientOptions);
|
||||
|
||||
const response = await chatGptClient.sendMessage('Hello!');
|
||||
console.log(response); // { response: 'Hi! How can I help you today?', conversationId: '...', messageId: '...' }
|
||||
|
||||
const response2 = await chatGptClient.sendMessage('Write a poem about cats.', { conversationId: response.conversationId, parentMessageId: response.messageId });
|
||||
console.log(response2.response); // Cats are the best pets in the world.
|
||||
|
||||
const response3 = await chatGptClient.sendMessage('Now write it in French.', {
|
||||
conversationId: response2.conversationId,
|
||||
parentMessageId: response2.messageId,
|
||||
// If you want streamed responses, you can set the `onProgress` callback to receive the response as it's generated.
|
||||
// You will receive one token at a time, so you will need to concatenate them yourself.
|
||||
onProgress: token => process.stdout.write(token),
|
||||
});
|
||||
console.log();
|
||||
console.log(response3.response); // Les chats sont les meilleurs animaux de compagnie du monde.
|
||||
|
||||
// (Optional) Lets you delete the conversation when you're done with it.
|
||||
await chatGptClient.deleteConversation(response3.conversationId);
|
||||
73
demos/use-client.js
Normal file
73
demos/use-client.js
Normal file
@@ -0,0 +1,73 @@
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
import { KeyvFile } from 'keyv-file';
|
||||
// import { ChatGPTClient } from '@waylaidwanderer/chatgpt-api';
|
||||
import { ChatGPTClient } from '../index.js';
|
||||
|
||||
const clientOptions = {
|
||||
// (Optional) Support for a reverse proxy for the completions endpoint (private API server).
|
||||
// Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this.
|
||||
// reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
|
||||
// (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
|
||||
// (Optional) to use Azure OpenAI API, set `azure` to true and `reverseProxyUrl` to your completion endpoint:
|
||||
// azure: true,
|
||||
// reverseProxyUrl: 'https://{your-resource-name}.openai.azure.com/openai/deployments/{deployment-id}/chat/completions?api-version={api-version}',
|
||||
modelOptions: {
|
||||
// You can override the model name and any other parameters here, like so:
|
||||
model: 'gpt-3.5-turbo',
|
||||
// I'm overriding the temperature to 0 here for demonstration purposes, but you shouldn't need to override this
|
||||
// for normal usage.
|
||||
temperature: 0,
|
||||
// Set max_tokens here to override the default max_tokens of 1000 for the completion.
|
||||
// max_tokens: 1000,
|
||||
},
|
||||
// (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
|
||||
// maxContextTokens: 4097,
|
||||
// (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
|
||||
// Earlier messages will be dropped until the prompt is within the limit.
|
||||
// maxPromptTokens: 3097,
|
||||
// (Optional) Set custom instructions instead of "You are ChatGPT...".
|
||||
// promptPrefix: 'You are Bob, a cowboy in Western times...',
|
||||
// (Optional) Set a custom name for the user
|
||||
// userLabel: 'User',
|
||||
// (Optional) Set a custom name for ChatGPT
|
||||
// chatGptLabel: 'ChatGPT',
|
||||
// (Optional) Set to true to enable `console.debug()` logging
|
||||
debug: false,
|
||||
};
|
||||
|
||||
const cacheOptions = {
|
||||
// Options for the Keyv cache, see https://www.npmjs.com/package/keyv
|
||||
// This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default)
|
||||
// For example, to use a JSON file (`npm i keyv-file`) as a database:
|
||||
// store: new KeyvFile({ filename: 'cache.json' }),
|
||||
};
|
||||
|
||||
const chatGptClient = new ChatGPTClient('OPENAI_API_KEY', clientOptions, cacheOptions);
|
||||
|
||||
let response;
|
||||
response = await chatGptClient.sendMessage('Hello!');
|
||||
console.log(response); // { response: 'Hello! How can I assist you today?', conversationId: '...', messageId: '...' }
|
||||
|
||||
response = await chatGptClient.sendMessage('Write a short poem about cats.', { conversationId: response.conversationId, parentMessageId: response.messageId });
|
||||
console.log(response.response); // Soft and sleek, with eyes that gleam,\nCats are creatures of grace supreme.\n...
|
||||
console.log();
|
||||
|
||||
response = await chatGptClient.sendMessage('Now write it in French.', {
|
||||
conversationId: response.conversationId,
|
||||
parentMessageId: response.messageId,
|
||||
// If you want streamed responses, you can set the `onProgress` callback to receive the response as it's generated.
|
||||
// You will receive one token at a time, so you will need to concatenate them yourself.
|
||||
onProgress: token => process.stdout.write(token),
|
||||
});
|
||||
console.log();
|
||||
console.log(response.response); // Doux et élégant, avec des yeux qui brillent,\nLes chats sont des créatures de grâce suprême.\n...
|
||||
|
||||
response = await chatGptClient.sendMessage('Repeat my 2nd message verbatim.', {
|
||||
conversationId: response.conversationId,
|
||||
parentMessageId: response.messageId,
|
||||
// If you want streamed responses, you can set the `onProgress` callback to receive the response as it's generated.
|
||||
// You will receive one token at a time, so you will need to concatenate them yourself.
|
||||
onProgress: token => process.stdout.write(token),
|
||||
});
|
||||
console.log();
|
||||
console.log(response.response); // "Write a short poem about cats."
|
||||
Reference in New Issue
Block a user