ChatGPT
Generative Pre-trained Transformers (GPT)
ChatGPT is designed to handle multi-turn conversations, making it ideal for tasks that require back-and-forth interaction.
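
ChatGPT keeps the conversation for you, but when you build on the underlying API the client is stateless and your application resends the accumulated message history with every request. A minimal sketch, assuming the official `openai` Node SDK (v4) and the `gpt-3.5-turbo` model:

```js
const OpenAI = require('openai');

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

async function demo() {
  // The conversation lives in this array; the API itself keeps no state.
  const messages = [
    { role: 'system', content: 'You are a concise assistant.' },
    { role: 'user', content: 'Give me one tagline for a coffee shop.' },
  ];

  const first = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages,
  });
  // Keep the assistant turn in the history so the follow-up has context.
  messages.push({ role: 'assistant', content: first.choices[0].message.content });

  // Follow-up question that only makes sense with the previous turns attached.
  messages.push({ role: 'user', content: 'Make it shorter and add a pun.' });
  const second = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages,
  });
  console.log(second.choices[0].message.content);
}

demo();
```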
What is ChatGPT for?
- Text generation: ChatGPT can produce coherent, natural-sounding text in many formats, from articles and stories to answers to specific questions. This makes it possible to automate parts of a business; many companies already use such bots to answer customer questions and handle other tasks that would otherwise require salaried staff.
- Chatbots: chatbots are messaging systems that can hold natural conversations with a person. The model recognizes common patterns in the input and responds according to how it has been configured.
- Improves search engine accuracy: the model can significantly improve the accuracy of information-retrieval systems because it can infer the intent behind the keywords a user types (a short embedding sketch follows this list).
- Improves natural language processing: the long-standing problem with bots is that they cannot tell information apart from sentiment, so their results are often less accurate than expected; a large language model narrows that gap.
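
A hedged sketch of the search-accuracy idea above: a common pattern (one option, not the only one) is to embed documents and queries with the Embeddings API and rank by cosine similarity, so results match the user's intent rather than exact keywords. The model name `text-embedding-ada-002` and the sample documents are illustrative assumptions.

```js
const OpenAI = require('openai');
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Plain cosine similarity between two equal-length vectors.
function cosine(a, b) {
  let dot = 0, na = 0, nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (Math.sqrt(na) * Math.sqrt(nb));
}

async function search(query, documents) {
  // One request embeds the query and all documents together.
  const res = await openai.embeddings.create({
    model: 'text-embedding-ada-002',
    input: [query, ...documents],
  });
  const [queryVec, ...docVecs] = res.data.map((d) => d.embedding);
  return documents
    .map((text, i) => ({ text, score: cosine(queryVec, docVecs[i]) }))
    .sort((a, b) => b.score - a.score);
}

// "forgot my login credentials" should rank the password document first,
// even though it shares no keywords with it.
search('I forgot my login credentials', [
  'Shipping and returns policy',
  'How to reset your account password',
  'Pricing plans and billing',
]).then((ranked) => console.log(ranked));
```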
Models
| Model | Description |
| --- | --- |
| GPT-4 and GPT-4 Turbo | A set of models that improve on GPT-3.5 and can understand as well as generate natural language or code |
| GPT-3.5 | A set of models that improve on GPT-3 and can understand as well as generate natural language or code |
| DALL·E | A model that can generate and edit images given a natural language prompt |
| TTS | A set of models that can convert text into natural-sounding spoken audio |
| Whisper | A model that can convert audio into text |
| Embeddings | A set of models that can convert text into a numerical form |
| Moderation | A fine-tuned model that can detect whether text may be sensitive or unsafe |
| GPT base | A set of models without instruction following that can understand as well as generate natural language or code |
| GPT-3 (Legacy) | A set of models that can understand and generate natural language |
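
As a rough, hedged illustration of how the model families in the table map onto endpoints of the official `openai` Node SDK (v4); the concrete model IDs (`gpt-3.5-turbo`, `dall-e-3`, `whisper-1`, `tts-1`, `text-embedding-ada-002`) and file names are examples, not requirements:

```js
const fs = require('fs');
const OpenAI = require('openai');
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

async function tour() {
  // GPT-4 / GPT-3.5: chat completions
  const chat = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say hello in French.' }],
  });
  console.log(chat.choices[0].message.content);

  // DALL·E: image generation from a text prompt
  const image = await openai.images.generate({
    model: 'dall-e-3',
    prompt: 'a watercolor fox reading a book',
    n: 1,
    size: '1024x1024',
  });
  console.log(image.data[0].url);

  // Whisper: speech to text ('audio.mp3' is a placeholder path)
  const transcript = await openai.audio.transcriptions.create({
    model: 'whisper-1',
    file: fs.createReadStream('audio.mp3'),
  });
  console.log(transcript.text);

  // TTS: text to speech, returned as binary audio
  const speech = await openai.audio.speech.create({
    model: 'tts-1',
    voice: 'alloy',
    input: 'Hello from the cheat sheet.',
  });
  fs.writeFileSync('hello.mp3', Buffer.from(await speech.arrayBuffer()));

  // Embeddings: text into a numerical form
  const emb = await openai.embeddings.create({
    model: 'text-embedding-ada-002',
    input: 'numerical form of this sentence',
  });
  console.log(emb.data[0].embedding.length);

  // Moderation: flag potentially unsafe text
  const mod = await openai.moderations.create({ input: 'some user text' });
  console.log(mod.results[0].flagged);
}

tour();
```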
Terms
- Temperature: controls how creative you want the AI to be (on a scale of 0 to 1). A lower value makes the bot more deterministic, so it is more likely to say the same thing given the same prompt; a higher value gives it more flexibility, so repeated runs of the same prompt produce different responses. The default of 0.7 is good for most use cases (see the example after this list).
- Maximum length: limits how long the combined prompt and response can be. If you notice the AI stopping its response mid-sentence, you have probably hit the maximum length, so increase it a bit and test again.
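
A small sketch of both settings through the API, assuming the `openai` Node SDK; the API exposes the length limit as `max_tokens`, which is roughly what the playground's "Maximum length" slider controls:

```js
const OpenAI = require('openai');
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

async function compare() {
  const prompt = [{ role: 'user', content: 'Name a pet rabbit.' }];

  // Low temperature: nearly deterministic, the same prompt keeps giving the same kind of answer.
  const strict = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: prompt,
    temperature: 0,
    max_tokens: 20, // the response is cut off here if it runs long
  });

  // Higher temperature: more variety across repeated runs.
  const loose = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: prompt,
    temperature: 1,
    max_tokens: 20,
  });

  console.log(strict.choices[0].message.content);
  console.log(loose.choices[0].message.content);
  // finish_reason === 'length' means the max_tokens cap was hit mid-answer.
  console.log(loose.choices[0].finish_reason);
}

compare();
```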
Ask clever questions, get smarter responses
- Mona Lisa Replication Guidance
- Prompt improvisation
- 15 Rules For Crafting Effective GPT Chat Prompts (a short template sketch follows this list)
- Meta-prompting concept
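
A hypothetical prompt template in the spirit of the guides above; the field names and wording here are illustrative assumptions, not taken from any of the listed resources:

```js
// Compose a structured prompt from named parts so each rule
// (role, task, constraints, output format) is stated explicitly.
function buildPrompt({ role, task, constraints, format, input }) {
  return [
    `You are ${role}.`,
    `Task: ${task}`,
    `Constraints: ${constraints.join('; ')}`,
    `Respond in this format: ${format}`,
    '---',
    input,
  ].join('\n');
}

const prompt = buildPrompt({
  role: 'a senior technical editor',
  task: 'rewrite the text below for a general audience',
  constraints: ['keep it under 120 words', 'no jargon', 'neutral tone'],
  format: 'a single paragraph',
  input: 'Large language models estimate the probability of the next token...',
});

console.log(prompt);
```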
Best practices
- Clear and Specific Instructions: state exactly what you want, including the task, audience, format, and any constraints.
- Use System-Level Instructions: set the assistant's role and ground rules in a system message (see the sketch after this list).
- Iterative Conversations: treat the first answer as a draft and refine it over several follow-up turns.
- Limitations Acknowledgment: the model can give confident but wrong answers and has a knowledge cutoff, so verify anything important.
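
A short sketch tying the first two practices together, assuming the `openai` Node SDK; the system message carries the role and ground rules, and the user message carries one clear, specific request (all wording here is illustrative):

```js
const OpenAI = require('openai');
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

async function review() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [
      {
        // System-level instructions: role, tone, and hard rules.
        role: 'system',
        content:
          'You are a code reviewer. Be brief, list issues as bullet points, ' +
          'and say "I am not sure" instead of guessing.',
      },
      {
        // Clear and specific instructions: one task, explicit scope.
        role: 'user',
        content:
          'Review this function for bugs only (not style):\n' +
          'function add(a, b) { return a - b; }',
      },
    ],
  });
  console.log(completion.choices[0].message.content);
  // Iterate: feed the answer back with follow-up questions, and verify
  // anything important yourself, since the model can be confidently wrong.
}

review();
```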
Example: creating an API client that sends its requests through an HTTP(S) proxy, using the official `openai` Node SDK (v4) together with `https-proxy-agent`:

```js
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY, // read the key from the environment
  httpAgent: new HttpsProxyAgent(process.env.PROXY_URL), // route all API traffic through PROXY_URL
});
```
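
A follow-up usage sketch with the proxied client above; streaming is optional, but it shows the client in action (the model choice is an example):

```js
async function main() {
  // Stream the response token by token through the proxy configured above.
  const stream = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Explain HTTP proxies in one sentence.' }],
    stream: true,
  });
  for await (const chunk of stream) {
    process.stdout.write(chunk.choices[0]?.delta?.content || '');
  }
  process.stdout.write('\n');
}

main();
```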
For reference, the instantiated client object looks roughly like this when inspected (the API key shown is only a placeholder):

```js
const ai = {
baseURL: "https://api.openai.com/v1",
maxRetries: 2,
timeout: 600000,
httpAgent: {
_events: {
free: (socket, options) => {
const name = this.getName(options);
debug('agent.on(free)', name);
// TODO(ronag): socket.destroy(err) might have been called
// before coming here and have an 'error' scheduled. In the
// case of socket.destroy() below this 'error' has no handler
// and could cause unhandled exception.
if (!socket.writable) {
socket.destroy();
return;
}
const requests = this.requests[name];
if (requests && requests.length) {
const req = ArrayPrototypeShift(requests);
const reqAsyncRes = req[kRequestAsyncResource];
if (reqAsyncRes) {
// Run request within the original async context.
reqAsyncRes.runInAsyncScope(() => {
asyncResetHandle(socket);
setRequestSocket(this, req, socket);
});
req[kRequestAsyncResource] = null;
} else {
setRequestSocket(this, req, socket);
}
if (requests.length === 0) {
delete this.requests[name];
}
return;
}
// If there are no pending requests, then put it in
// the freeSockets pool, but only if we're allowed to do so.
const req = socket._httpMessage;
if (!req || !req.shouldKeepAlive || !this.keepAlive) {
socket.destroy();
return;
}
const freeSockets = this.freeSockets[name] || [];
const freeLen = freeSockets.length;
let count = freeLen;
if (this.sockets[name])
count += this.sockets[name].length;
if (this.totalSocketCount > this.maxTotalSockets ||
count > this.maxSockets ||
freeLen >= this.maxFreeSockets ||
!this.keepSocketAlive(socket)) {
socket.destroy();
return;
}
this.freeSockets[name] = freeSockets;
socket[async_id_symbol] = -1;
socket._httpMessage = null;
this.removeSocket(socket, options);
socket.once('error', freeSocketErrorListener);
ArrayPrototypePush(freeSockets, socket);
},
newListener: function maybeEnableKeylog(eventName) {
if (eventName === 'keylog') {
this.removeListener('newListener', maybeEnableKeylog);
// Future sockets will listen on keylog at creation.
const agent = this;
this[kOnKeylog] = function onkeylog(keylog) {
agent.emit('keylog', keylog, this);
};
// Existing sockets will start listening on keylog now.
const sockets = ObjectValues(this.sockets);
for (let i = 0; i < sockets.length; i++) {
sockets[i].on('keylog', this[kOnKeylog]);
}
}
},
},
_eventsCount: 2,
_maxListeners: undefined,
options: {
path: undefined,
},
requests: {
},
sockets: {
},
freeSockets: {
},
keepAliveMsecs: 1000,
keepAlive: false,
maxSockets: Infinity,
maxFreeSockets: 256,
scheduling: "lifo",
maxTotalSockets: Infinity,
totalSocketCount: 0,
proxy: {
},
proxyHeaders: {
},
connectOpts: {
ALPNProtocols: [
"http/1.1",
],
host: "localhost",
port: 59527,
},
},
fetch: function fetch(url, opts) {
// allow custom promise
if (!fetch.Promise) {
throw new Error('native promise missing, set fetch.Promise to your favorite alternative');
}
Body.Promise = fetch.Promise;
// wrap http.request into fetch
return new fetch.Promise(function (resolve, reject) {
// build request object
const request = new Request(url, opts);
const options = getNodeRequestOptions(request);
const send = (options.protocol === 'https:' ? https : http).request;
const signal = request.signal;
let response = null;
const abort = function abort() {
let error = new AbortError('The user aborted a request.');
reject(error);
if (request.body && request.body instanceof Stream.Readable) {
destroyStream(request.body, error);
}
if (!response || !response.body) return;
response.body.emit('error', error);
};
if (signal && signal.aborted) {
abort();
return;
}
const abortAndFinalize = function abortAndFinalize() {
abort();
finalize();
};
// send request
const req = send(options);
let reqTimeout;
if (signal) {
signal.addEventListener('abort', abortAndFinalize);
}
function finalize() {
req.abort();
if (signal) signal.removeEventListener('abort', abortAndFinalize);
clearTimeout(reqTimeout);
}
if (request.timeout) {
req.once('socket', function (socket) {
reqTimeout = setTimeout(function () {
reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout'));
finalize();
}, request.timeout);
});
}
req.on('error', function (err) {
reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err));
if (response && response.body) {
destroyStream(response.body, err);
}
finalize();
});
fixResponseChunkedTransferBadEnding(req, function (err) {
if (signal && signal.aborted) {
return;
}
if (response && response.body) {
destroyStream(response.body, err);
}
});
/* c8 ignore next 18 */
if (parseInt(process.version.substring(1)) < 14) {
// Before Node.js 14, pipeline() does not fully support async iterators and does not always
// properly handle when the socket close/end events are out of order.
req.on('socket', function (s) {
s.addListener('close', function (hadError) {
// if a data listener is still present we didn't end cleanly
const hasDataListener = s.listenerCount('data') > 0;
// if end happened before close but the socket didn't emit an error, do it now
if (response && hasDataListener && !hadError && !(signal && signal.aborted)) {
const err = new Error('Premature close');
err.code = 'ERR_STREAM_PREMATURE_CLOSE';
response.body.emit('error', err);
}
});
});
}
req.on('response', function (res) {
clearTimeout(reqTimeout);
const headers = createHeadersLenient(res.headers);
// HTTP fetch step 5
if (fetch.isRedirect(res.statusCode)) {
// HTTP fetch step 5.2
const location = headers.get('Location');
// HTTP fetch step 5.3
let locationURL = null;
try {
locationURL = location === null ? null : new URL$1(location, request.url).toString();
} catch (err) {
// error here can only be invalid URL in Location: header
// do not throw when options.redirect == manual
// let the user extract the erroneous redirect URL
if (request.redirect !== 'manual') {
reject(new FetchError(`uri requested responds with an invalid redirect URL: ${location}`, 'invalid-redirect'));
finalize();
return;
}
}
// HTTP fetch step 5.5
switch (request.redirect) {
case 'error':
reject(new FetchError(`uri requested responds with a redirect, redirect mode is set to error: ${request.url}`, 'no-redirect'));
finalize();
return;
case 'manual':
// node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL.
if (locationURL !== null) {
// handle corrupted header
try {
headers.set('Location', locationURL);
} catch (err) {
// istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request
reject(err);
}
}
break;
case 'follow':
// HTTP-redirect fetch step 2
if (locationURL === null) {
break;
}
// HTTP-redirect fetch step 5
if (request.counter >= request.follow) {
reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect'));
finalize();
return;
}
// HTTP-redirect fetch step 6 (counter increment)
// Create a new Request object.
const requestOpts = {
headers: new Headers(request.headers),
follow: request.follow,
counter: request.counter + 1,
agent: request.agent,
compress: request.compress,
method: request.method,
body: request.body,
signal: request.signal,
timeout: request.timeout,
size: request.size
};
if (!isDomainOrSubdomain(request.url, locationURL) || !isSameProtocol(request.url, locationURL)) {
for (const name of ['authorization', 'www-authenticate', 'cookie', 'cookie2']) {
requestOpts.headers.delete(name);
}
}
// HTTP-redirect fetch step 9
if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) {
reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect'));
finalize();
return;
}
// HTTP-redirect fetch step 11
if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') {
requestOpts.method = 'GET';
requestOpts.body = undefined;
requestOpts.headers.delete('content-length');
}
// HTTP-redirect fetch step 15
resolve(fetch(new Request(locationURL, requestOpts)));
finalize();
return;
}
}
// prepare response
res.once('end', function () {
if (signal) signal.removeEventListener('abort', abortAndFinalize);
});
let body = res.pipe(new PassThrough$1());
const response_options = {
url: request.url,
status: res.statusCode,
statusText: res.statusMessage,
headers: headers,
size: request.size,
timeout: request.timeout,
counter: request.counter
};
// HTTP-network fetch step 12.1.1.3
const codings = headers.get('Content-Encoding');
// HTTP-network fetch step 12.1.1.4: handle content codings
// in following scenarios we ignore compression support
// 1. compression support is disabled
// 2. HEAD request
// 3. no Content-Encoding header
// 4. no content response (204)
// 5. content not modified response (304)
if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) {
response = new Response(body, response_options);
resolve(response);
return;
}
// For Node v6+
// Be less strict when decoding compressed responses, since sometimes
// servers send slightly invalid responses that are still accepted
// by common browsers.
// Always using Z_SYNC_FLUSH is what cURL does.
const zlibOptions = {
flush: zlib.Z_SYNC_FLUSH,
finishFlush: zlib.Z_SYNC_FLUSH
};
// for gzip
if (codings == 'gzip' || codings == 'x-gzip') {
body = body.pipe(zlib.createGunzip(zlibOptions));
response = new Response(body, response_options);
resolve(response);
return;
}
// for deflate
if (codings == 'deflate' || codings == 'x-deflate') {
// handle the infamous raw deflate response from old servers
// a hack for old IIS and Apache servers
const raw = res.pipe(new PassThrough$1());
raw.once('data', function (chunk) {
// see http://stackoverflow.com/questions/37519828
if ((chunk[0] & 0x0F) === 0x08) {
body = body.pipe(zlib.createInflate());
} else {
body = body.pipe(zlib.createInflateRaw());
}
response = new Response(body, response_options);
resolve(response);
});
raw.on('end', function () {
// some old IIS servers return zero-length OK deflate responses, so 'data' is never emitted.
if (!response) {
response = new Response(body, response_options);
resolve(response);
}
});
return;
}
// for br
if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') {
body = body.pipe(zlib.createBrotliDecompress());
response = new Response(body, response_options);
resolve(response);
return;
}
// otherwise, use response as-is
response = new Response(body, response_options);
resolve(response);
});
writeToStream(req, request);
});
},
completions: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
chat: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
completions: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
},
edits: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
embeddings: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
files: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
images: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
audio: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
transcriptions: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
translations: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
speech: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
},
moderations: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
models: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
fineTuning: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
jobs: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
},
fineTunes: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
beta: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
chat: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
completions: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
},
assistants: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
files: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
},
threads: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
runs: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
steps: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
},
messages: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
files: {
client: [Circular],
get: function () { [native code] },
post: function () { [native code] },
patch: function () { [native code] },
put: function () { [native code] },
delete: function () { [native code] },
getAPIList: function () { [native code] },
},
},
},
},
_options: {
apiKey: "your super super secret key will be here",
organization: null,
httpAgent: {
  // identical to the HttpsProxyAgent instance shown above; duplicate dump omitted
},
baseURL: "https://api.openai.com/v1",
},
apiKey: "your super super secret key will be here",
organization: null,
}
```