Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
45 commits
Select commit Hold shift + click to select a range
4fedf6e
feat(PluginsClient.js): add conversationId to options object in the c…
danny-avila Aug 5, 2023
501fff3
chore(api): update langchain dependency to version 0.0.123
danny-avila Aug 5, 2023
4bd5a76
fix(CodeInterpreter.js): add support for extracting environment from …
danny-avila Aug 6, 2023
c537e62
fix(tools): rename CodeInterpreterTools to E2BTools
danny-avila Aug 8, 2023
f9f4bcd
chore(PluginsClient.js): comment out unused import and function findM…
danny-avila Aug 8, 2023
0f52982
feat(PluginsClient.js): add support for CodeSherpa v2 plugin
danny-avila Aug 8, 2023
00ca5c5
feat(initializeFunctionsAgent.js): add prefix to agentArgs in initial…
danny-avila Aug 8, 2023
d280800
feat(PluginsClient.js): add ChatTool to the list of tools if it meets…
danny-avila Aug 8, 2023
d0c26c5
fix(initializeFunctionsAgent.js): update PREFIX message to include sh…
danny-avila Aug 8, 2023
4da845d
chore: rebuild package-lock after rebase
danny-avila Aug 23, 2023
9a0ddbc
chore: remove deleted file from rebase
danny-avila Aug 23, 2023
36639ca
wip: refactor plugin message handling to mirror chat.openai.com, hand…
danny-avila Aug 24, 2023
39393ca
wip: new plugin handling
danny-avila Aug 24, 2023
61fcd24
wip: show multiple plugins handling
danny-avila Aug 24, 2023
97da28c
feat(plugins): save new plugins array
danny-avila Aug 25, 2023
ef776f9
chore: bump langchain
danny-avila Aug 25, 2023
3fb00fe
feat(experimental): support streaming in between plugins
danny-avila Aug 25, 2023
51946cf
refactor(PluginsClient): factor out helper methods to avoid bloating …
danny-avila Aug 26, 2023
27eef3a
fix(handleTools): fix tests by adding condition to return original to…
danny-avila Aug 26, 2023
478d461
refactor(MessageContent): Allow the last index to be last in case it …
danny-avila Aug 26, 2023
ae147f6
feat(Plugins): add handleParsingErrors, useful when LLM does not invo…
danny-avila Aug 26, 2023
25bfe74
chore: edit out experimental codesherpa integration
danny-avila Aug 26, 2023
6c305c0
refactor(OpenAPIPlugin): rework tool to be 'function-first', as the s…
danny-avila Aug 26, 2023
db2610f
refactor(initializeFunctionsAgent): improve error handling and system…
danny-avila Aug 26, 2023
34477b7
refactor(CodeSherpa, Wolfram): optimize token usage by delegating bul…
danny-avila Aug 26, 2023
ac58aae
style(Plugins): match official style with input/outputs
danny-avila Aug 26, 2023
5ed1cae
chore: remove unnecessary console logs used for testing
danny-avila Aug 26, 2023
5d76201
fix(abortMiddleware): render markdown when message is aborted
danny-avila Aug 26, 2023
dec1e3f
feat(plugins): add BrowserOp
danny-avila Aug 27, 2023
8e38196
refactor(OpenAPIPlugin): improve prompt handling
danny-avila Aug 27, 2023
bed47c2
fix(useGenerations): hide edit button when message is submitting/stre…
danny-avila Aug 27, 2023
1cb48c1
refactor(loadSpecs): optimize OpenAPI spec loading by only loading re…
danny-avila Aug 27, 2023
686f6f8
fix(loadSpecs): will retain original behavior when no tools are passe…
danny-avila Aug 27, 2023
339c32b
fix(MessageContent): ensure cursor only shows up for last message and…
danny-avila Aug 27, 2023
3f09fe1
chore: remove console.logs
danny-avila Aug 27, 2023
7980961
docs: update docs based on breaking changes and new features
danny-avila Aug 28, 2023
d815db5
docs(azure): make plugins section more clear
danny-avila Aug 28, 2023
96b9541
refactor(structured/SD): change default payload to SD-WebUI to prefer…
danny-avila Aug 28, 2023
137b388
refactor(structured/SD): further improve system message prompt
danny-avila Aug 28, 2023
1946f4d
docs: update breaking changes after rebase
danny-avila Aug 28, 2023
951b911
refactor(MessageContent): factor out EditMessage, types, Container to…
danny-avila Aug 28, 2023
b90c6c9
fix(CodeInterpreter): linting errors
danny-avila Aug 28, 2023
0ec5558
chore: reduce browser console logs from message streams
danny-avila Aug 28, 2023
0f6c447
chore: re-enable debug logs for plugins/langchain to help with user t…
danny-avila Aug 28, 2023
8735798
chore(manifest.json): add [Experimental] tag to CodeInterpreter plugi…
danny-avila Aug 28, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
243 changes: 78 additions & 165 deletions api/app/clients/PluginsClient.js
Original file line number Diff line number Diff line change
@@ -1,12 +1,10 @@
const OpenAIClient = require('./OpenAIClient');
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { CallbackManager } = require('langchain/callbacks');
const { HumanChatMessage, AIChatMessage } = require('langchain/schema');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents/');
const { findMessageContent } = require('../../utils');
const { loadTools } = require('./tools/util');
const { addImages, createLLM, buildErrorInput, buildPromptPrefix } = require('./agents/methods/');
const { SelfReflectionTool } = require('./tools/');
const { HumanChatMessage, AIChatMessage } = require('langchain/schema');
const { instructions, imageInstructions, errorInstructions } = require('./prompts/instructions');
const { loadTools } = require('./tools/util');

class PluginsClient extends OpenAIClient {
constructor(apiKey, options = {}) {
Expand All @@ -19,89 +17,6 @@ class PluginsClient extends OpenAIClient {
this.executor = null;
}

getActions(input = null) {
let output = 'Internal thoughts & actions taken:\n"';
let actions = input || this.actions;

if (actions[0]?.action && this.functionsAgent) {
actions = actions.map((step) => ({
log: `Action: ${step.action?.tool || ''}\nInput: ${
JSON.stringify(step.action?.toolInput) || ''
}\nObservation: ${step.observation}`,
}));
} else if (actions[0]?.action) {
actions = actions.map((step) => ({
log: `${step.action.log}\nObservation: ${step.observation}`,
}));
}

actions.forEach((actionObj, index) => {
output += `${actionObj.log}`;
if (index < actions.length - 1) {
output += '\n';
}
});

return output + '"';
}

buildErrorInput(message, errorMessage) {
const log = errorMessage.includes('Could not parse LLM output:')
? `A formatting error occurred with your response to the human's last message. You didn't follow the formatting instructions. Remember to ${instructions}`
: `You encountered an error while replying to the human's last message. Attempt to answer again or admit an answer cannot be given.\nError: ${errorMessage}`;

return `
${log}

${this.getActions()}

Human's last message: ${message}
`;
}

/**
 * Builds the system prompt prefix for the final, user-facing completion pass
 * that runs after the agent has finished its tool calls.
 *
 * Returns `null` when there is nothing worth refining: no agent output,
 * output containing 'N/A', or a single intermediate step whose toolInput
 * was 'N/A' (the agent effectively did nothing).
 *
 * @param {Object} result - executor result; reads `output`,
 *   `intermediateSteps`, and `errorMessage`.
 * @param {string} message - the user's last message, embedded verbatim.
 * @returns {string|null} the prompt prefix, or null to skip refinement.
 */
buildPromptPrefix(result, message) {
  // No usable answer produced: skip the refinement pass.
  if ((result.output && result.output.includes('N/A')) || result.output === undefined) {
    return null;
  }

  // A lone step with toolInput 'N/A' means the agent declined to act.
  if (
    result?.intermediateSteps?.length === 1 &&
    result?.intermediateSteps[0]?.action?.toolInput === 'N/A'
  ) {
    return null;
  }

  // Transcript of the agent's tool calls, formatted by getActions().
  const internalActions =
    result?.intermediateSteps?.length > 0
      ? this.getActions(result.intermediateSteps)
      : 'Internal Actions Taken: None';

  // Add image-handling instructions only when an image tool shows up in the log.
  const toolBasedInstructions = internalActions.toLowerCase().includes('image')
    ? imageInstructions
    : '';

  const errorMessage = result.errorMessage ? `${errorInstructions} ${result.errorMessage}\n` : '';

  const preliminaryAnswer =
    result.output?.length > 0 ? `Preliminary Answer: "${result.output.trim()}"` : '';
  // Wording differs depending on whether there is a draft answer to improve.
  const prefix = preliminaryAnswer
    ? 'review and improve the answer you generated using plugins in response to the User Message below. The user hasn\'t seen your answer or thoughts yet.'
    : 'respond to the User Message below based on your preliminary thoughts & actions.';

  // NOTE: the template below is model-facing prompt text; its exact wording
  // and line breaks are intentional — do not re-indent or reflow it.
  return `As a helpful AI Assistant, ${prefix}${errorMessage}\n${internalActions}
${preliminaryAnswer}
Reply conversationally to the User based on your ${
  preliminaryAnswer ? 'preliminary answer, ' : ''
}internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
${
  preliminaryAnswer
    ? ''
    : '\nIf there is an incomplete thought or action, you are expected to complete it in your response now.\n'
}You must cite sources if you are using any web links. ${toolBasedInstructions}
Only respond with your conversational reply to the following User Message:
"${message}"`;
}

setOptions(options) {
this.agentOptions = options.agentOptions;
this.functionsAgent = this.agentOptions?.agent === 'functions';
Expand Down Expand Up @@ -149,27 +64,6 @@ Only respond with your conversational reply to the following User Message:
};
}

createLLM(modelOptions, configOptions) {
let azure = {};
let credentials = { openAIApiKey: this.openAIApiKey };
let configuration = {
apiKey: this.openAIApiKey,
};

if (this.azure) {
credentials = {};
configuration = {};
({ azure } = this);
}

if (this.options.debug) {
console.debug('createLLM: configOptions');
console.debug(configOptions);
}

return new ChatOpenAI({ credentials, configuration, ...azure, ...modelOptions }, configOptions);
}

async initialize({ user, message, onAgentAction, onChainEnd, signal }) {
const modelOptions = {
modelName: this.agentOptions.model,
Expand All @@ -182,35 +76,36 @@ Only respond with your conversational reply to the following User Message:
configOptions.basePath = this.langchainProxy;
}

const model = this.createLLM(modelOptions, configOptions);
const model = createLLM({
modelOptions,
configOptions,
openAIApiKey: this.openAIApiKey,
azure: this.azure,
});

if (this.options.debug) {
console.debug(
`<-----Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}----->`,
);
}

this.availableTools = await loadTools({
this.tools = await loadTools({
user,
model,
tools: this.options.tools,
functions: this.functionsAgent,
options: {
openAIApiKey: this.openAIApiKey,
conversationId: this.conversationId,
debug: this.options?.debug,
message,
},
});
// load tools
for (const tool of this.options.tools) {
const validTool = this.availableTools[tool];

if (tool === 'plugins') {
const plugins = await validTool();
this.tools = [...this.tools, ...plugins];
} else if (validTool) {
this.tools.push(await validTool());
}

if (this.tools.length > 0 && !this.functionsAgent) {
this.tools.push(new SelfReflectionTool({ message, isGpt3: false }));
} else if (this.tools.length === 0) {
return;
}

if (this.options.debug) {
Expand All @@ -220,21 +115,15 @@ Only respond with your conversational reply to the following User Message:
console.debug(this.tools.map((tool) => tool.name));
}

if (this.tools.length > 0 && !this.functionsAgent) {
this.tools.push(new SelfReflectionTool({ message, isGpt3: false }));
} else if (this.tools.length === 0) {
return;
}

const handleAction = (action, callback = null) => {
const handleAction = (action, runId, callback = null) => {
this.saveLatestAction(action);

if (this.options.debug) {
console.debug('Latest Agent Action ', this.actions[this.actions.length - 1]);
}

if (typeof callback === 'function') {
callback(action);
callback(action, runId);
}
};

Expand All @@ -258,8 +147,8 @@ Only respond with your conversational reply to the following User Message:
verbose: this.options.debug,
returnIntermediateSteps: true,
callbackManager: CallbackManager.fromHandlers({
async handleAgentAction(action) {
handleAction(action, onAgentAction);
async handleAgentAction(action, runId) {
handleAction(action, runId, onAgentAction);
},
async handleChainEnd(action) {
if (typeof onChainEnd === 'function') {
Expand All @@ -274,12 +163,17 @@ Only respond with your conversational reply to the following User Message:
}
}

async executorCall(message, signal) {
async executorCall(message, { signal, stream, onToolStart, onToolEnd }) {
let errorMessage = '';
const maxAttempts = 1;

for (let attempts = 1; attempts <= maxAttempts; attempts++) {
const errorInput = this.buildErrorInput(message, errorMessage);
const errorInput = buildErrorInput({
message,
errorMessage,
actions: this.actions,
functionsAgent: this.functionsAgent,
});
const input = attempts > 1 ? errorInput : message;

if (this.options.debug) {
Expand All @@ -291,12 +185,28 @@ Only respond with your conversational reply to the following User Message:
}

try {
this.result = await this.executor.call({ input, signal });
this.result = await this.executor.call({ input, signal }, [
{
async handleToolStart(...args) {
await onToolStart(...args);
},
async handleToolEnd(...args) {
await onToolEnd(...args);
},
async handleLLMEnd(output) {
const { generations } = output;
const { text } = generations[0][0];
if (text && typeof stream === 'function') {
await stream(text);
}
},
},
]);
break; // Exit the loop if the function call is successful
} catch (err) {
console.error(err);
errorMessage = err.message;
const content = findMessageContent(message);
let content = '';
if (content) {
errorMessage = content;
break;
Expand All @@ -311,31 +221,6 @@ Only respond with your conversational reply to the following User Message:
}
}

addImages(intermediateSteps, responseMessage) {
if (!intermediateSteps || !responseMessage) {
return;
}

intermediateSteps.forEach((step) => {
const { observation } = step;
if (!observation || !observation.includes('![')) {
return;
}

// Extract the image file path from the observation
const observedImagePath = observation.match(/\(\/images\/.*\.\w*\)/g)[0];

// Check if the responseMessage already includes the image file path
if (!responseMessage.text.includes(observedImagePath)) {
// If the image file path is not found, append the whole observation
responseMessage.text += '\n' + observation;
if (this.options.debug) {
console.debug('added image from intermediateSteps');
}
}
});
}

async handleResponseMessage(responseMessage, saveOptions, user) {
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
responseMessage.completionTokens = responseMessage.tokenCount;
Expand All @@ -351,7 +236,9 @@ Only respond with your conversational reply to the following User Message:
this.setOptions(opts);
return super.sendMessage(message, opts);
}
console.log('Plugins sendMessage', message, opts);
if (this.options.debug) {
console.log('Plugins sendMessage', message, opts);
}
const {
user,
conversationId,
Expand All @@ -360,8 +247,11 @@ Only respond with your conversational reply to the following User Message:
userMessage,
onAgentAction,
onChainEnd,
onToolStart,
onToolEnd,
} = await this.handleStartMethods(message, opts);

this.conversationId = conversationId;
this.currentMessages.push(userMessage);

let {
Expand Down Expand Up @@ -413,19 +303,38 @@ Only respond with your conversational reply to the following User Message:
onAgentAction,
onChainEnd,
signal: this.abortController.signal,
onProgress: opts.onProgress,
});

// const stream = async (text) => {
// await this.generateTextStream.call(this, text, opts.onProgress, { delay: 1 });
// };
await this.executorCall(message, {
signal: this.abortController.signal,
// stream,
onToolStart,
onToolEnd,
});
await this.executorCall(message, this.abortController.signal);

// If message was aborted mid-generation
if (this.result?.errorMessage?.length > 0 && this.result?.errorMessage?.includes('cancel')) {
responseMessage.text = 'Cancelled.';
return await this.handleResponseMessage(responseMessage, saveOptions, user);
}

if (this.agentOptions.skipCompletion && this.result.output && this.functionsAgent) {
const partialText = opts.getPartialText();
const trimmedPartial = opts.getPartialText().replaceAll(':::plugin:::\n', '');
responseMessage.text =
trimmedPartial.length === 0 ? `${partialText}${this.result.output}` : partialText;
await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
return await this.handleResponseMessage(responseMessage, saveOptions, user);
}

if (this.agentOptions.skipCompletion && this.result.output) {
responseMessage.text = this.result.output;
this.addImages(this.result.intermediateSteps, responseMessage);
await this.generateTextStream(this.result.output, opts.onProgress, { delay: 8 });
addImages(this.result.intermediateSteps, responseMessage);
await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
return await this.handleResponseMessage(responseMessage, saveOptions, user);
}

Expand All @@ -434,7 +343,11 @@ Only respond with your conversational reply to the following User Message:
console.debug(this.result);
}

const promptPrefix = this.buildPromptPrefix(this.result, message);
const promptPrefix = buildPromptPrefix({
result: this.result,
message,
functionsAgent: this.functionsAgent,
});

if (this.options.debug) {
console.debug('Plugins: promptPrefix');
Expand Down
Loading