
Commit

Merge pull request #7 from interrobangc/feat/langchainPrompts
feat: rough implementation of langchain prompts
robert-bo-davis committed Mar 4, 2024
2 parents 2582034 + ae21c93 commit 456636c
Showing 30 changed files with 700 additions and 194 deletions.
cli/config.yml (27 changes: 21 additions & 6 deletions)
@@ -7,17 +7,32 @@ llmProvider: ollama # openai | ollama

 cacheDir: ../.cache
 
+# The format to use for the output of LLM messages so that the agent can understand it
+# is sent to the LLM in the initial system message and all subsequent user messages
+# when formatInUserMessage is true. This helps less powerful LLMs to "remember" how to
+# format the output for the user. If you are using a more powerful LLM and paying for
+# message tokens, you can set this to false and the agent will only send the format to
+# the LLM in the initial system message which will save you tokens and processing time.
+formatInUserMessage: true
+
 tools:
-  codeSummaryQuery:
-    module: '@codellm/codellm-tool-code-summary-query'
-  docSummaryQuery:
-    module: '@codellm/codellm-tool-doc-summary-query'
-  jsDependencyTree:
-    module: '@codellm/codellm-tool-js-dependency-tree'
+  codeElementsQuery:
+    module: '@codellm/codellm-tool-code-elements-query'
+    config:
+      vectorDbName: chromadb
+      vectorDbCollectionName: codeElements
+  codeSummaryQuery:
+    module: '@codellm/codellm-tool-code-summary-query'
+    config:
+      vectorDbName: chromadb
+      vectorDbCollectionName: codeSummary
+  # docSummaryQuery:
+  #   module: '@codellm/codellm-tool-doc-summary-query'
+  #   config:
+  #     vectorDbName: chromadb
+  #     vectorDbCollectionName: docSummary
+  # jsDependencyTree:
+  #   module: '@codellm/codellm-tool-js-dependency-tree'
 
 # The configuration for individual providers can be set here. You probably want to use
 # the OPEN_AI_API_KEY or MISTRAL_API_KEY environment variable instead of hardcoding your key here
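
The formatInUserMessage flag above trades token cost for reliability: the output format always goes into the system message, and the flag only controls whether it is repeated in every user message. A minimal TypeScript sketch of how such a flag might be applied when building user messages (buildUserMessage and FORMAT_PROMPT are illustrative names, not part of this commit):

type FormatConfig = { formatInUserMessage: boolean };

const FORMAT_PROMPT = 'Respond with YAML that matches the agent response schema.';

// Repeat the format instructions with each user message only when the flag is
// set; otherwise rely on the initial system message alone and save tokens.
const buildUserMessage = (config: FormatConfig, question: string): string =>
  config.formatInUserMessage ? `${FORMAT_PROMPT}\n\n${question}` : question;

// buildUserMessage({ formatInUserMessage: true }, 'What does parseFiles do?')
// prepends the format instructions; with false, only the question is sent.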
core/package.json (2 changes: 2 additions & 0 deletions)
@@ -17,11 +17,13 @@
"@langchain/core": "^0.1.40",
"crypto": "^1.0.1",
"globby": "^14.0.1",
"js-yaml": "^4.1.0",
"lodash": "^4.17.21",
"winston": "^3.11.0",
"zod": "^3.22.4"
},
"devDependencies": {
"@types/js-yaml": "^4.0.9",
"@types/lodash": "^4.14.202",
"tsc-alias": "^1.8.8"
}
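
The new js-yaml dependency (with its types under devDependencies) backs the switch from JSON to YAML for agent messages elsewhere in this commit; dump serializes an object to YAML and load parses it back. A quick round trip with illustrative values:

import { dump, load } from 'js-yaml';

const message = { type: 'tool', name: 'codeSummaryQuery', params: { query: 'parseFiles' } };
const encoded = dump(message);                    // YAML text for the LLM
const decoded = load(encoded) as typeof message;  // back to a plain object
console.log(decoded.params.query);                // 'parseFiles'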
core/src/agent/chat.test.ts (34 changes: 20 additions & 14 deletions)
@@ -1,4 +1,5 @@
 import { describe, expect, it, jest } from '@jest/globals';
+import { dump as dumpYaml } from 'js-yaml';
 
 import { decodeResponse } from './chat';
 
@@ -7,31 +8,36 @@ jest.mock('@/log/index.js');
 describe('decodeResponse', () => {
   it('should decode a valid response', () => {
     const response = {
-      type: 'response',
-      content: 'some content',
+      type: 'tool',
+      reason:
+        'To provide context for answering user questions, we can use the `codeSummaryQuery` tool, which searches relevant code snippets and summaries in the vector database. This tool will help us understand the codebase better.',
+      name: 'codeSummaryQuery',
+      params: {
+        includeCode: true,
+        query: 'parseFiles',
+      },
     };
-    const encodedResponse = JSON.stringify(response);
+    const encodedResponse = dumpYaml(response);
     expect(decodeResponse(encodedResponse)).toEqual(response);
   });
 
-  it('should handle an error decoding a response', () => {
+  it('should handle an error decoding an invalid json response', () => {
     const response = 'some invalid response';
-    expect(decodeResponse(response)).toEqual({
-      type: 'error',
-      content: response,
-    });
+
+    expect(decodeResponse(response)).toEqual(
+      expect.objectContaining({ type: 'error' }),
+    );
   });
 
-  it('should handle an error decoding a response', () => {
+  it('should handle an error decoding a valid json response with incorrect type', () => {
     const response = {
       type: 'response',
       text: 'some content',
     };
 
-    const encodedResponse = JSON.stringify(response);
-    expect(decodeResponse(encodedResponse)).toEqual({
-      type: 'error',
-      content: encodedResponse,
-    });
+    const encodedResponse = dumpYaml(response);
+    expect(decodeResponse(encodedResponse)).toEqual(
+      expect.objectContaining({ type: 'error' }),
+    );
   });
 });
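
The switch to expect.objectContaining follows from the new decodeResponse behavior in chat.ts: on failure it now returns the thrown error's text instead of echoing the original payload, and that text comes from either the YAML loader or the zod schema, so only the type is worth pinning. For instance (exact error wording varies by library version):

import { decodeResponse } from './chat';

// 'some invalid response' is valid YAML (a plain string scalar), so the failure
// here comes from the zod schema check rather than the YAML parser.
const result = decodeResponse('some invalid response');
// result.type === 'error'
// result.content holds the ZodError text rather than the original input.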
core/src/agent/chat.ts (166 changes: 98 additions & 68 deletions)
@@ -1,10 +1,13 @@
 import type { LlmClient, Llms, MessageList, Tools } from '@/.';
 
+import { load as loadYaml } from 'js-yaml';
 import { conversation } from '@/llm/index.js';
 import log from '@/log/index.js';
-import { getPrompt } from '@/prompt/index.js';
+import { newPrompt } from '@/prompt/index.js';
 import * as agentTypes from './types.js';
 
+const prompt = newPrompt();
+
/**
* Send a chat message to the LLM
*
@@ -28,27 +31,86 @@ export const decodeResponse = (
   content: string,
 ): agentTypes.AgentSelectToolResponse => {
   try {
-    return agentTypes.agentLlmResponseSchema.parse(JSON.parse(content.trim()));
+    return agentTypes.agentLlmResponseSchema.parse(loadYaml(content.trim()));
   } catch (e) {
-    log('Error decoding response', 'debug', { content, e });
+    log('Error decoding response', 'error', { content, e });
     return {
       type: 'error',
-      content,
+      content: String(e),
     };
   }
 };
 
-export const handleMessage = async (
-  llms: Llms,
-  message: string,
-  tools: Tools | undefined,
-  depth: number = 0,
-): Promise<agentTypes.AgentResponse> => {
+export const getToolResponses = (
+  toolResponses: agentTypes.AgentToolResponses,
+) => {
+  return Object.entries(toolResponses)
+    .map(
+      ([tool, response]) => `####${tool}:
+${response}
+`,
+    )
+    .join('\n');
+};
+
+export const handleToolResponse = async ({
+  llms,
+  response,
+  toolResponses,
+  tools,
+}: agentTypes.AgentHandleToolResponseParams): Promise<agentTypes.AgentToolResponses> => {
+  if (!agentTypes.isAgentToolResponse(response)) return toolResponses || {};
+  const toolName = response.name;
+
+  const tool = tools?.[toolName];
+
+  if (!tool) {
+    log('Tool not found', 'error', { toolName, tools });
+    return {
+      ...toolResponses,
+      [toolName]: 'Tool not found',
+    };
+  }
+
+  log(`Running the ${response.name} tool`);
+
+  let toolResponse;
+  try {
+    toolResponse = await tool.run({
+      llm: llms.tool,
+      params: response.params,
+    });
+  } catch (e) {
+    log('Error running tool', 'error', { toolName, e });
+    return {
+      ...toolResponses,
+      [toolName]: 'Error running tool: ' + e,
+    };
+  }
+
+  return {
+    ...toolResponses,
+    [toolName]: toolResponse.content,
+  };
+};
+
+export const handleQuestion = async ({
+  depth = 0,
+  error = null,
+  llms,
+  question,
+  toolResponses = {},
+  tools,
+}: agentTypes.AgentHandleQuestionParams): Promise<agentTypes.AgentResponse> => {
   const messages: MessageList = [];
 
   messages.push({
     role: 'user',
-    content: message,
+    content: await prompt.get('agentQuestion', {
+      error,
+      question,
+      toolResponses: getToolResponses(toolResponses),
+    }),
   });
 
   const response = decodeResponse(await sendChat(llms.agent, messages));
@@ -58,15 +120,14 @@ export const handleMessage = async (
   });
 
   if (agentTypes.isAgentErrorResponse(response)) {
-    conversation.addMessages('agent', [
-      {
-        role: 'user',
-        content: `
-          Be sure to use the correct json format in all further responses.
-        `,
-      },
-    ]);
-    return handleMessage(llms, message, tools, depth + 1);
+    return handleQuestion({
+      depth: depth + 1,
+      error: response.content,
+      llms,
+      question,
+      toolResponses,
+      tools,
+    });
   }
 
   if (agentTypes.isAgentResponseResponse(response)) {
@@ -81,49 +142,18 @@ export const handleMessage = async (
   }
 
   // eslint-disable-next-line @typescript-eslint/no-use-before-define
-  return handleToolResponse(llms, message, response, tools, depth);
-};
-
-export const handleToolResponse = async (
-  llms: Llms,
-  message: string,
-  response: agentTypes.AgentSelectToolResponse,
-  tools: Tools | undefined,
-  depth: number = 0,
-) => {
-  if (!agentTypes.isAgentToolResponse(response)) return response;
-
-  const tool = tools?.[response.name];
-
-  if (!tool) {
-    log('Tool not found', 'debug', { response, tools });
-    const toolNotFoundMessage = `
-      ${message}
-      The ${response.name} tool was not found. Please select a different tool.
-    `;
-
-    return handleMessage(llms, toolNotFoundMessage, tools, depth + 1);
-  }
-
-  log(`Running the ${response.name} tool`);
-
-  const toolResponse = await tool.run({
-    llm: llms.tool,
-    params: response.params,
+  return handleQuestion({
+    depth: depth + 1,
+    llms,
+    question,
+    toolResponses: await handleToolResponse({
+      llms,
+      response,
+      toolResponses,
+      tools,
+    }),
+    tools,
   });
-
-  const toolResponseMessage = `
-    ${message}
-    ${getPrompt('toolResponseStart')}
-    Tool: ${response.name}
-    Response:
-    ${toolResponse.content}
-  `;
-
-  return handleMessage(llms, toolResponseMessage, tools, depth + 1);
 };
 
 /**
@@ -139,12 +169,12 @@ export const handleToolResponse = async (
 export const chat =
   (llms: Llms, tools: Tools | undefined) =>
   async (question: string): Promise<agentTypes.AgentResponse> => {
-    const message = `
-      ${getPrompt('userQuestionStart')}
-      ${question}
-    `;
-
-    return handleMessage(llms, message, tools);
+    log('chat', 'debug', { question });
+    return handleQuestion({
+      llms,
+      question,
+      tools,
+    });
   };
 
 export default chat;
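
Rather than threading an ever-growing message string through the recursion, handleQuestion now re-renders the agentQuestion prompt on every pass from the accumulated toolResponses map, and getToolResponses is the small formatter that flattens that map into text. With a single entry (the tool output is illustrative):

import { getToolResponses } from './chat.js';

const block = getToolResponses({
  codeSummaryQuery: 'parseFiles reads each source file and chunks it for embedding.',
});
// block === '####codeSummaryQuery:\n' +
//   'parseFiles reads each source file and chunks it for embedding.\n'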
core/src/agent/newAgent.ts (16 changes: 5 additions & 11 deletions)
@@ -1,15 +1,11 @@
-import type { Agent, PartialConfig, Tools } from '@/.';
+import type { Agent, PartialConfig } from '@/.';
 import { getConfig, initConfig } from '@/config/index.js';
 import { conversation, initLlms } from '@/llm/index.js';
 import { log } from '@/log/index.js';
-import { getPrompt } from '@/prompt/index.js';
+import { initPrompts } from '@/prompt/index.js';
 import { initTools } from '@/tool/index.js';
 import chat from './chat.js';
 
-export const getToolDescriptions = (tools: Tools = {}) => {
-  return JSON.stringify(Object.values(tools).map((tool) => tool.description));
-};
-
/**
* Create a new agent which is the primary interface to interact with the LLMs
*
@@ -27,14 +23,12 @@ export const newAgent = async (configParam: PartialConfig): Promise<Agent> => {
   const tools = await initTools(config);
   log('newAgent tools', 'silly', { tools });
 
+  const prompt = initPrompts({ config, tools });
+
   conversation.addMessages('agent', [
     {
       role: 'system',
-      content: `
-        ${getPrompt('agent')}
-        ${getPrompt('selectTool')}
-        ${getToolDescriptions(tools)}
-      `,
+      content: await prompt.get('agentSystem'),
     },
   ]);
 
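The prompt package referenced here (initPrompts, newPrompt, prompt.get) is not among the hunks shown on this page; its call sites only imply a named-template lookup that resolves to a string. Given the PR title and the existing @langchain/core dependency, a plausible minimal sketch of that surface, with an illustrative template rather than the project's actual prompt text:

import { PromptTemplate } from '@langchain/core/prompts';

// Hypothetical registry; the real package presumably defines richer templates
// such as 'agentSystem' and 'agentQuestion' and wires in the tool descriptions.
const templates: Record<string, PromptTemplate> = {
  agentQuestion: PromptTemplate.fromTemplate(
    'Previous error (if any): {error}\nQuestion: {question}\nTool responses so far:\n{toolResponses}',
  ),
};

export const newPrompt = () => ({
  // format() resolves the template with the given values and returns a Promise<string>
  get: (name: string, params: Record<string, unknown> = {}) =>
    templates[name].format(params),
});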
core/src/agent/types.ts (21 changes: 20 additions & 1 deletion)
@@ -1,3 +1,5 @@
+import type { Llms, Tools } from '@/.';
+
 import { z } from 'zod';
 
 import { toolRunParamsParamSchema } from '@/tool/types.js';
@@ -28,7 +30,6 @@ export const agentToolResponseSchema = z.object({
   type: z.literal('tool'),
   reason: z.string(),
   name: z.string(),
-  query: z.string(),
   params: toolRunParamsParamSchema,
 });

Expand Down Expand Up @@ -73,3 +74,21 @@ export type AgentSelectToolResponse =
| AgentToolResponse;

export type AgentResponse = AgentErrorResponse | AgentResponseResponse;

export type AgentToolResponses = Record<string, string>;

export type AgentHandleQuestionParams = {
depth?: number;
error?: string | null;
llms: Llms;
question: string;
toolResponses?: AgentToolResponses;
tools: Tools | undefined;
};

export type AgentHandleToolResponseParams = {
llms: Llms;
response: AgentSelectToolResponse;
toolResponses: AgentToolResponses;
tools: Tools | undefined;
};
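
With query dropped from agentToolResponseSchema, a tool's arguments now travel only inside params. A response shaped like the one in the updated chat test should validate (assuming toolRunParamsParamSchema accepts a record of primitive values; its definition is not shown in this hunk):

import { agentToolResponseSchema } from './types.js';

// Parses cleanly: type/reason/name plus params, with no top-level `query` field.
agentToolResponseSchema.parse({
  type: 'tool',
  reason: 'Search the vector DB for code relevant to the question.',
  name: 'codeSummaryQuery',
  params: { includeCode: true, query: 'parseFiles' },
});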