Commit 7fbdedd

fix(ai-bot): login token refresh and update actor flow with agent
1 parent 58cc1bc commit 7fbdedd

17 files changed, +414 -248 lines changed

Lines changed: 89 additions & 0 deletions
@@ -0,0 +1,89 @@
+import { readFileSync } from "fs";
+import path from "path";
+import { type AIMessage, type ToolMessage } from "@langchain/core/messages.js";
+import { type DynamicStructuredTool } from "@langchain/core/tools.js";
+import { MemorySaver } from "@langchain/langgraph";
+import { createReactAgent } from "@langchain/langgraph/prebuilt";
+import { fp } from "@liexp/core/lib/fp/index.js";
+import { type TaskEither } from "fp-ts/lib/TaskEither.js";
+import { type LangchainContext } from "../../context/langchain.context.js";
+import { type LoggerContext } from "../../context/logger.context.js";
+import { ServerError } from "../../errors/index.js";
+import { AIMessageLogger } from "./aiMessage.helper.js";
+
+type Agent = ReturnType<typeof createReactAgent>;
+
+export type AgentProvider = {
+  agent: Agent;
+  tools: DynamicStructuredTool[];
+  invoke: (
+    input: Parameters<Agent["invoke"]>[0],
+    options: Parameters<Agent["invoke"]>[1],
+  ) => TaskEither<ServerError, Awaited<ReturnType<Agent["invoke"]>>>;
+  stream: (
+    input: Parameters<Agent["stream"]>[0],
+    options: Parameters<Agent["stream"]>[1],
+  ) => TaskEither<ServerError, (ToolMessage | AIMessage)[]>;
+};
+
+const toAgentError = (e: unknown) => {
+  // eslint-disable-next-line no-console
+  console.log(JSON.stringify(e, null, 2));
+  return ServerError.fromUnknown(e);
+};
+
+export const GetAgentProvider =
+  () =>
+  <C extends LangchainContext & LoggerContext>(
+    ctx: C,
+  ): TaskEither<ServerError, AgentProvider> => {
+    const aiMessageLogger = AIMessageLogger(ctx.logger);
+
+    return fp.TE.tryCatch(async () => {
+      // Initialize memory to persist state between graph runs
+      const agentCheckpointer = new MemorySaver();
+
+      const agent = createReactAgent({
+        llm: ctx.langchain.chat.withConfig({ tool_choice: "required" }),
+        tools: [],
+        checkpointSaver: agentCheckpointer,
+        prompt: readFileSync(path.resolve(process.cwd(), "AGENT.md"), "utf-8"),
+      });
+
+      ctx.logger.info.log(`Agent created: %s`, agent.getName());
+
+      const invoke = (
+        input: Parameters<typeof agent.invoke>[0],
+        options: Parameters<typeof agent.invoke>[1],
+      ): TaskEither<ServerError, Awaited<ReturnType<typeof agent.invoke>>> =>
+        fp.TE.tryCatch(() => {
+          ctx.logger.info.log(`Invoke agent with %O (%O)`, input, options);
+          return agent.invoke(input, options);
+        }, toAgentError);
+
+      const stream = (
+        input: Parameters<typeof agent.stream>[0],
+        options: Parameters<typeof agent.stream>[1],
+      ) =>
+        fp.TE.tryCatch(async () => {
+          ctx.logger.info.log(`Invoke agent with %O (%O)`, input, options);
+          const stream = await agent.stream(input, options);
+
+          const result: (ToolMessage | AIMessage)[] = [];
+          for await (const chunk of stream) {
+            if (Array.isArray(chunk.agent?.messages)) {
+              result.push(...(chunk.agent.messages as any[]));
+            }
+
+            const messages = (chunk.agent?.messages ??
+              chunk.tools?.messages ??
+              []) as (ToolMessage | AIMessage)[];
+
+            messages.forEach(aiMessageLogger);
+          }
+          return result;
+        }, toAgentError);
+
+      return Promise.resolve({ agent, tools: [], invoke, stream });
+    }, ServerError.fromUnknown);
+  };
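
For orientation, a minimal sketch of how this provider might be consumed from an fp-ts pipeline; the import path, the `ctx` value, and the thread id are placeholders and not part of this commit.

    import { pipe } from "fp-ts/lib/function.js";
    import { fp } from "@liexp/core/lib/fp/index.js";
    import { throwTE } from "@liexp/shared/lib/utils/task.utils.js";
    // Hypothetical path: the new provider file's location is not shown in this diff.
    import { type AgentProvider, GetAgentProvider } from "./agent.provider.js";

    // `ctx` must satisfy LangchainContext & LoggerContext.
    const runAgent = (ctx: any, question: string) =>
      pipe(
        GetAgentProvider()(ctx),
        fp.TE.chain((provider: AgentProvider) =>
          provider.stream(
            { messages: [question] },
            { configurable: { thread_id: "example-thread" } },
          ),
        ),
        throwTE, // unwrap the TaskEither into a Promise, rejecting on ServerError
      );
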
Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
+import { type AIMessage, type ToolMessage } from "@langchain/core/messages";
+import { type Logger } from "@liexp/core/lib/index.js";
+
+export const AIMessageLogger =
+  (logger: Logger) => (message: ToolMessage | AIMessage) => {
+    const content = message.content;
+
+    const tool_calls =
+      "tool_calls" in message ? (message.tool_calls ?? []) : [];
+
+    if (content !== "") {
+      logger.info.log(`Content: %O`, content);
+    }
+
+    tool_calls.forEach((t) => {
+      logger.info.log(`Run tool: %s: %O`, t.name, t.args);
+    });
+  };

packages/@liexp/backend/src/providers/ai/langchain.provider.ts

Lines changed: 1 addition & 1 deletion
@@ -114,7 +114,7 @@ export const GetLangchainProvider = (
     timeout: 60 * 30 * 1000, // 30 minutes
     maxConcurrency: 1,
     maxRetries: 2,
-    streaming: false,
+    streaming: true,
     ...opts.options?.chat,
     ...chatOptions,
     configuration: {

packages/@liexp/backend/src/queries/actors/fetchActors.query.ts

Lines changed: 2 additions & 2 deletions
@@ -56,9 +56,9 @@ export const fetchActors = <C extends DatabaseContext & ENVContext>(
 
   if (O.isSome(search)) {
     return q.andWhere(
-      "lower(unaccent(actors.fullName)) LIKE :fullName",
+      "lower(unaccent(actors.fullName)) ILIKE :search OR lower(unaccent(actors.username)) ILIKE :search",
       {
-        fullName: `%${search.value}%`,
+        search: `%${search.value}%`,
      },
     );
   }

packages/@liexp/backend/src/queries/links/fetchLinks.query.ts

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ import {
 import { DBService } from "../../services/db.service.js";
 import { addOrder } from "../../utils/orm.utils.js";
 
-const getListQueryEmpty: GetListLinkQuery = {
+export const getListQueryEmpty: GetListLinkQuery = {
   q: O.none(),
   ids: O.none(),
   _sort: O.none(),
Lines changed: 3 additions & 11 deletions
@@ -1,23 +1,15 @@
 import { type PromptFn } from "./prompt.type.js";
 
-export const EMBED_ACTOR_PROMPT: PromptFn<{
-  text: string;
-  question: string;
-}> = ({ vars: { text, question } }) => `
+export const EMBED_ACTOR_PROMPT: PromptFn = () => `
 You are an expert in giving description about people.
 Your goal is to give a description of a given person in a text format, including the requested fields, without inventing details.
+Execute the tools available to retrieve the info you need.
 The text should be minimum 100 words long, but not exceeding 300 words long.
 
 The requested fields are:
 - the name of the person
 - the birth date of the person (in the format "YYYY-MM-DD")
 - if has passed away, the death date of the person (in the format "YYYY-MM-DD")
 
-Here's the text you should use to extract the information from:
-
----------------------------------------------------------------
-${text}
----------------------------------------------------------------
-
-There may be an additional question which answer should be included in the body of the text: ${question}
+If the user poses a specific question, try to include the answer in your description.
 `;

packages/@liexp/shared/src/utils/user.utils.ts

Lines changed: 7 additions & 1 deletion
@@ -1,4 +1,9 @@
-import { AdminCreate, AdminDelete, AdminEdit } from "../io/http/User.js";
+import {
+  AdminCreate,
+  AdminDelete,
+  AdminEdit,
+  AdminRead,
+} from "../io/http/User.js";
 import { type User } from "../io/http/index.js";
 
 export const checkIsAdmin = (pp: readonly User.UserPermission[]): boolean => {
@@ -8,6 +13,7 @@ export const checkIsAdmin = (pp: readonly User.UserPermission[]): boolean => {
       AdminDelete.literals[0],
       AdminEdit.literals[0],
       AdminCreate.literals[0],
+      AdminRead.literals[0],
     ].some((p) => pp.includes(p))
   );
 };
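
In practical terms, this adds the read permission to the set that qualifies a user as admin; a small sketch of the resulting behaviour (the relative import of `checkIsAdmin` is assumed for the example):

    import { AdminRead } from "../io/http/User.js";
    import { checkIsAdmin } from "./user.utils.js"; // path assumed

    // Before this commit a user holding only the read permission was not
    // treated as an admin; now any of the four admin permissions is enough.
    const isAdmin = checkIsAdmin([AdminRead.literals[0]]); // true
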

services/ai-bot/AGENT.md

Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+# AI Agent Integration Guide
+
+This document describes how to integrate and configure AI agents to work with the lies.exposed platform using the Model Context Protocol (MCP) servers.
+
+## Overview
+
+The lies.exposed platform provides several MCP servers that expose domain-specific tools for working with various types of content. AI agents should proactively use these tools as their primary knowledge source and execution capability.
+
+When you identify a tool that can help with a task, you should:
+1. Execute the tool directly using the available methods
+2. Use the tool's response to complete the task
+3. Never output tool calls in your response - execute them instead
+
+When a tool call is needed, use it directly through the provided interface. Do not describe the tool call or show it in your response.
+
+### Available Tools
+
+The platform provides several domain-specific tools through MCP:
+
+#### Content Search Tools
+- `findActor` - Search for people and organizations
+- `findArea` - Search for geographical and thematic areas
+- `findEvents` - Search for events and timelines
+- `findGroup` - Search for groups and organizations
+- `findLink` - Search for web resources and references
+- `findMedia` - Search for images, videos and documents
+
+Each search tool supports:
+- Full-text search via the `query` parameter
+- Sorting options (typically by `createdAt` and resource-specific fields)
+- Order control (`ASC`/`DESC`)
+
+#### Content Parsing Tools
+- `blockNoteToText` - Convert BlockNote JSON documents to plain text
+
+### Tool Usage Guidelines
+
+1. **Proactive Tool Use**:
+   - AI agents should actively use MCP tools without waiting for explicit user permission
+   - Tools should be used as the primary source of domain knowledge
+   - Multiple tools can be queried in parallel when needed
+
+2. **Search Strategy**:
+   - Start with broad searches using key terms
+   - Refine searches based on initial results
+   - Use multiple content types to build comprehensive context
+
+3. **Content Processing**:
+   - Parse BlockNote content when encountering formatted text
+   - Extract key entities (actors, areas, events) from search results
+   - Build relationships between different content types
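
For illustration, a call to one of the search tools described above might look like the sketch below; only the `query` parameter is named in this guide, so the sorting and ordering field names are assumptions:

    // Hypothetical findActor invocation payload (field names beyond `query` are assumed).
    const findActorCall = {
      tool: "findActor",
      arguments: {
        query: "mario rossi", // full-text search term
        sort: "createdAt",    // assumed sort field
        order: "DESC",        // ASC or DESC
      },
    };
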

services/ai-bot/config/ai-bot.config.json

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
     "models": {
       "summarize": "gpt-4o",
       "chat": "qwen3-4b",
-      "embeddings": "qwen3-embedding-4b"
+      "embeddings": "qwen3-4b"
     }
   }
 }

services/ai-bot/src/cli/agent.command.ts

Lines changed: 17 additions & 48 deletions
@@ -1,69 +1,38 @@
-import { MemorySaver } from "@langchain/langgraph";
-import { createReactAgent } from "@langchain/langgraph/prebuilt";
+import { type AIMessage, type ToolMessage } from "@langchain/core/messages";
+import { AIMessageLogger } from "@liexp/backend/lib/providers/ai/aiMessage.helper.js";
 import { fp } from "@liexp/core/lib/fp/index.js";
+import { uuid } from "@liexp/shared/lib/io/http/Common/UUID.js";
 import { throwTE } from "@liexp/shared/lib/utils/task.utils.js";
 import { pipe } from "fp-ts/lib/function.js";
 import prompts from "prompts";
 import { toAIBotError } from "../common/error/index.js";
 import { type CommandFlow } from "./CommandFlow.js";
 
-// const GraphState = Annotation.Root({
-//   messages: Annotation<BaseMessage[]>({
-//     reducer: (x, y) => x.concat(y),
-//     default: () => [],
-//   }),
-// });
-
-// export const ConfigurationSchema = Annotation.Root({
-//   /**
-//    * The system prompt to be used by the agent.
-//    */
-//   systemPromptTemplate: Annotation<string>,
-
-//   /**
-//    * The name of the language model to be used by the agent.
-//    */
-//   model: Annotation<string>,
-// });
-
-// Define the function that determines whether to continue or not
-// function routeModelOutput(state: typeof MessagesAnnotation.State): string {
-//   const messages = state.messages;
-//   const lastMessage = messages[messages.length - 1];
-//   // If the LLM is invoking tools, route there.
-//   if ((lastMessage as AIMessage)?.tool_calls?.length || 0 > 0) {
-//     return "tools";
-//   }
-//   // Otherwise end the graph.
-//   else {
-//     return "__end__";
-//   }
-// }
-
 export const agentCommand: CommandFlow = async (ctx, args) => {
-  // Initialize memory to persist state between graph runs
-
-  const agentCheckpointer = new MemorySaver();
-  const agent = createReactAgent({
-    llm: ctx.langchain.chat,
-    tools: [],
-    checkpointSaver: agentCheckpointer,
+  const threadId = uuid();
+  const agent = ctx.agent.agent.withConfig({
+    configurable: {
+      thread_id: threadId,
+    },
   });
 
   const ask = async (ag: typeof agent, message: string) => {
     const agentFinalState = await ag.stream(
       {
         messages: [message],
       },
-      { configurable: { thread_id: "42" } },
+      {},
     );
 
-    const messages = [];
+    const aiMessageLogger = AIMessageLogger(ctx.logger);
+
     for await (const chunk of agentFinalState) {
-      ctx.logger.debug.log(`Chunk %O`, chunk.agent.messages);
-      messages.push(chunk);
+      const messages = (chunk.agent?.messages ??
+        chunk.tools?.messages ??
+        []) as (ToolMessage | AIMessage)[];
+
+      messages.forEach(aiMessageLogger);
     }
-    return messages;
   };
 
   const chat = async (ag: typeof agent) => {
@@ -73,7 +42,7 @@ export const agentCommand: CommandFlow = async (ctx, args) => {
       name: "question",
     });
 
-    if (question.toLowerCase() === "exit") {
+    if (!question || question.toLowerCase() === "exit") {
      ctx.logger.info.log("Goodbye!");
       return;
     } else {
