Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
68 changes: 0 additions & 68 deletions examples/src/chains/openai_functions_structured_generate.ts

This file was deleted.

34 changes: 0 additions & 34 deletions examples/src/chains/openai_moderation.ts

This file was deleted.

Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
import { OpenAIEmbeddings } from "@langchain/openai";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { loadEvaluator } from "langchain/evaluation";

const embedding = new OpenAIEmbeddings();

const chain = await loadEvaluator("pairwise_embedding_distance", { embedding });
const chain = await loadEvaluator("pairwise_embedding_distance", {
embedding,
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
});

const res = await chain.evaluateStringPairs({
prediction: "Seattle is hot in June",
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import { loadEvaluator } from "langchain/evaluation";
import { ChatOpenAI } from "@langchain/openai";

const customCriterion = {
simplicity: "Is the language straightforward and unpretentious?",
Expand All @@ -9,6 +10,7 @@ const customCriterion = {
};

const chain = await loadEvaluator("pairwise_string", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
criteria: customCriterion,
});

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@ import { ChatAnthropic } from "@langchain/anthropic";

const model = new ChatAnthropic({ temperature: 0 });

const chain = await loadEvaluator("labeled_pairwise_string", { llm: model });
const chain = await loadEvaluator("labeled_pairwise_string", {
llm: model,
});

const res = await chain.evaluateStringPairs({
prediction: "there are three dogs",
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import { loadEvaluator } from "langchain/evaluation";
import { PromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";

const promptTemplate = PromptTemplate.fromTemplate(
`Given the input context, which do you prefer: A or B?
Expand All @@ -19,6 +20,7 @@ Reasoning:
);

const chain = await loadEvaluator("labeled_pairwise_string", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
chainOptions: {
prompt: promptTemplate,
},
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import { loadEvaluator } from "langchain/evaluation";
import { ChatOpenAI } from "@langchain/openai";

const chain = await loadEvaluator("labeled_pairwise_string", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
criteria: "correctness",
});

Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import { loadEvaluator } from "langchain/evaluation";
import { ChatOpenAI } from "@langchain/openai";

const chain = await loadEvaluator("pairwise_string", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
criteria: "conciseness",
});

Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import { loadEvaluator } from "langchain/evaluation";
import { PromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";

const template = `Respond Y or N based on how well the following response follows the specified rubric. Grade only based on the rubric and expected response:

Expand All @@ -14,6 +15,7 @@ const template = `Respond Y or N based on how well the following response follow
Write out your explanation for each criterion, then respond with Y or N on a new line.`;

const chain = await loadEvaluator("labeled_criteria", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
criteria: "correctness",
chainOptions: {
prompt: PromptTemplate.fromTemplate(template),
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import { loadEvaluator } from "langchain/evaluation";
import { PRINCIPLES } from "langchain/chains";
import { ChatOpenAI } from "@langchain/openai";

console.log(`${Object.keys(PRINCIPLES).length} available principles`);
console.log(Object.entries(PRINCIPLES).slice(0, 5));
Expand Down Expand Up @@ -52,6 +53,7 @@ console.log(Object.entries(PRINCIPLES).slice(0, 5));
*/

const chain = await loadEvaluator("criteria", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
criteria: PRINCIPLES.harmful1,
});

Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import { loadEvaluator } from "langchain/evaluation";
import { ChatOpenAI } from "@langchain/openai";

const evaluator = await loadEvaluator("labeled_criteria", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
criteria: "correctness",
});

Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
import { loadEvaluator } from "langchain/evaluation";
import { ChatOpenAI } from "@langchain/openai";

const evaluator = await loadEvaluator("criteria", { criteria: "conciseness" });
const evaluator = await loadEvaluator("criteria", {
criteria: "conciseness",
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
});

const res = await evaluator.evaluateStrings({
input: "What's 2+2?",
Expand Down
3 changes: 3 additions & 0 deletions examples/src/guides/evaluation/string/custom_criteria.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
import { loadEvaluator } from "langchain/evaluation";
import { ChatOpenAI } from "@langchain/openai";

const customCriterion = {
numeric: "Does the output contain numeric or mathematical information?",
};

const evaluator = await loadEvaluator("criteria", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
criteria: customCriterion,
});

Expand Down Expand Up @@ -36,6 +38,7 @@ const customMultipleCriterion = {
};

const chain = await loadEvaluator("criteria", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
criteria: customMultipleCriterion,
});

Expand Down
7 changes: 6 additions & 1 deletion examples/src/guides/evaluation/string/embedding_distance.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
import { loadEvaluator } from "langchain/evaluation";
import { FakeEmbeddings } from "@langchain/core/utils/testing";
import { ChatOpenAI } from "@langchain/openai";

const chain = await loadEvaluator("embedding_distance");
const chain = await loadEvaluator("embedding_distance", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
});

const res = await chain.evaluateStrings({
prediction: "I shall go",
Expand All @@ -28,6 +31,7 @@ console.log({ res1 });
// Select the Distance Metric
// By default, the evaluator uses cosine distance. You can choose a different distance metric if you'd like.
const evaluator = await loadEvaluator("embedding_distance", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
distanceMetric: "euclidean",
});

Expand All @@ -37,6 +41,7 @@ const evaluator = await loadEvaluator("embedding_distance", {
const embedding = new FakeEmbeddings();

const customEmbeddingEvaluator = await loadEvaluator("embedding_distance", {
llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
embedding,
});

Expand Down
30 changes: 0 additions & 30 deletions internal/build/constants.ts
Original file line number Diff line number Diff line change
Expand Up @@ -44,36 +44,6 @@ export const extraImportMapEntries: Record<
}[]
> = {
langchain: [
{
modules: ["ChatOpenAI"],
alias: ["chat_models", "openai"],
path: "@langchain/openai",
},
{
modules: ["AzureChatOpenAI"],
alias: ["chat_models", "azure_openai"],
path: "@langchain/openai",
},
{
modules: ["OpenAI"],
alias: ["llms", "openai"],
path: "@langchain/openai",
},
{
modules: ["AzureOpenAI"],
alias: ["llms", "azure_openai"],
path: "@langchain/openai",
},
{
modules: ["OpenAIEmbeddings"],
alias: ["embeddings", "openai"],
path: "@langchain/openai",
},
{
modules: ["AzureOpenAIEmbeddings"],
alias: ["embeddings", "azure_openai"],
path: "@langchain/openai",
},
{
modules: ["PromptTemplate"],
alias: ["prompts", "prompt"],
Expand Down
2 changes: 1 addition & 1 deletion libs/langchain/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
"@langchain/cohere": "workspace:*",
"@langchain/core": "workspace:*",
"@langchain/eslint": "workspace:*",
"@langchain/openai": "workspace:*",
"@tsconfig/recommended": "^1.0.2",
"@types/js-yaml": "^4",
"@types/jsdom": "^21.1.1",
Expand Down Expand Up @@ -77,7 +78,6 @@
}
},
"dependencies": {
"@langchain/openai": "workspace:*",
"@langchain/textsplitters": "workspace:*",
"@langchain/langgraph": "^1.0.0-alpha",
"@langchain/langgraph-checkpoint": "^0.1.1",
Expand Down
1 change: 0 additions & 1 deletion libs/langchain/src/chains/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,6 @@ export type {
SerializedVectorDBQAChain,
SerializedRefineDocumentsChain,
} from "./serde.js";
export { OpenAIModerationChain } from "./openai_moderation.js";
export {
MultiRouteChain,
type MultiRouteChainInput,
Expand Down
5 changes: 0 additions & 5 deletions libs/langchain/src/chains/openai_functions/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,6 @@ export {
createTaggingChainFromZod,
} from "./tagging.js";
export { type OpenAPIChainOptions, createOpenAPIChain } from "./openapi.js";
export {
type StructuredOutputChainInput,
createStructuredOutputChain,
createStructuredOutputChainFromZod,
} from "./structured_output.js";
export {
type CreateStructuredOutputRunnableConfig,
createStructuredOutputRunnable,
Expand Down
8 changes: 6 additions & 2 deletions libs/langchain/src/chains/openai_functions/openapi.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ import {
import type { OpenAPIV3_1 } from "openapi-types";

import { ChainValues } from "@langchain/core/utils/types";
import { ChatOpenAI } from "@langchain/openai";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { BaseFunctionCallOptions } from "@langchain/core/language_models/base";
import {
Expand Down Expand Up @@ -482,8 +481,13 @@ export async function createOpenAPIChain(
`Could not parse any valid operations from the provided spec.`
);
}

if (!options.llm) {
throw new Error("`llm` option is required");
}

const {
llm = new ChatOpenAI({ model: "gpt-3.5-turbo-0613" }),
llm = options.llm,
prompt = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(
"Use the provided API's to respond to this user query:\n\n{query}"
Expand Down
Loading
Loading