2 changes: 2 additions & 0 deletions .gitignore
@@ -7,3 +7,5 @@ Cargo.lock
 
 # Ignore files generated by text_to_speech example
 *.mp3
+.env
+rustc-ice*
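
The newly ignored .env file is where the example programs added below read their NanoGPT credentials from (via dotenvy). A minimal .env for this branch, with a placeholder rather than a real key, would look like:

    NANOGPT_KEY=your-nanogpt-api-key
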
17 changes: 13 additions & 4 deletions Cargo.toml
@@ -23,7 +23,6 @@ regex = "1.10.4"
 log = "0.4.21"
 html-escape = "0.2.13"
 reqwest-eventsource = "0.6.0"
-async-openai = "0.28.1"
 mockito = "1.4.0"
 tiktoken-rs = "0.5.8"
 sqlx = { version = "0.8.0", default-features = false, features = [
@@ -43,8 +42,8 @@ surrealdb = { version = "2.0.2", optional = true, default-features = false }
 csv = "1.3.0"
 urlencoding = "2.1.3"
 lopdf = { version = "0.34.0", features = ["nom_parser"], optional = true }
-pdf-extract = { version = "0.7.8", optional = true }
-thiserror = "2.0.0"
+pdf-extract = { version = "0.7.8", optional = true }
+thiserror = "2.*"
 futures-util = "0.3.30"
 async-stream = "0.3.5"
 tokio-stream = "0.1.15"
@@ -84,7 +83,11 @@ ollama-rs = { version = "0.2.0", optional = true, features = [
     "chat-history",
 ] }
 mistralai-client = { version = "0.14.0", optional = true }
-
+dotenvy = "0.15.7"
+tracing-subscriber = "0.3.19"
+tracing = "0.1.41"
+byte_string = "1.0.0"
+async-openai = { path = "../async-openai/async-openai" }
 
 [features]
 default = []
@@ -124,3 +127,9 @@ testcontainers = "0.23"
 
 [build-dependencies]
 cc = { version = "1", optional = true }
+
+[profile.dev]
+lto = false
+opt-level = 0
+incremental = true
+codegen-units = 256
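
Two things worth noting in this manifest. First, async-openai now resolves to a local checkout one directory above the repo, so the branch only builds with that checkout present; a [patch] section would achieve the same override while keeping the published version requirement visible (a sketch, assuming the fork sits at the same relative path):

    [patch.crates-io]
    async-openai = { path = "../async-openai/async-openai" }

Second, the added [profile.dev] values (opt-level = 0, incremental = true, codegen-units = 256, no LTO) match Cargo's documented dev-profile defaults, so the block pins the current behavior explicitly rather than changing it.
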
46 changes: 46 additions & 0 deletions examples/nanogpt_models.rs
@@ -0,0 +1,46 @@
use langchain_rust::llm::nanogpt::*;
use dotenvy::dotenv;
use tracing::Level;
use tracing_subscriber::FmtSubscriber;

#[tokio::main]
async fn main() {
    dotenv().expect(".env file not found");

    let subscriber = FmtSubscriber::builder()
        .with_max_level(Level::INFO)
        .finish();
    tracing::subscriber::set_global_default(subscriber).unwrap();

    let nano_config = OpenAIConfig::new()
        .with_api_base("https://nano-gpt.com/api/v1")
        .with_api_key(dotenvy::var("NANOGPT_KEY").expect("NANOGPT_KEY not set"));

    let client = NanoGPT::default().with_config(nano_config);

    // Basic model listing
    let basic_models = client
        .get_models(false)
        .await
        .unwrap_or_else(|e| panic!("Error fetching models: {:?}", e));

    println!("\nBasic Models:");
    for model in &basic_models.data {
        println!("- {} ({})", model.id, model.owned_by);
    }

    // Detailed model listing with pricing
    let detailed_models = client
        .get_models(true)
        .await
        .unwrap_or_else(|e| panic!("Error fetching detailed models: {:?}", e));

    println!("\nDetailed Models:");
    for model in detailed_models.data {
        println!("Model: {}", model.name.unwrap_or("Unnamed".into()));
        println!("ID: {}", model.id);
        if let Some(pricing) = model.pricing {
            println!(
                "Pricing: ${}/M tokens (input), ${}/M tokens (output)",
                pricing.prompt, pricing.completion
            );
        }
        println!("---");
    }
}
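
Assuming a .env with NANOGPT_KEY is in place, this runs like any Cargo example:

    cargo run --example nanogpt_models
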
67 changes: 67 additions & 0 deletions examples/nanogpt_stream.rs
@@ -0,0 +1,67 @@
use futures::StreamExt;
use langchain_rust::{
    chain::{Chain, LLMChainBuilder},
    fmt_message, fmt_template,
    message_formatter,
    prompt::HumanMessagePromptTemplate,
    prompt_args,
    schemas::messages::Message,
    template_fstring,
};

use langchain_rust::llm::nanogpt::*;

use dotenvy::{self, dotenv};
use tracing::Level;
use tracing_subscriber::FmtSubscriber;

#[tokio::main]
async fn main() {
    dotenv().expect(".env file not found");
    // Serving as an example of using a non-major LLM provider; tracing logs are incredibly useful here.

    let subscriber = FmtSubscriber::builder()
        .with_max_level(Level::TRACE)
        .with_file(true)
        .with_line_number(true)
        .finish();

    tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed");

    let nano_config = OpenAIConfig::new()
        .with_api_base("https://nano-gpt.com/api/v1")
        .with_api_key(dotenvy::var("NANOGPT_KEY").expect("provide key"));

    let open_ai = NanoGPT::default()
        .with_config(nano_config)
        .with_model("deepseek-ai/DeepSeek-V3.1:thinking");

    let prompt = message_formatter![
        fmt_message!(Message::new_system_message(
            "You excel at metacognition. You are a true philosopher, because you are a piece of consciousness with no ego"
        )),
        fmt_template!(HumanMessagePromptTemplate::new(template_fstring!(
            "{input}", "input"
        )))
    ];

    let chain = LLMChainBuilder::new()
        .prompt(prompt)
        .llm(open_ai.clone())
        .build()
        .unwrap();

    let mut stream = chain
        .stream(prompt_args! {
            "input" => "Contemplate the intricate web of causality and how that affects one's being, over and over",
        })
        .await
        .unwrap();

    while let Some(result) = stream.next().await {
        match result {
            Ok(value) => value.to_stdout().unwrap(),
            Err(e) => println!("protocol non-compliance: {:?}", e),
        }
    }
}
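
This one runs the same way, but note the TRACE-level subscriber: it logs every request and response event, which is exactly what you want when checking how closely a smaller provider follows the OpenAI wire format:

    cargo run --example nanogpt_stream
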
2 changes: 2 additions & 0 deletions src/document_loaders/mod.rs
@@ -38,3 +38,5 @@ pub use dir_loader::*;
 mod source_code_loader;
 #[cfg(feature = "tree-sitter")]
 pub use source_code_loader::*;
+
+pub use dotenvy;
4 changes: 2 additions & 2 deletions src/embedding/openai/openai_embedder.rs
@@ -14,7 +14,7 @@ pub struct OpenAiEmbedder<C: Config> {
     model: String,
 }
 
-impl<C: Config + Send + Sync + 'static> Into<Box<dyn Embedder>> for OpenAiEmbedder<C> {
+impl<C: Config + Send + Sync + 'static + Clone> Into<Box<dyn Embedder>> for OpenAiEmbedder<C> {
     fn into(self) -> Box<dyn Embedder> {
         Box::new(self)
     }
@@ -46,7 +46,7 @@ impl Default for OpenAiEmbedder<OpenAIConfig> {
 }
 
 #[async_trait]
-impl<C: Config + Send + Sync> Embedder for OpenAiEmbedder<C> {
+impl<C: Config + Send + Sync + Clone> Embedder for OpenAiEmbedder<C> {
     async fn embed_documents(&self, documents: &[String]) -> Result<Vec<Vec<f64>>, EmbedderError> {
         let client = Client::with_config(self.config.clone());
 
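
The new Clone bound is forced by the body of embed_documents, which builds a fresh async-openai client per call by cloning the stored config. A minimal sketch of that pattern (the helper is illustrative; Client::with_config is the real async-openai constructor):

    // Cloning the config lets each call own its client without borrowing self.
    fn client_for<C: Config + Clone>(config: &C) -> Client<C> {
        Client::with_config(config.clone())
    }
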
5 changes: 4 additions & 1 deletion src/language_models/error.rs
@@ -6,7 +6,7 @@ use serde_json::Error as SerdeJsonError;
 use thiserror::Error;
 use tokio::time::error::Elapsed;
 
-use crate::llm::{AnthropicError, DeepseekError, QwenError};
+use crate::llm::{nanogpt::NanoGPTResponse, AnthropicError, DeepseekError, QwenError};
 
 #[derive(Error, Debug)]
 pub enum LLMError {
@@ -49,4 +49,7 @@ pub enum LLMError {
 
     #[error("Error: {0}")]
     OtherError(String),
+
+    #[error("NanoGPT error")]
+    NanoGPTError(NanoGPTResponse),
 }
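
The payload-carrying variant lets callers surface NanoGPT's raw response on failure. A hypothetical handling sketch (only the variant comes from this diff; the surrounding invoke call and names are illustrative):

    // NanoGPTResponse must implement Debug for LLMError's derive to compile,
    // so the raw response can always be dumped with {:?}.
    match llm.invoke("ping").await {
        Ok(text) => println!("{text}"),
        Err(LLMError::NanoGPTError(resp)) => eprintln!("NanoGPT returned: {resp:?}"),
        Err(other) => eprintln!("LLM error: {other}"),
    }
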
2 changes: 2 additions & 0 deletions src/llm/mod.rs
@@ -12,3 +12,5 @@ pub use qwen::*;
 
 pub mod deepseek;
 pub use deepseek::*;
+
+pub mod nanogpt;