Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions codex-rs/app-server-protocol/src/protocol.rs
Original file line number Diff line number Diff line change
Expand Up @@ -201,6 +201,10 @@ pub struct NewConversationParams {
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<String>,

/// Optional override for the review model name (e.g. "gpt-4o-mini").
#[serde(skip_serializing_if = "Option::is_none")]
pub review_model: Option<String>,

/// Configuration profile from config.toml to specify default options.
#[serde(skip_serializing_if = "Option::is_none")]
pub profile: Option<String>,
Expand Down Expand Up @@ -498,6 +502,7 @@ pub struct UserSavedConfig {
#[serde(rename_all = "camelCase")]
pub struct Profile {
pub model: Option<String>,
pub review_model: Option<String>,
/// The key in the `model_providers` map identifying the
/// [`ModelProviderInfo`] to use.
pub model_provider: Option<String>,
Expand Down Expand Up @@ -843,6 +848,7 @@ mod tests {
base_instructions: None,
include_plan_tool: None,
include_apply_patch_tool: None,
review_model: None,
},
};
assert_eq!(
Expand Down
3 changes: 2 additions & 1 deletion codex-rs/app-server/src/codex_message_processor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1327,6 +1327,7 @@ async fn derive_config_from_params(
) -> std::io::Result<Config> {
let NewConversationParams {
model,
review_model,
profile,
cwd,
approval_policy,
Expand All @@ -1338,7 +1339,7 @@ async fn derive_config_from_params(
} = params;
let overrides = ConfigOverrides {
model,
review_model: None,
review_model,
config_profile: profile,
cwd: cwd.map(PathBuf::from),
approval_policy,
Expand Down
2 changes: 2 additions & 0 deletions codex-rs/app-server/tests/suite/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ view_image = true

[profiles.test]
model = "gpt-4o"
review_model = "gpt-4o-mini"
approval_policy = "on-request"
model_reasoning_effort = "high"
model_reasoning_summary = "detailed"
Expand Down Expand Up @@ -105,6 +106,7 @@ async fn get_config_toml_parses_all_fields() {
"test".into(),
Profile {
model: Some("gpt-4o".into()),
review_model: Some("gpt-4o-mini".into()),
approval_policy: Some(AskForApproval::OnRequest),
model_reasoning_effort: Some(ReasoningEffort::High),
model_reasoning_summary: Some(ReasoningSummary::Detailed),
Expand Down
1 change: 1 addition & 0 deletions codex-rs/core/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1122,6 +1122,7 @@ impl Config {

// Default review model when not set in config; allow CLI override to take precedence.
let review_model = override_review_model
.or(config_profile.review_model)
.or(cfg.review_model)
.unwrap_or_else(default_review_model);

Expand Down
2 changes: 2 additions & 0 deletions codex-rs/core/src/config_profile.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ use codex_protocol::config_types::Verbosity;
#[derive(Debug, Clone, Default, PartialEq, Deserialize)]
pub struct ConfigProfile {
pub model: Option<String>,
pub review_model: Option<String>,
/// The key in the `model_providers` map identifying the
/// [`ModelProviderInfo`] to use.
pub model_provider: Option<String>,
Expand Down Expand Up @@ -38,6 +39,7 @@ impl From<ConfigProfile> for codex_app_server_protocol::Profile {
fn from(config_profile: ConfigProfile) -> Self {
Self {
model: config_profile.model,
review_model: config_profile.review_model,
model_provider: config_profile.model_provider,
approval_policy: config_profile.approval_policy,
model_reasoning_effort: config_profile.model_reasoning_effort,
Expand Down
97 changes: 97 additions & 0 deletions codex-rs/core/tests/suite/review.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@ use codex_core::REVIEW_PROMPT;
use codex_core::ResponseItem;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::load_config_as_toml_with_cli_overrides;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_core::protocol::ENVIRONMENT_CONTEXT_OPEN_TAG;
use codex_core::protocol::EventMsg;
Expand Down Expand Up @@ -338,6 +340,101 @@ async fn review_uses_custom_review_model_from_config() {
server.verify().await;
}

/// Ensure that when a custom `review_model` is set in a profile, the review
/// request uses that model (and not the main chat model or top-level config value).
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn review_uses_custom_review_model_from_profile() {
    skip_if_no_network!();

    // The mocked response stream only needs a single `completed` event.
    let sse_fixture = r#"[
{"type":"response.completed", "response": {"id": "__ID__"}}
]"#;
    let server = start_responses_server_with_sse(sse_fixture, 1).await;

    // Write a config.toml whose profile overrides both `model` and
    // `review_model`, so the test can tell all three candidates apart.
    let codex_home = TempDir::new().unwrap();
    std::fs::write(
        codex_home.path().join("config.toml"),
        r#"
model = "gpt-4.1"
review_model = "gpt-5-codex"

[profiles.test-profile]
model = "gpt-5"
review_model = "gpt-4.1"
"#,
    )
    .unwrap();

    // Parse the config file with the profile selected via a CLI-style override.
    let profile_override = (
        "config_profile".to_string(),
        toml::Value::String("test-profile".to_string()),
    );
    let config_toml =
        load_config_as_toml_with_cli_overrides(codex_home.path(), vec![profile_override])
            .await
            .expect("load config with profile");

    let overrides = ConfigOverrides {
        config_profile: Some("test-profile".to_string()),
        ..Default::default()
    };
    let mut config = Config::load_from_base_config_with_overrides(
        config_toml,
        overrides,
        codex_home.path().to_path_buf(),
    )
    .expect("create config from loaded toml");

    // Point the provider at the local mock server instead of the real API.
    config.model_provider = ModelProviderInfo {
        base_url: Some(format!("{}/v1", server.uri())),
        ..built_in_model_providers()["openai"].clone()
    };

    let conversation_manager =
        ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
    let codex = conversation_manager
        .new_conversation(config)
        .await
        .expect("create conversation")
        .conversation;

    let review_request = ReviewRequest {
        prompt: "use profile review model".to_string(),
        user_facing_hint: "use profile review model".to_string(),
    };
    codex.submit(Op::Review { review_request }).await.unwrap();

    // Drain the review lifecycle: entered, exited with no output, task complete.
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await;
    wait_for_event(&codex, |ev| {
        matches!(
            ev,
            EventMsg::ExitedReviewMode(ExitedReviewModeEvent {
                review_output: None
            })
        )
    })
    .await;
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    // The review request must carry the profile's review model ("gpt-4.1"),
    // not the profile chat model ("gpt-5") or the top-level review model
    // ("gpt-5-codex").
    let request = &server.received_requests().await.unwrap()[0];
    let body = request.body_json::<serde_json::Value>().unwrap();
    assert_eq!(body["model"].as_str().unwrap(), "gpt-4.1");

    server.verify().await;
}

/// When a review session begins, it must not prepend prior chat history from
/// the parent session. The request `input` should contain only the review
/// prompt from the user.
Expand Down
4 changes: 4 additions & 0 deletions codex-rs/exec/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,10 @@ pub struct Cli {
#[arg(long, short = 'm')]
pub model: Option<String>,

/// Model to use for review sessions.
#[arg(long = "review-model")]
pub review_model: Option<String>,

#[arg(long = "oss", default_value_t = false)]
pub oss: bool,

Expand Down
3 changes: 2 additions & 1 deletion codex-rs/exec/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
command,
images,
model: model_cli_arg,
review_model,
oss,
config_profile,
full_auto,
Expand Down Expand Up @@ -167,7 +168,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
// Load configuration and determine approval policy
let overrides = ConfigOverrides {
model,
review_model: None,
review_model,
config_profile,
// Default to never ask for approvals in headless mode. Feature flags can override.
approval_policy: Some(AskForApproval::Never),
Expand Down
11 changes: 10 additions & 1 deletion codex-rs/mcp-server/src/codex_tool_config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,10 @@ pub struct CodexToolCallParam {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub model: Option<String>,

/// Optional override for the review model name (e.g. "gpt-4o-mini").
#[serde(default, skip_serializing_if = "Option::is_none")]
pub review_model: Option<String>,

/// Configuration profile from config.toml to specify default options.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub profile: Option<String>,
Expand Down Expand Up @@ -139,6 +143,7 @@ impl CodexToolCallParam {
let Self {
prompt,
model,
review_model,
profile,
cwd,
approval_policy,
Expand All @@ -151,7 +156,7 @@ impl CodexToolCallParam {
// Build the `ConfigOverrides` recognized by codex-core.
let overrides = codex_core::config::ConfigOverrides {
model,
review_model: None,
review_model,
config_profile: profile,
cwd: cwd.map(PathBuf::from),
approval_policy: approval_policy.map(Into::into),
Expand Down Expand Up @@ -288,6 +293,10 @@ mod tests {
"description": "Configuration profile from config.toml to specify default options.",
"type": "string"
},
"review-model": {
"description": "Optional override for the review model name (e.g. \"gpt-4o-mini\").",
"type": "string"
},
"prompt": {
"description": "The *initial user prompt* to start the Codex conversation.",
"type": "string"
Expand Down
4 changes: 4 additions & 0 deletions codex-rs/tui/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ pub struct Cli {
#[arg(long, short = 'm')]
pub model: Option<String>,

/// Model to use for review sessions.
#[arg(long = "review-model")]
pub review_model: Option<String>,

/// Convenience flag to select the local open source model provider.
/// Equivalent to -c model_provider=oss; verifies a local Ollama server is
/// running.
Expand Down
2 changes: 1 addition & 1 deletion codex-rs/tui/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ pub async fn run_main(

let overrides = ConfigOverrides {
model,
review_model: None,
review_model: cli.review_model.clone(),
approval_policy,
sandbox_mode,
cwd,
Expand Down
22 changes: 22 additions & 0 deletions docs/config.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,27 @@ The model that Codex should use.
model = "o3" # overrides the default of "gpt-5-codex"
```

## review_model

Specifies the model to use for `/review` sessions. This allows you to use a different model for code reviews than your main chat model, optimizing for speed or specialized review capabilities.

Defaults to `"gpt-5-codex"` if not specified.

Example:

```toml
model = "gpt-5"
review_model = "gpt-5-codex" # Use a different model for reviews
```

This can also be set per-profile:

```toml
[profiles.fast-review]
model = "gpt-5"
review_model = "gpt-4.1" # Use different review model in this profile
```

## model_providers

This option lets you override and amend the default set of model providers bundled with Codex. This value is a map where the key is the value to use with `model_provider` to select the corresponding provider.
Expand Down Expand Up @@ -781,6 +802,7 @@ notifications = [ "agent-turn-complete", "approval-requested" ]
| Key | Type / Values | Notes |
| ------------------------------------------------ | ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- |
| `model` | string | Model to use (e.g., `gpt-5-codex`). |
| `review_model` | string | Model to use for review sessions (e.g., `gpt-5-codex`). |
| `model_provider` | string | Provider id from `model_providers` (default: `openai`). |
| `model_context_window` | number | Context window tokens. |
| `model_max_output_tokens` | number | Max output tokens. |
Expand Down