Compare commits


2 Commits

Author         SHA1        Message                                  Date
Julia Ryan     cee726f86b  wip                                      2025-08-13 20:58:59 -07:00
Max Brunsfeld  d8b791d3a6  Start work on introducing RelPath type  2025-08-11 19:25:24 -07:00
337 changed files with 4947 additions and 15626 deletions

View File

@@ -1,35 +0,0 @@
name: Bug Report (Windows Alpha)
description: Zed Windows Alpha Related Bugs
type: "Bug"
labels: ["windows"]
title: "Windows Alpha: <a short description of the Windows bug>"
body:
- type: textarea
attributes:
label: Summary
description: Describe the bug with a one-line summary, and provide detailed reproduction steps
value: |
<!-- Please insert a one-line summary of the issue below -->
SUMMARY_SENTENCE_HERE
### Description
<!-- Describe with sufficient detail to reproduce from a clean Zed install. -->
Steps to trigger the problem:
1.
2.
3.
**Expected Behavior**:
**Actual Behavior**:
validations:
required: true
- type: textarea
id: environment
attributes:
label: Zed Version and System Specs
description: 'Open Zed, and in the command palette select "zed: copy system specs into clipboard"'
placeholder: |
Output of "zed: copy system specs into clipboard"
validations:
required: true

View File

@@ -20,168 +20,7 @@ runs:
with:
node-version: "18"
- name: Configure crash dumps
shell: powershell
run: |
# Record the start time for this CI run
$runStartTime = Get-Date
$runStartTimeStr = $runStartTime.ToString("yyyy-MM-dd HH:mm:ss")
Write-Host "CI run started at: $runStartTimeStr"
# Save the timestamp for later use
echo "CI_RUN_START_TIME=$($runStartTime.Ticks)" >> $env:GITHUB_ENV
# Create crash dump directory in workspace (non-persistent)
$dumpPath = "$env:GITHUB_WORKSPACE\crash_dumps"
New-Item -ItemType Directory -Force -Path $dumpPath | Out-Null
Write-Host "Setting up crash dump detection..."
Write-Host "Workspace dump path: $dumpPath"
# Note: We're NOT modifying registry on stateful runners
# Instead, we'll check default Windows crash locations after tests
- name: Run tests
shell: powershell
working-directory: ${{ inputs.working-directory }}
run: |
$env:RUST_BACKTRACE = "full"
# Enable Windows debugging features
$env:_NT_SYMBOL_PATH = "srv*https://msdl.microsoft.com/download/symbols"
# .NET crash dump environment variables (ephemeral)
$env:COMPlus_DbgEnableMiniDump = "1"
$env:COMPlus_DbgMiniDumpType = "4"
$env:COMPlus_CreateDumpDiagnostics = "1"
cargo nextest run --workspace --no-fail-fast
continue-on-error: true
- name: Analyze crash dumps
if: always()
shell: powershell
run: |
Write-Host "Checking for crash dumps..."
# Get the CI run start time from the environment
$runStartTime = [DateTime]::new([long]$env:CI_RUN_START_TIME)
Write-Host "Only analyzing dumps created after: $($runStartTime.ToString('yyyy-MM-dd HH:mm:ss'))"
# Check all possible crash dump locations
$searchPaths = @(
"$env:GITHUB_WORKSPACE\crash_dumps",
"$env:LOCALAPPDATA\CrashDumps",
"$env:TEMP",
"$env:GITHUB_WORKSPACE",
"$env:USERPROFILE\AppData\Local\CrashDumps",
"C:\Windows\System32\config\systemprofile\AppData\Local\CrashDumps"
)
$dumps = @()
foreach ($path in $searchPaths) {
if (Test-Path $path) {
Write-Host "Searching in: $path"
$found = Get-ChildItem "$path\*.dmp" -ErrorAction SilentlyContinue | Where-Object {
$_.CreationTime -gt $runStartTime
}
if ($found) {
$dumps += $found
Write-Host " Found $($found.Count) dump(s) from this CI run"
}
}
}
if ($dumps) {
Write-Host "Found $($dumps.Count) crash dump(s)"
# Install debugging tools if not present
$cdbPath = "C:\Program Files (x86)\Windows Kits\10\Debuggers\x64\cdb.exe"
if (-not (Test-Path $cdbPath)) {
Write-Host "Installing Windows Debugging Tools..."
$url = "https://go.microsoft.com/fwlink/?linkid=2237387"
Invoke-WebRequest -Uri $url -OutFile winsdksetup.exe
Start-Process -Wait winsdksetup.exe -ArgumentList "/features OptionId.WindowsDesktopDebuggers /quiet"
}
foreach ($dump in $dumps) {
Write-Host "`n=================================="
Write-Host "Analyzing crash dump: $($dump.Name)"
Write-Host "Size: $([math]::Round($dump.Length / 1MB, 2)) MB"
Write-Host "Time: $($dump.CreationTime)"
Write-Host "=================================="
# Set symbol path
$env:_NT_SYMBOL_PATH = "srv*C:\symbols*https://msdl.microsoft.com/download/symbols"
# Run analysis
$analysisOutput = & $cdbPath -z $dump.FullName -c "!analyze -v; ~*k; lm; q" 2>&1 | Out-String
# Extract key information
if ($analysisOutput -match "ExceptionCode:\s*([\w]+)") {
Write-Host "Exception Code: $($Matches[1])"
if ($Matches[1] -eq "c0000005") {
Write-Host "Exception Type: ACCESS VIOLATION"
}
}
if ($analysisOutput -match "EXCEPTION_RECORD:\s*(.+)") {
Write-Host "Exception Record: $($Matches[1])"
}
if ($analysisOutput -match "FAULTING_IP:\s*\n(.+)") {
Write-Host "Faulting Instruction: $($Matches[1])"
}
# Save full analysis
$analysisFile = "$($dump.FullName).analysis.txt"
$analysisOutput | Out-File -FilePath $analysisFile
Write-Host "`nFull analysis saved to: $analysisFile"
# Print stack trace section
Write-Host "`n--- Stack Trace Preview ---"
$stackSection = $analysisOutput -split "STACK_TEXT:" | Select-Object -Last 1
$stackLines = $stackSection -split "`n" | Select-Object -First 20
$stackLines | ForEach-Object { Write-Host $_ }
Write-Host "--- End Stack Trace Preview ---"
}
Write-Host "`n⚠ Crash dumps detected! Download the 'crash-dumps' artifact for detailed analysis."
# Copy dumps to workspace for artifact upload
$artifactPath = "$env:GITHUB_WORKSPACE\crash_dumps_collected"
New-Item -ItemType Directory -Force -Path $artifactPath | Out-Null
foreach ($dump in $dumps) {
$destName = "$($dump.Directory.Name)_$($dump.Name)"
Copy-Item $dump.FullName -Destination "$artifactPath\$destName"
if (Test-Path "$($dump.FullName).analysis.txt") {
Copy-Item "$($dump.FullName).analysis.txt" -Destination "$artifactPath\$destName.analysis.txt"
}
}
Write-Host "Copied $($dumps.Count) dump(s) to artifact directory"
} else {
Write-Host "No crash dumps from this CI run found"
}
- name: Upload crash dumps
if: always()
uses: actions/upload-artifact@v4
with:
name: crash-dumps-${{ github.run_id }}-${{ github.run_attempt }}
path: |
crash_dumps_collected/*.dmp
crash_dumps_collected/*.txt
if-no-files-found: ignore
retention-days: 7
- name: Check test results
shell: powershell
working-directory: ${{ inputs.working-directory }}
run: |
# Re-check test results to fail the job if tests failed
if ($LASTEXITCODE -ne 0) {
Write-Host "Tests failed with exit code: $LASTEXITCODE"
exit $LASTEXITCODE
}
run: cargo nextest run --workspace --no-fail-fast

View File

@@ -526,11 +526,6 @@ jobs:
with:
node-version: "18"
- name: Setup Sentry CLI
uses: matbour/setup-sentry-cli@3e938c54b3018bdd019973689ef984e033b0454b #v2
with:
token: ${{ SECRETS.SENTRY_AUTH_TOKEN }}
- name: Checkout repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
@@ -616,11 +611,6 @@ jobs:
- name: Install Linux dependencies
run: ./script/linux && ./script/install-mold 2.34.0
- name: Setup Sentry CLI
uses: matbour/setup-sentry-cli@3e938c54b3018bdd019973689ef984e033b0454b #v2
with:
token: ${{ SECRETS.SENTRY_AUTH_TOKEN }}
- name: Determine version and release channel
if: startsWith(github.ref, 'refs/tags/v')
run: |
@@ -674,11 +664,6 @@ jobs:
- name: Install Linux dependencies
run: ./script/linux
- name: Setup Sentry CLI
uses: matbour/setup-sentry-cli@3e938c54b3018bdd019973689ef984e033b0454b #v2
with:
token: ${{ SECRETS.SENTRY_AUTH_TOKEN }}
- name: Determine version and release channel
if: startsWith(github.ref, 'refs/tags/v')
run: |
@@ -718,7 +703,7 @@ jobs:
timeout-minutes: 60
runs-on: github-8vcpu-ubuntu-2404
if: |
false && ( startsWith(github.ref, 'refs/tags/v')
( startsWith(github.ref, 'refs/tags/v')
|| contains(github.event.pull_request.labels.*.name, 'run-bundling') )
needs: [linux_tests]
name: Build Zed on FreeBSD
@@ -804,11 +789,6 @@ jobs:
with:
clean: false
- name: Setup Sentry CLI
uses: matbour/setup-sentry-cli@3e938c54b3018bdd019973689ef984e033b0454b #v2
with:
token: ${{ SECRETS.SENTRY_AUTH_TOKEN }}
- name: Determine version and release channel
working-directory: ${{ env.ZED_WORKSPACE }}
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
@@ -851,12 +831,3 @@ jobs:
run: gh release edit "$GITHUB_REF_NAME" --draft=false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Create Sentry release
uses: getsentry/action-release@526942b68292201ac6bbb99b9a0747d4abee354c # v3
env:
SENTRY_ORG: zed-dev
SENTRY_PROJECT: zed
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
with:
environment: production

View File

@@ -316,12 +316,3 @@ jobs:
git config user.email github-actions@github.com
git tag -f nightly
git push origin nightly --force
- name: Create Sentry release
uses: getsentry/action-release@526942b68292201ac6bbb99b9a0747d4abee354c # v3
env:
SENTRY_ORG: zed-dev
SENTRY_PROJECT: zed
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
with:
environment: production

View File

@@ -3,7 +3,7 @@ name: Run Unit Evals
on:
schedule:
# GitHub might drop jobs at busy times, so we choose a random time in the middle of the night.
- cron: "47 1 * * 2"
- cron: "47 1 * * *"
workflow_dispatch:
concurrency:

Cargo.lock generated
View File

@@ -6,65 +6,32 @@ version = 4
name = "acp_thread"
version = "0.1.0"
dependencies = [
"action_log",
"agent",
"agent-client-protocol",
"anyhow",
"assistant_tool",
"buffer_diff",
"collections",
"editor",
"env_logger 0.11.8",
"file_icons",
"futures 0.3.31",
"gpui",
"indoc",
"itertools 0.14.0",
"language",
"language_model",
"markdown",
"parking_lot",
"project",
"prompt_store",
"rand 0.8.5",
"serde",
"serde_json",
"settings",
"smol",
"tempfile",
"terminal",
"ui",
"url",
"util",
"uuid",
"watch",
"workspace-hack",
]
[[package]]
name = "action_log"
version = "0.1.0"
dependencies = [
"anyhow",
"buffer_diff",
"clock",
"collections",
"ctor",
"futures 0.3.31",
"gpui",
"indoc",
"language",
"log",
"pretty_assertions",
"project",
"rand 0.8.5",
"serde_json",
"settings",
"text",
"util",
"watch",
"workspace-hack",
"zlog",
]
[[package]]
name = "activity_indicator"
version = "0.1.0"
@@ -117,7 +84,6 @@ dependencies = [
name = "agent"
version = "0.1.0"
dependencies = [
"action_log",
"agent_settings",
"anyhow",
"assistant_context",
@@ -190,29 +156,23 @@ name = "agent2"
version = "0.1.0"
dependencies = [
"acp_thread",
"action_log",
"agent-client-protocol",
"agent_servers",
"agent_settings",
"anyhow",
"assistant_tool",
"assistant_tools",
"chrono",
"client",
"clock",
"cloud_llm_client",
"collections",
"context_server",
"ctor",
"editor",
"env_logger 0.11.8",
"fs",
"futures 0.3.31",
"gpui",
"gpui_tokio",
"handlebars 4.5.0",
"html_to_markdown",
"http_client",
"indoc",
"itertools 0.14.0",
"language",
@@ -220,9 +180,7 @@ dependencies = [
"language_models",
"log",
"lsp",
"open",
"paths",
"portable-pty",
"pretty_assertions",
"project",
"prompt_store",
@@ -233,22 +191,12 @@ dependencies = [
"serde_json",
"settings",
"smol",
"task",
"tempfile",
"terminal",
"text",
"theme",
"tree-sitter-rust",
"ui",
"unindent",
"util",
"uuid",
"watch",
"web_search",
"which 6.0.3",
"workspace-hack",
"worktree",
"zlog",
]
[[package]]
@@ -313,7 +261,6 @@ name = "agent_ui"
version = "0.1.0"
dependencies = [
"acp_thread",
"action_log",
"agent",
"agent-client-protocol",
"agent2",
@@ -895,13 +842,13 @@ dependencies = [
name = "assistant_tool"
version = "0.1.0"
dependencies = [
"action_log",
"anyhow",
"buffer_diff",
"clock",
"collections",
"ctor",
"derive_more 0.99.19",
"futures 0.3.31",
"gpui",
"icons",
"indoc",
@@ -918,6 +865,7 @@ dependencies = [
"settings",
"text",
"util",
"watch",
"workspace",
"workspace-hack",
"zlog",
@@ -927,7 +875,6 @@ dependencies = [
name = "assistant_tools"
version = "0.1.0"
dependencies = [
"action_log",
"agent_settings",
"anyhow",
"assistant_tool",
@@ -4067,7 +4014,6 @@ dependencies = [
"log",
"minidumper",
"paths",
"release_channel",
"smol",
"workspace-hack",
]
@@ -6450,7 +6396,6 @@ dependencies = [
"log",
"parking_lot",
"pretty_assertions",
"rand 0.8.5",
"regex",
"rope",
"schemars",
@@ -11155,13 +11100,14 @@ dependencies = [
"ai_onboarding",
"anyhow",
"client",
"command_palette_hooks",
"component",
"db",
"documented",
"editor",
"feature_flags",
"fs",
"fuzzy",
"git",
"gpui",
"itertools 0.14.0",
"language",
@@ -11173,7 +11119,6 @@ dependencies = [
"schemars",
"serde",
"settings",
"telemetry",
"theme",
"ui",
"util",
@@ -11249,7 +11194,6 @@ dependencies = [
"anyhow",
"futures 0.3.31",
"http_client",
"log",
"schemars",
"serde",
"serde_json",
@@ -12927,6 +12871,7 @@ dependencies = [
"prost-build 0.9.0",
"serde",
"typed-path",
"util",
"workspace-hack",
]
@@ -13578,7 +13523,6 @@ dependencies = [
name = "remote_server"
version = "0.1.0"
dependencies = [
"action_log",
"anyhow",
"askpass",
"assistant_tool",
@@ -18035,7 +17979,6 @@ dependencies = [
"command_palette_hooks",
"db",
"editor",
"env_logger 0.11.8",
"futures 0.3.31",
"git_ui",
"gpui",
@@ -18889,6 +18832,33 @@ version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082"
[[package]]
name = "welcome"
version = "0.1.0"
dependencies = [
"anyhow",
"client",
"component",
"db",
"documented",
"editor",
"fuzzy",
"gpui",
"install_cli",
"language",
"picker",
"project",
"serde",
"settings",
"telemetry",
"ui",
"util",
"vim_mode_setting",
"workspace",
"workspace-hack",
"zed_actions",
]
[[package]]
name = "which"
version = "4.4.2"
@@ -20503,7 +20473,7 @@ dependencies = [
[[package]]
name = "zed"
version = "0.201.0"
version = "0.200.0"
dependencies = [
"activity_indicator",
"agent",
@@ -20643,6 +20613,7 @@ dependencies = [
"watch",
"web_search",
"web_search_providers",
"welcome",
"windows 0.61.1",
"winresource",
"workspace",
@@ -20666,7 +20637,7 @@ dependencies = [
[[package]]
name = "zed_emmet"
version = "0.0.6"
version = "0.0.4"
dependencies = [
"zed_extension_api 0.1.0",
]
@@ -20905,7 +20876,6 @@ dependencies = [
"menu",
"postage",
"project",
"rand 0.8.5",
"regex",
"release_channel",
"reqwest_client",

View File

@@ -2,7 +2,6 @@
resolver = "2"
members = [
"crates/acp_thread",
"crates/action_log",
"crates/activity_indicator",
"crates/agent",
"crates/agent2",
@@ -185,6 +184,7 @@ members = [
"crates/watch",
"crates/web_search",
"crates/web_search_providers",
"crates/welcome",
"crates/workspace",
"crates/worktree",
"crates/x_ai",
@@ -229,7 +229,6 @@ edition = "2024"
#
acp_thread = { path = "crates/acp_thread" }
action_log = { path = "crates/action_log" }
agent = { path = "crates/agent" }
agent2 = { path = "crates/agent2" }
activity_indicator = { path = "crates/activity_indicator" }
@@ -411,6 +410,7 @@ vim_mode_setting = { path = "crates/vim_mode_setting" }
watch = { path = "crates/watch" }
web_search = { path = "crates/web_search" }
web_search_providers = { path = "crates/web_search_providers" }
welcome = { path = "crates/welcome" }
workspace = { path = "crates/workspace" }
worktree = { path = "crates/worktree" }
x_ai = { path = "crates/x_ai" }
@@ -564,7 +564,6 @@ reqwest = { git = "https://github.com/zed-industries/reqwest.git", rev = "951c77
"socks",
"stream",
] }
rodio = { version = "0.21.1", default-features = false }
rsa = "0.9.6"
runtimelib = { git = "https://github.com/ConradIrwin/runtimed", rev = "7130c804216b6914355d15d0b91ea91f6babd734", default-features = false, features = [
"async-dispatcher-runtime",
@@ -713,7 +712,6 @@ features = [
"Win32_System_LibraryLoader",
"Win32_System_Memory",
"Win32_System_Ole",
"Win32_System_Performance",
"Win32_System_Pipes",
"Win32_System_SystemInformation",
"Win32_System_SystemServices",
@@ -841,7 +839,6 @@ style = { level = "allow", priority = -1 }
module_inception = { level = "deny" }
question_mark = { level = "deny" }
redundant_closure = { level = "deny" }
declare_interior_mutable_const = { level = "deny" }
# Individual rules that have violations in the codebase:
type_complexity = "allow"
# We often return trait objects from `new` functions.

View File

@@ -1,6 +1,6 @@
# syntax = docker/dockerfile:1.2
FROM rust:1.89-bookworm as builder
FROM rust:1.88-bookworm as builder
WORKDIR app
COPY . .

Binary files not shown (11 files).

View File

@@ -1,9 +1,8 @@
Copyright 2019 The Lilex Project Authors (https://github.com/mishamyrt/Lilex)
Copyright © 2017 IBM Corp. with Reserved Font Name "Plex"
This Font Software is licensed under the SIL Open Font License, Version 1.1.
This license is copied below, and is also available with a FAQ at:
https://scripts.sil.org/OFL
http://scripts.sil.org/OFL
-----------------------------------------------------------
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
@@ -90,4 +89,4 @@ COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
OTHER DEALINGS IN THE FONT SOFTWARE.
OTHER DEALINGS IN THE FONT SOFTWARE.

View File

@@ -1,6 +1,6 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M12.8989 5.77778L11.6434 4.52222C10.6384 3.55068 9.29673 3.00526 7.89893 3C6.57285 3 5.30103 3.52678 4.36343 4.46447C3.78887 5.03901 3.36856 5.73897 3.12921 6.5" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M12.8989 3V5.77778H10.1211" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M3.1012 10.2222L4.3568 11.4778C5.3618 12.4493 6.70342 12.9947 8.10122 13C9.42731 13 10.6991 12.4732 11.6368 11.5355C12.2163 10.956 12.6389 10.2487 12.8772 9.47994" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M5.87891 10.2222H3.10111V13" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M3 5.77778L4.25556 4.52222C5.26054 3.55068 6.6022 3.00526 8 3C9.32608 3 10.5979 3.52678 11.5355 4.46447C12.2339 5.16285 12.7044 6.04656 12.899 7" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M3 3V5.77778H5.77778" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M13.0001 10.2222L11.7445 11.4778C10.7395 12.4493 9.39788 12.9947 8.00008 13C6.67399 13 5.40222 12.4732 4.46454 11.5355C3.76616 10.8372 3.29571 9.95344 3.10107 9" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M10.2224 10.2222H13.0002V13" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Size before: 854 B | after: 830 B

View File

@@ -1,3 +1 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M7 13.5V5.143C7 4.97247 6.93226 4.80892 6.81167 4.68833C6.69108 4.56774 6.52753 4.5 6.357 4.5H3.143C2.97247 4.5 2.80892 4.56774 2.68833 4.68833C2.56774 4.80892 2.5 4.97247 2.5 5.143V12.857C2.5 12.9414 2.51663 13.0251 2.54895 13.1031C2.58126 13.1811 2.62862 13.252 2.68833 13.3117C2.74804 13.3714 2.81892 13.4187 2.89693 13.4511C2.97495 13.4834 3.05856 13.5 3.143 13.5H10.857C10.9414 13.5 11.0251 13.4834 11.1031 13.4511C11.1811 13.4187 11.252 13.3714 11.3117 13.3117C11.3714 13.252 11.4187 13.1811 11.4511 13.1031C11.4834 13.0251 11.5 12.9414 11.5 12.857V9.643C11.5 9.47247 11.4323 9.30892 11.3117 9.18833C11.1911 9.06774 11.0275 9 10.857 9H2.5M12.929 2.5H10.07C9.91873 2.50026 9.77376 2.56054 9.66689 2.6676C9.56002 2.77465 9.5 2.91973 9.5 3.071V5.93C9.5 6.245 9.756 6.501 10.071 6.501H12.929C13.0041 6.501 13.0784 6.4862 13.1477 6.45744C13.2171 6.42868 13.2801 6.38653 13.3331 6.3334C13.3861 6.28028 13.4282 6.21721 13.4568 6.14782C13.4855 6.07843 13.5001 6.00407 13.5 5.929V3.07C13.4997 2.91873 13.4395 2.77376 13.3324 2.66689C13.2254 2.56002 13.0803 2.5 12.929 2.5Z" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="none"><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.5" d="M12.429 2H9.57A.571.571 0 0 0 9 2.571V5.43c0 .315.256.571.571.571h2.858A.571.571 0 0 0 13 5.429V2.57A.571.571 0 0 0 12.429 2ZM6.5 13V4.643A.643.643 0 0 0 5.857 4H2.643A.643.643 0 0 0 2 4.643v7.714a.643.643 0 0 0 .643.643h7.714a.643.643 0 0 0 .643-.643V9.143a.643.643 0 0 0-.643-.643H2"/></svg>

Size before: 1.2 KiB | after: 458 B

View File

@@ -1,5 +1,3 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M11.8889 2H4.11111C3.49746 2 3 2.59695 3 3.33333V12.6667C3 13.403 3.49746 14 4.11111 14H11.8889C12.5025 14 13 13.403 13 12.6667V3.33333C13 2.59695 12.5025 2 11.8889 2Z" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M9 6H6" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M10 10H6" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M13.9999 11.0333C13.9999 11.3516 13.8735 11.6568 13.6484 11.8818C13.4234 12.1069 13.1182 12.2333 12.7999 12.2333H4.8966C4.57836 12.2334 4.27318 12.3599 4.04818 12.5849L2.72697 13.9061C2.66739 13.9657 2.59149 14.0063 2.50886 14.0227C2.42623 14.0391 2.34058 14.0307 2.26274 13.9985C2.18491 13.9662 2.11838 13.9116 2.07157 13.8416C2.02476 13.7715 1.99977 13.6892 1.99976 13.6049V3.8332C1.99976 3.51493 2.12619 3.2097 2.35123 2.98466C2.57628 2.75961 2.88151 2.63318 3.19977 2.63318H12.7999C13.1182 2.63318 13.4234 2.75961 13.6484 2.98466C13.8735 3.2097 13.9999 3.51493 13.9999 3.8332V11.0333Z" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Size before: 566 B | after: 785 B

View File

@@ -1,3 +1 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M2 2L14 14M5.81044 2.41392C6.89676 1.98976 8.08314 1.89138 9.22449 2.13079C10.3658 2.37021 11.4127 2.93705 12.237 3.76199C13.0613 4.58693 13.6273 5.6342 13.8658 6.77573C14.1044 7.91727 14.0051 9.10357 13.5801 10.1896M12.2484 12.2484C11.1176 13.3558 9.59562 13.9724 8.01292 13.9642C6.43021 13.956 4.91467 13.3236 3.79552 12.2045C2.67636 11.0853 2.044 9.56979 2.03578 7.98708C2.02757 6.40438 2.64417 4.88236 3.75165 3.75165" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="none"><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.5" d="M2.998 3 13 13.002M6.174 3.345a5.001 5.001 0 0 1 6.476 6.481M11.54 11.542A5.008 5.008 0 0 1 4.458 4.46"/></svg>

Size before: 618 B | after: 276 B

View File

@@ -1,3 +1,3 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M11.8887 1.25C13.0386 1.25 13.7498 2.31634 13.75 3.33301V12.667C13.7499 13.6836 13.0386 14.75 11.8887 14.75H4.11133C2.96134 14.75 2.25014 13.6836 2.25 12.667V3.33301C2.25015 2.31635 2.96136 1.25 4.11133 1.25H11.8887ZM6 9.25C5.58579 9.25 5.25 9.58579 5.25 10C5.25 10.4142 5.58579 10.75 6 10.75H10C10.4142 10.75 10.75 10.4142 10.75 10C10.75 9.58579 10.4142 9.25 10 9.25H6ZM6 5.25C5.58579 5.25 5.25 5.58579 5.25 6C5.25 6.41421 5.58579 6.75 6 6.75H9C9.41421 6.75 9.75 6.41421 9.75 6C9.75 5.58579 9.41421 5.25 9 5.25H6Z" fill="black"/>
<path d="M13.9999 11.0333C13.9999 11.3516 13.8735 11.6568 13.6484 11.8818C13.4234 12.1069 13.1182 12.2333 12.7999 12.2333H4.8966C4.57836 12.2334 4.27318 12.3599 4.04818 12.5849L2.72697 13.9061C2.66739 13.9657 2.59149 14.0063 2.50886 14.0227C2.42623 14.0391 2.34058 14.0307 2.26274 13.9985C2.18491 13.9662 2.11838 13.9116 2.07157 13.8416C2.02476 13.7715 1.99977 13.6892 1.99976 13.6049V3.8332C1.99976 3.51493 2.12619 3.2097 2.35123 2.98466C2.57628 2.75961 2.88151 2.63318 3.19977 2.63318H12.7999C13.1182 2.63318 13.4234 2.75961 13.6484 2.98466C13.8735 3.2097 13.9999 3.51493 13.9999 3.8332V11.0333Z" fill="black" fill-opacity="0.6" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Size before: 643 B | after: 817 B

View File

@@ -1,5 +1,5 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M14.0001 13.9999L12.7334 12.7333" stroke="black" stroke-width="1.33333" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M11.3333 13.3334C12.4378 13.3334 13.3333 12.4379 13.3333 11.3334C13.3333 10.2288 12.4378 9.33337 11.3333 9.33337C10.2287 9.33337 9.33325 10.2288 9.33325 11.3334C9.33325 12.4379 10.2287 13.3334 11.3333 13.3334Z" stroke="black" stroke-width="1.33333" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6 13H3.2C2.88174 13 2.57651 12.8761 2.35148 12.6554C2.12643 12.4349 2 12.1356 2 11.8236V4.17647C2 3.86445 2.12643 3.56521 2.35148 3.34458C2.57651 3.12395 2.88174 3 3.2 3H5.558C5.75666 3.00004 5.95221 3.04842 6.1271 3.14082C6.30199 3.23322 6.45073 3.36675 6.56 3.52941L7.046 4.2353C7.15637 4.39964 7.30703 4.53421 7.48418 4.6267C7.66133 4.71919 7.8593 4.76664 8.06 4.76471H12.8C13.1183 4.76471 13.4235 4.88866 13.6486 5.10929C13.8735 5.32992 14 5.62916 14 5.94118V7" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6 12.5H3.6C3.30826 12.5 3.02847 12.3884 2.82218 12.1899C2.61589 11.9913 2.5 11.722 2.5 11.4412V4.55882C2.5 4.27801 2.61589 4.00869 2.82218 3.81012C3.02847 3.61155 3.30826 3.5 3.6 3.5H5.7615C5.94361 3.50003 6.12286 3.54358 6.28317 3.62674C6.44349 3.7099 6.57984 3.83007 6.68 3.97647L7.1255 4.61176C7.22668 4.75967 7.36478 4.88078 7.52717 4.96402C7.68955 5.04727 7.87103 5.08997 8.055 5.08824H12.4C12.6917 5.08824 12.9715 5.19979 13.1778 5.39836C13.3841 5.59693 13.5 5.86624 13.5 6.14706V7" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Size before: 1.1 KiB | after: 1.1 KiB

View File

@@ -1,6 +1,6 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10 3H13V6" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6 13H3V10" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M13 3L9.5 6.5" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M3 13L6.5 9.5" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M13 3L9 7" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M3 13L7 9" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Size before: 525 B | after: 517 B

View File

@@ -1,6 +1,6 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3.5 9.5H6.5V12.5" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M12.5 6.5H9.5V3.5" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M9.5 6.5L13 3" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M3 13L6.5 9.5" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M4 9H7V12" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M12 7H9V4" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M9 7L13 3" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M3 13L7 9" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Size before: 539 B | after: 515 B

View File

@@ -1,3 +1 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M6.03641 5.53641L8.33797 7.83797M13.0825 3.0934L6.03641 10.1395M9.99896 9.49896L13.0825 12.5825M4.77932 6.05864C5.25123 6.05864 5.7038 5.87118 6.03749 5.53749C6.37118 5.2038 6.55864 4.75123 6.55864 4.27932C6.55864 3.80742 6.37118 3.35484 6.03749 3.02115C5.7038 2.68746 5.25123 2.5 4.77932 2.5C4.30742 2.5 3.85484 2.68746 3.52115 3.02115C3.18746 3.35484 3 3.80742 3 4.27932C3 4.75123 3.18746 5.2038 3.52115 5.53749C3.85484 5.87118 4.30742 6.05864 4.77932 6.05864ZM4.77932 13.1759C5.25123 13.1759 5.7038 12.9885 6.03749 12.6548C6.37118 12.3211 6.55864 11.8685 6.55864 11.3966C6.55864 10.9247 6.37118 10.4721 6.03749 10.1384C5.7038 9.80475 5.25123 9.61729 4.77932 9.61729C4.30742 9.61729 3.85484 9.80475 3.52115 10.1384C3.18746 10.4721 3 10.9247 3 11.3966C3 11.8685 3.18746 12.3211 3.52115 12.6548C3.85484 12.9885 4.30742 13.1759 4.77932 13.1759Z" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="none"><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.5" d="M4 6a2 2 0 1 0 0-4 2 2 0 0 0 0 4ZM5.413 5.413 8 8M13.333 2.667l-7.92 7.92M4 14a2 2 0 1 0 0-4 2 2 0 0 0 0 4ZM9.867 9.867l3.466 3.466"/></svg>

Size before: 1.0 KiB | after: 305 B

View File

@@ -239,7 +239,6 @@
"ctrl-shift-a": "agent::ToggleContextPicker",
"ctrl-shift-j": "agent::ToggleNavigationMenu",
"ctrl-shift-i": "agent::ToggleOptionsMenu",
"ctrl-alt-shift-n": "agent::ToggleNewThreadMenu",
"shift-alt-escape": "agent::ExpandMessageEditor",
"ctrl->": "assistant::QuoteSelection",
"ctrl-alt-e": "agent::RemoveAllContext",
@@ -331,6 +330,8 @@
"use_key_equivalents": true,
"bindings": {
"enter": "agent::Chat",
"up": "agent::PreviousHistoryMessage",
"down": "agent::NextHistoryMessage",
"shift-ctrl-r": "agent::OpenAgentDiff",
"ctrl-shift-y": "agent::KeepAll",
"ctrl-shift-n": "agent::RejectAll"

View File

@@ -279,7 +279,6 @@
"cmd-shift-a": "agent::ToggleContextPicker",
"cmd-shift-j": "agent::ToggleNavigationMenu",
"cmd-shift-i": "agent::ToggleOptionsMenu",
"cmd-alt-shift-n": "agent::ToggleNewThreadMenu",
"shift-alt-escape": "agent::ExpandMessageEditor",
"cmd->": "assistant::QuoteSelection",
"cmd-alt-e": "agent::RemoveAllContext",
@@ -383,6 +382,8 @@
"use_key_equivalents": true,
"bindings": {
"enter": "agent::Chat",
"up": "agent::PreviousHistoryMessage",
"down": "agent::NextHistoryMessage",
"shift-ctrl-r": "agent::OpenAgentDiff",
"cmd-shift-y": "agent::KeepAll",
"cmd-shift-n": "agent::RejectAll"

View File

@@ -333,14 +333,10 @@
"ctrl-x ctrl-c": "editor::ShowEditPrediction", // zed specific
"ctrl-x ctrl-l": "editor::ToggleCodeActions", // zed specific
"ctrl-x ctrl-z": "editor::Cancel",
"ctrl-x ctrl-e": "vim::LineDown",
"ctrl-x ctrl-y": "vim::LineUp",
"ctrl-w": "editor::DeleteToPreviousWordStart",
"ctrl-u": "editor::DeleteToBeginningOfLine",
"ctrl-t": "vim::Indent",
"ctrl-d": "vim::Outdent",
"ctrl-y": "vim::InsertFromAbove",
"ctrl-e": "vim::InsertFromBelow",
"ctrl-k": ["vim::PushDigraph", {}],
"ctrl-v": ["vim::PushLiteral", {}],
"ctrl-shift-v": "editor::Paste", // note: this is *very* similar to ctrl-v in vim, but ctrl-shift-v on linux is the typical shortcut for paste when ctrl-v is already in use.

View File

@@ -28,9 +28,7 @@
"edit_prediction_provider": "zed"
},
// The name of a font to use for rendering text in the editor
// ".ZedMono" currently aliases to Lilex
// but this may change in the future.
"buffer_font_family": ".ZedMono",
"buffer_font_family": "Zed Plex Mono",
// Set the buffer text's font fallbacks, this will be merged with
// the platform's default fallbacks.
"buffer_font_fallbacks": null,
@@ -56,9 +54,7 @@
"buffer_line_height": "comfortable",
// The name of a font to use for rendering text in the UI
// You can set this to ".SystemUIFont" to use the system font
// ".ZedSans" currently aliases to "IBM Plex Sans", but this may
// change in the future
"ui_font_family": ".ZedSans",
"ui_font_family": "Zed Plex Sans",
// Set the UI's font fallbacks, this will be merged with the platform's
// default font fallbacks.
"ui_font_fallbacks": null,
@@ -86,10 +82,10 @@
// Layout mode of the bottom dock. Defaults to "contained"
// choices: contained, full, left_aligned, right_aligned
"bottom_dock_layout": "contained",
// The direction that you want to split panes horizontally. Defaults to "down"
"pane_split_direction_horizontal": "down",
// The direction that you want to split panes vertically. Defaults to "right"
"pane_split_direction_vertical": "right",
// The direction that you want to split panes horizontally. Defaults to "up"
"pane_split_direction_horizontal": "up",
// The direction that you want to split panes vertically. Defaults to "left"
"pane_split_direction_vertical": "left",
// Centered layout related settings.
"centered_layout": {
// The relative width of the left padding of the central pane from the
@@ -1214,18 +1210,7 @@
// Any addition to this list will be merged with the default list.
// Globs are matched relative to the worktree root,
// except when starting with a slash (/) or equivalent in Windows.
"disabled_globs": [
"**/.env*",
"**/*.pem",
"**/*.key",
"**/*.cert",
"**/*.crt",
"**/.dev.vars",
"**/secrets.yml",
"**/.zed/settings.json", // zed project settings
"/**/zed/settings.json", // zed user settings
"/**/zed/keymap.json"
],
"disabled_globs": ["**/.env*", "**/*.pem", "**/*.key", "**/*.cert", "**/*.crt", "**/.dev.vars", "**/secrets.yml"],
// When to show edit predictions previews in buffer.
// This setting takes two possible values:
// 1. Display predictions inline when there are no language server completions available.
@@ -1406,7 +1391,7 @@
// "font_size": 15,
// Set the terminal's font family. If this option is not included,
// the terminal will default to matching the buffer's font family.
// "font_family": ".ZedMono",
// "font_family": "Zed Plex Mono",
// Set the terminal's font fallbacks. If this option is not included,
// the terminal will default to matching the buffer's font fallbacks.
// This will be merged with the platform's default font fallbacks

View File

@@ -86,9 +86,9 @@
"terminal.ansi.blue": "#74ade8ff",
"terminal.ansi.bright_blue": "#385378ff",
"terminal.ansi.dim_blue": "#bed5f4ff",
"terminal.ansi.magenta": "#b477cfff",
"terminal.ansi.bright_magenta": "#d6b4e4ff",
"terminal.ansi.dim_magenta": "#612a79ff",
"terminal.ansi.magenta": "#be5046ff",
"terminal.ansi.bright_magenta": "#5e2b26ff",
"terminal.ansi.dim_magenta": "#e6a79eff",
"terminal.ansi.cyan": "#6eb4bfff",
"terminal.ansi.bright_cyan": "#3a565bff",
"terminal.ansi.dim_cyan": "#b9d9dfff",

View File

@@ -16,31 +16,24 @@ doctest = false
test-support = ["gpui/test-support", "project/test-support"]
[dependencies]
action_log.workspace = true
agent-client-protocol.workspace = true
agent.workspace = true
anyhow.workspace = true
assistant_tool.workspace = true
buffer_diff.workspace = true
collections.workspace = true
editor.workspace = true
file_icons.workspace = true
futures.workspace = true
gpui.workspace = true
itertools.workspace = true
language.workspace = true
language_model.workspace = true
markdown.workspace = true
project.workspace = true
prompt_store.workspace = true
serde.workspace = true
serde_json.workspace = true
settings.workspace = true
smol.workspace = true
terminal.workspace = true
ui.workspace = true
url.workspace = true
util.workspace = true
uuid.workspace = true
watch.workspace = true
workspace-hack.workspace = true
[dev-dependencies]

File diff suppressed because it is too large.

View File

@@ -1,78 +1,18 @@
use crate::AcpThread;
use std::{error::Error, fmt, path::Path, rc::Rc, sync::Arc};
use agent_client_protocol::{self as acp};
use anyhow::Result;
use collections::IndexMap;
use gpui::{AsyncApp, Entity, SharedString, Task};
use gpui::{AsyncApp, Entity, Task};
use language_model::LanguageModel;
use project::Project;
use std::{error::Error, fmt, path::Path, rc::Rc, sync::Arc};
use ui::{App, IconName};
use uuid::Uuid;
use ui::App;
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct UserMessageId(Arc<str>);
impl UserMessageId {
pub fn new() -> Self {
Self(Uuid::new_v4().to_string().into())
}
}
pub trait AgentConnection {
fn new_thread(
self: Rc<Self>,
project: Entity<Project>,
cwd: &Path,
cx: &mut AsyncApp,
) -> Task<Result<Entity<AcpThread>>>;
fn auth_methods(&self) -> &[acp::AuthMethod];
fn authenticate(&self, method: acp::AuthMethodId, cx: &mut App) -> Task<Result<()>>;
fn prompt(
&self,
user_message_id: Option<UserMessageId>,
params: acp::PromptRequest,
cx: &mut App,
) -> Task<Result<acp::PromptResponse>>;
fn cancel(&self, session_id: &acp::SessionId, cx: &mut App);
fn session_editor(
&self,
_session_id: &acp::SessionId,
_cx: &mut App,
) -> Option<Rc<dyn AgentSessionEditor>> {
None
}
/// Returns this agent as an [Rc<dyn ModelSelector>] if the model selection capability is supported.
///
/// If the agent does not support model selection, returns [None].
/// This allows sharing the selector in UI components.
fn model_selector(&self) -> Option<Rc<dyn AgentModelSelector>> {
None
}
}
pub trait AgentSessionEditor {
fn truncate(&self, message_id: UserMessageId, cx: &mut App) -> Task<Result<()>>;
}
#[derive(Debug)]
pub struct AuthRequired;
impl Error for AuthRequired {}
impl fmt::Display for AuthRequired {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "AuthRequired")
}
}
use crate::AcpThread;
/// Trait for agents that support listing, selecting, and querying language models.
///
/// This is an optional capability; agents indicate support via [AgentConnection::model_selector].
pub trait AgentModelSelector: 'static {
pub trait ModelSelector: 'static {
/// Lists all available language models for this agent.
///
/// # Parameters
@@ -80,7 +20,7 @@ pub trait AgentModelSelector: 'static {
///
/// # Returns
/// A task resolving to the list of models or an error (e.g., if no models are configured).
fn list_models(&self, cx: &mut App) -> Task<Result<AgentModelList>>;
fn list_models(&self, cx: &mut AsyncApp) -> Task<Result<Vec<Arc<dyn LanguageModel>>>>;
/// Selects a model for a specific session (thread).
///
@@ -97,8 +37,8 @@ pub trait AgentModelSelector: 'static {
fn select_model(
&self,
session_id: acp::SessionId,
model_id: AgentModelId,
cx: &mut App,
model: Arc<dyn LanguageModel>,
cx: &mut AsyncApp,
) -> Task<Result<()>>;
/// Retrieves the currently selected model for a specific session (thread).
@@ -112,51 +52,42 @@ pub trait AgentModelSelector: 'static {
fn selected_model(
&self,
session_id: &acp::SessionId,
cx: &mut App,
) -> Task<Result<AgentModelInfo>>;
/// Whenever the model list is updated the receiver will be notified.
fn watch(&self, cx: &mut App) -> watch::Receiver<()>;
cx: &mut AsyncApp,
) -> Task<Result<Arc<dyn LanguageModel>>>;
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct AgentModelId(pub SharedString);
pub trait AgentConnection {
fn new_thread(
self: Rc<Self>,
project: Entity<Project>,
cwd: &Path,
cx: &mut AsyncApp,
) -> Task<Result<Entity<AcpThread>>>;
impl std::ops::Deref for AgentModelId {
type Target = SharedString;
fn auth_methods(&self) -> &[acp::AuthMethod];
fn deref(&self) -> &Self::Target {
&self.0
fn authenticate(&self, method: acp::AuthMethodId, cx: &mut App) -> Task<Result<()>>;
fn prompt(&self, params: acp::PromptRequest, cx: &mut App)
-> Task<Result<acp::PromptResponse>>;
fn cancel(&self, session_id: &acp::SessionId, cx: &mut App);
/// Returns this agent as an [Rc<dyn ModelSelector>] if the model selection capability is supported.
///
/// If the agent does not support model selection, returns [None].
/// This allows sharing the selector in UI components.
fn model_selector(&self) -> Option<Rc<dyn ModelSelector>> {
None // Default impl for agents that don't support it
}
}
impl fmt::Display for AgentModelId {
#[derive(Debug)]
pub struct AuthRequired;
impl Error for AuthRequired {}
impl fmt::Display for AuthRequired {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct AgentModelInfo {
pub id: AgentModelId,
pub name: SharedString,
pub icon: Option<IconName>,
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct AgentModelGroupName(pub SharedString);
#[derive(Debug, Clone)]
pub enum AgentModelList {
Flat(Vec<AgentModelInfo>),
Grouped(IndexMap<AgentModelGroupName, Vec<AgentModelInfo>>),
}
impl AgentModelList {
pub fn is_empty(&self) -> bool {
match self {
AgentModelList::Flat(models) => models.is_empty(),
AgentModelList::Grouped(groups) => groups.is_empty(),
}
write!(f, "AuthRequired")
}
}

View File

@@ -174,10 +174,6 @@ impl Diff {
buffer_text
)
}
pub fn has_revealed_range(&self, cx: &App) -> bool {
self.multibuffer().read(cx).excerpt_paths().next().is_some()
}
}
pub struct PendingDiff {

View File

@@ -1,451 +0,0 @@
use agent::ThreadId;
use anyhow::{Context as _, Result, bail};
use file_icons::FileIcons;
use prompt_store::{PromptId, UserPromptId};
use std::{
fmt,
ops::Range,
path::{Path, PathBuf},
};
use ui::{App, IconName, SharedString};
use url::Url;
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum MentionUri {
File {
abs_path: PathBuf,
is_directory: bool,
},
Symbol {
path: PathBuf,
name: String,
line_range: Range<u32>,
},
Thread {
id: ThreadId,
name: String,
},
TextThread {
path: PathBuf,
name: String,
},
Rule {
id: PromptId,
name: String,
},
Selection {
path: PathBuf,
line_range: Range<u32>,
},
Fetch {
url: Url,
},
}
impl MentionUri {
pub fn parse(input: &str) -> Result<Self> {
let url = url::Url::parse(input)?;
let path = url.path();
match url.scheme() {
"file" => {
if let Some(fragment) = url.fragment() {
let range = fragment
.strip_prefix("L")
.context("Line range must start with \"L\"")?;
let (start, end) = range
.split_once(":")
.context("Line range must use colon as separator")?;
let line_range = start
.parse::<u32>()
.context("Parsing line range start")?
.checked_sub(1)
.context("Line numbers should be 1-based")?
..end
.parse::<u32>()
.context("Parsing line range end")?
.checked_sub(1)
.context("Line numbers should be 1-based")?;
if let Some(name) = single_query_param(&url, "symbol")? {
Ok(Self::Symbol {
name,
path: path.into(),
line_range,
})
} else {
Ok(Self::Selection {
path: path.into(),
line_range,
})
}
} else {
let file_path =
PathBuf::from(format!("{}{}", url.host_str().unwrap_or(""), path));
let is_directory = input.ends_with("/");
Ok(Self::File {
abs_path: file_path,
is_directory,
})
}
}
"zed" => {
if let Some(thread_id) = path.strip_prefix("/agent/thread/") {
let name = single_query_param(&url, "name")?.context("Missing thread name")?;
Ok(Self::Thread {
id: thread_id.into(),
name,
})
} else if let Some(path) = path.strip_prefix("/agent/text-thread/") {
let name = single_query_param(&url, "name")?.context("Missing thread name")?;
Ok(Self::TextThread {
path: path.into(),
name,
})
} else if let Some(rule_id) = path.strip_prefix("/agent/rule/") {
let name = single_query_param(&url, "name")?.context("Missing rule name")?;
let rule_id = UserPromptId(rule_id.parse()?);
Ok(Self::Rule {
id: rule_id.into(),
name,
})
} else {
bail!("invalid zed url: {:?}", input);
}
}
"http" | "https" => Ok(MentionUri::Fetch { url }),
other => bail!("unrecognized scheme {:?}", other),
}
}
pub fn name(&self) -> String {
match self {
MentionUri::File { abs_path, .. } => abs_path
.file_name()
.unwrap_or_default()
.to_string_lossy()
.into_owned(),
MentionUri::Symbol { name, .. } => name.clone(),
MentionUri::Thread { name, .. } => name.clone(),
MentionUri::TextThread { name, .. } => name.clone(),
MentionUri::Rule { name, .. } => name.clone(),
MentionUri::Selection {
path, line_range, ..
} => selection_name(path, line_range),
MentionUri::Fetch { url } => url.to_string(),
}
}
pub fn icon_path(&self, cx: &mut App) -> SharedString {
match self {
MentionUri::File {
abs_path,
is_directory,
} => {
if *is_directory {
FileIcons::get_folder_icon(false, cx)
.unwrap_or_else(|| IconName::Folder.path().into())
} else {
FileIcons::get_icon(&abs_path, cx)
.unwrap_or_else(|| IconName::File.path().into())
}
}
MentionUri::Symbol { .. } => IconName::Code.path().into(),
MentionUri::Thread { .. } => IconName::Thread.path().into(),
MentionUri::TextThread { .. } => IconName::Thread.path().into(),
MentionUri::Rule { .. } => IconName::Reader.path().into(),
MentionUri::Selection { .. } => IconName::Reader.path().into(),
MentionUri::Fetch { .. } => IconName::ToolWeb.path().into(),
}
}
pub fn as_link<'a>(&'a self) -> MentionLink<'a> {
MentionLink(self)
}
pub fn to_uri(&self) -> Url {
match self {
MentionUri::File {
abs_path,
is_directory,
} => {
let mut url = Url::parse("file:///").unwrap();
let mut path = abs_path.to_string_lossy().to_string();
if *is_directory && !path.ends_with("/") {
path.push_str("/");
}
url.set_path(&path);
url
}
MentionUri::Symbol {
path,
name,
line_range,
} => {
let mut url = Url::parse("file:///").unwrap();
url.set_path(&path.to_string_lossy());
url.query_pairs_mut().append_pair("symbol", name);
url.set_fragment(Some(&format!(
"L{}:{}",
line_range.start + 1,
line_range.end + 1
)));
url
}
MentionUri::Selection { path, line_range } => {
let mut url = Url::parse("file:///").unwrap();
url.set_path(&path.to_string_lossy());
url.set_fragment(Some(&format!(
"L{}:{}",
line_range.start + 1,
line_range.end + 1
)));
url
}
MentionUri::Thread { name, id } => {
let mut url = Url::parse("zed:///").unwrap();
url.set_path(&format!("/agent/thread/{id}"));
url.query_pairs_mut().append_pair("name", name);
url
}
MentionUri::TextThread { path, name } => {
let mut url = Url::parse("zed:///").unwrap();
url.set_path(&format!("/agent/text-thread/{}", path.to_string_lossy()));
url.query_pairs_mut().append_pair("name", name);
url
}
MentionUri::Rule { name, id } => {
let mut url = Url::parse("zed:///").unwrap();
url.set_path(&format!("/agent/rule/{id}"));
url.query_pairs_mut().append_pair("name", name);
url
}
MentionUri::Fetch { url } => url.clone(),
}
}
}
pub struct MentionLink<'a>(&'a MentionUri);
impl fmt::Display for MentionLink<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "[@{}]({})", self.0.name(), self.0.to_uri())
}
}
fn single_query_param(url: &Url, name: &'static str) -> Result<Option<String>> {
let pairs = url.query_pairs().collect::<Vec<_>>();
match pairs.as_slice() {
[] => Ok(None),
[(k, v)] => {
if k != name {
bail!("invalid query parameter")
}
Ok(Some(v.to_string()))
}
_ => bail!("too many query pairs"),
}
}
pub fn selection_name(path: &Path, line_range: &Range<u32>) -> String {
format!(
"{} ({}:{})",
path.file_name().unwrap_or_default().display(),
line_range.start + 1,
line_range.end + 1
)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_file_uri() {
let file_uri = "file:///path/to/file.rs";
let parsed = MentionUri::parse(file_uri).unwrap();
match &parsed {
MentionUri::File {
abs_path,
is_directory,
} => {
assert_eq!(abs_path.to_str().unwrap(), "/path/to/file.rs");
assert!(!is_directory);
}
_ => panic!("Expected File variant"),
}
assert_eq!(parsed.to_uri().to_string(), file_uri);
}
#[test]
fn test_parse_directory_uri() {
let file_uri = "file:///path/to/dir/";
let parsed = MentionUri::parse(file_uri).unwrap();
match &parsed {
MentionUri::File {
abs_path,
is_directory,
} => {
assert_eq!(abs_path.to_str().unwrap(), "/path/to/dir");
assert!(is_directory);
}
_ => panic!("Expected File variant"),
}
assert_eq!(parsed.to_uri().to_string(), file_uri);
}
#[test]
fn test_to_directory_uri_with_slash() {
let uri = MentionUri::File {
abs_path: PathBuf::from("/path/to/dir/"),
is_directory: true,
};
assert_eq!(uri.to_uri().to_string(), "file:///path/to/dir/");
}
#[test]
fn test_to_directory_uri_without_slash() {
let uri = MentionUri::File {
abs_path: PathBuf::from("/path/to/dir"),
is_directory: true,
};
assert_eq!(uri.to_uri().to_string(), "file:///path/to/dir/");
}
#[test]
fn test_parse_symbol_uri() {
let symbol_uri = "file:///path/to/file.rs?symbol=MySymbol#L10:20";
let parsed = MentionUri::parse(symbol_uri).unwrap();
match &parsed {
MentionUri::Symbol {
path,
name,
line_range,
} => {
assert_eq!(path.to_str().unwrap(), "/path/to/file.rs");
assert_eq!(name, "MySymbol");
assert_eq!(line_range.start, 9);
assert_eq!(line_range.end, 19);
}
_ => panic!("Expected Symbol variant"),
}
assert_eq!(parsed.to_uri().to_string(), symbol_uri);
}
#[test]
fn test_parse_selection_uri() {
let selection_uri = "file:///path/to/file.rs#L5:15";
let parsed = MentionUri::parse(selection_uri).unwrap();
match &parsed {
MentionUri::Selection { path, line_range } => {
assert_eq!(path.to_str().unwrap(), "/path/to/file.rs");
assert_eq!(line_range.start, 4);
assert_eq!(line_range.end, 14);
}
_ => panic!("Expected Selection variant"),
}
assert_eq!(parsed.to_uri().to_string(), selection_uri);
}
#[test]
fn test_parse_thread_uri() {
let thread_uri = "zed:///agent/thread/session123?name=Thread+name";
let parsed = MentionUri::parse(thread_uri).unwrap();
match &parsed {
MentionUri::Thread {
id: thread_id,
name,
} => {
assert_eq!(thread_id.to_string(), "session123");
assert_eq!(name, "Thread name");
}
_ => panic!("Expected Thread variant"),
}
assert_eq!(parsed.to_uri().to_string(), thread_uri);
}
#[test]
fn test_parse_rule_uri() {
let rule_uri = "zed:///agent/rule/d8694ff2-90d5-4b6f-be33-33c1763acd52?name=Some+rule";
let parsed = MentionUri::parse(rule_uri).unwrap();
match &parsed {
MentionUri::Rule { id, name } => {
assert_eq!(id.to_string(), "d8694ff2-90d5-4b6f-be33-33c1763acd52");
assert_eq!(name, "Some rule");
}
_ => panic!("Expected Rule variant"),
}
assert_eq!(parsed.to_uri().to_string(), rule_uri);
}
#[test]
fn test_parse_fetch_http_uri() {
let http_uri = "http://example.com/path?query=value#fragment";
let parsed = MentionUri::parse(http_uri).unwrap();
match &parsed {
MentionUri::Fetch { url } => {
assert_eq!(url.to_string(), http_uri);
}
_ => panic!("Expected Fetch variant"),
}
assert_eq!(parsed.to_uri().to_string(), http_uri);
}
#[test]
fn test_parse_fetch_https_uri() {
let https_uri = "https://example.com/api/endpoint";
let parsed = MentionUri::parse(https_uri).unwrap();
match &parsed {
MentionUri::Fetch { url } => {
assert_eq!(url.to_string(), https_uri);
}
_ => panic!("Expected Fetch variant"),
}
assert_eq!(parsed.to_uri().to_string(), https_uri);
}
#[test]
fn test_invalid_scheme() {
assert!(MentionUri::parse("ftp://example.com").is_err());
assert!(MentionUri::parse("ssh://example.com").is_err());
assert!(MentionUri::parse("unknown://example.com").is_err());
}
#[test]
fn test_invalid_zed_path() {
assert!(MentionUri::parse("zed:///invalid/path").is_err());
assert!(MentionUri::parse("zed:///agent/unknown/test").is_err());
}
#[test]
fn test_invalid_line_range_format() {
// Missing L prefix
assert!(MentionUri::parse("file:///path/to/file.rs#10:20").is_err());
// Missing colon separator
assert!(MentionUri::parse("file:///path/to/file.rs#L1020").is_err());
// Invalid numbers
assert!(MentionUri::parse("file:///path/to/file.rs#L10:abc").is_err());
assert!(MentionUri::parse("file:///path/to/file.rs#Labc:20").is_err());
}
#[test]
fn test_invalid_query_parameters() {
// Invalid query parameter name
assert!(MentionUri::parse("file:///path/to/file.rs#L10:20?invalid=test").is_err());
// Too many query parameters
assert!(
MentionUri::parse("file:///path/to/file.rs#L10:20?symbol=test&another=param").is_err()
);
}
#[test]
fn test_zero_based_line_numbers() {
// Test that 0-based line numbers are rejected (should be 1-based)
assert!(MentionUri::parse("file:///path/to/file.rs#L0:10").is_err());
assert!(MentionUri::parse("file:///path/to/file.rs#L1:0").is_err());
assert!(MentionUri::parse("file:///path/to/file.rs#L0:0").is_err());
}
}

View File

@@ -1,93 +0,0 @@
use gpui::{App, AppContext, Context, Entity};
use language::LanguageRegistry;
use markdown::Markdown;
use std::{path::PathBuf, process::ExitStatus, sync::Arc, time::Instant};
pub struct Terminal {
command: Entity<Markdown>,
working_dir: Option<PathBuf>,
terminal: Entity<terminal::Terminal>,
started_at: Instant,
output: Option<TerminalOutput>,
}
pub struct TerminalOutput {
pub ended_at: Instant,
pub exit_status: Option<ExitStatus>,
pub was_content_truncated: bool,
pub original_content_len: usize,
pub content_line_count: usize,
pub finished_with_empty_output: bool,
}
impl Terminal {
pub fn new(
command: String,
working_dir: Option<PathBuf>,
terminal: Entity<terminal::Terminal>,
language_registry: Arc<LanguageRegistry>,
cx: &mut Context<Self>,
) -> Self {
Self {
command: cx.new(|cx| {
Markdown::new(
format!("```\n{}\n```", command).into(),
Some(language_registry.clone()),
None,
cx,
)
}),
working_dir,
terminal,
started_at: Instant::now(),
output: None,
}
}
pub fn finish(
&mut self,
exit_status: Option<ExitStatus>,
original_content_len: usize,
truncated_content_len: usize,
content_line_count: usize,
finished_with_empty_output: bool,
cx: &mut Context<Self>,
) {
self.output = Some(TerminalOutput {
ended_at: Instant::now(),
exit_status,
was_content_truncated: truncated_content_len < original_content_len,
original_content_len,
content_line_count,
finished_with_empty_output,
});
cx.notify();
}
pub fn command(&self) -> &Entity<Markdown> {
&self.command
}
pub fn working_dir(&self) -> &Option<PathBuf> {
&self.working_dir
}
pub fn started_at(&self) -> Instant {
self.started_at
}
pub fn output(&self) -> Option<&TerminalOutput> {
self.output.as_ref()
}
pub fn inner(&self) -> &Entity<terminal::Terminal> {
&self.terminal
}
pub fn to_markdown(&self, cx: &App) -> String {
format!(
"Terminal:\n```\n{}\n```\n",
self.terminal.read(cx).get_content()
)
}
}

View File

@@ -1,45 +0,0 @@
[package]
name = "action_log"
version = "0.1.0"
edition.workspace = true
publish.workspace = true
license = "GPL-3.0-or-later"
[lib]
path = "src/action_log.rs"
[lints]
workspace = true
[dependencies]
anyhow.workspace = true
buffer_diff.workspace = true
clock.workspace = true
collections.workspace = true
futures.workspace = true
gpui.workspace = true
language.workspace = true
project.workspace = true
text.workspace = true
util.workspace = true
watch.workspace = true
workspace-hack.workspace = true
[dev-dependencies]
buffer_diff = { workspace = true, features = ["test-support"] }
collections = { workspace = true, features = ["test-support"] }
clock = { workspace = true, features = ["test-support"] }
ctor.workspace = true
gpui = { workspace = true, features = ["test-support"] }
indoc.workspace = true
language = { workspace = true, features = ["test-support"] }
log.workspace = true
pretty_assertions.workspace = true
project = { workspace = true, features = ["test-support"] }
rand.workspace = true
serde_json.workspace = true
settings = { workspace = true, features = ["test-support"] }
text = { workspace = true, features = ["test-support"] }
util = { workspace = true, features = ["test-support"] }
zlog.workspace = true

View File

@@ -716,10 +716,18 @@ impl ActivityIndicator {
})),
tooltip_message: Some(Self::version_tooltip_message(&version)),
}),
AutoUpdateStatus::Updated { version } => Some(Content {
AutoUpdateStatus::Updated {
binary_path,
version,
} => Some(Content {
icon: None,
message: "Click to restart and update Zed".to_string(),
on_click: Some(Arc::new(move |_, _, cx| workspace::reload(cx))),
on_click: Some(Arc::new({
let reload = workspace::Reload {
binary_path: Some(binary_path.clone()),
};
move |_, _, cx| workspace::reload(&reload, cx)
})),
tooltip_message: Some(Self::version_tooltip_message(&version)),
}),
AutoUpdateStatus::Errored => Some(Content {

View File

@@ -19,7 +19,6 @@ test-support = [
]
[dependencies]
action_log.workspace = true
agent_settings.workspace = true
anyhow.workspace = true
assistant_context.workspace = true

View File

@@ -326,7 +326,7 @@ mod tests {
_input: serde_json::Value,
_request: Arc<language_model::LanguageModelRequest>,
_project: Entity<Project>,
_action_log: Entity<action_log::ActionLog>,
_action_log: Entity<assistant_tool::ActionLog>,
_model: Arc<dyn language_model::LanguageModel>,
_window: Option<gpui::AnyWindowHandle>,
_cx: &mut App,

View File

@@ -1,8 +1,7 @@
use std::sync::Arc;
use action_log::ActionLog;
use anyhow::{Result, anyhow, bail};
use assistant_tool::{Tool, ToolResult, ToolSource};
use assistant_tool::{ActionLog, Tool, ToolResult, ToolSource};
use context_server::{ContextServerId, types};
use gpui::{AnyWindowHandle, App, Entity, Task};
use icons::IconName;

View File

@@ -8,10 +8,9 @@ use crate::{
},
tool_use::{PendingToolUse, ToolUse, ToolUseMetadata, ToolUseState},
};
use action_log::ActionLog;
use agent_settings::{AgentProfileId, AgentSettings, CompletionMode, SUMMARIZE_THREAD_PROMPT};
use anyhow::{Result, anyhow};
use assistant_tool::{AnyToolCard, Tool, ToolWorkingSet};
use assistant_tool::{ActionLog, AnyToolCard, Tool, ToolWorkingSet};
use chrono::{DateTime, Utc};
use client::{ModelRequestUsage, RequestUsage};
use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, Plan, UsageLimit};
@@ -2268,15 +2267,6 @@ impl Thread {
max_attempts: 3,
})
}
Other(err)
if err.is::<PaymentRequiredError>()
|| err.is::<ModelRequestLimitReachedError>() =>
{
// Retrying won't help for Payment Required or Model Request Limit errors (where
// the user must upgrade to usage-based billing to get more requests, or else wait
// for a significant amount of time for the request limit to reset).
None
}
// Conservatively assume that any other errors are non-retryable
HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
delay: BASE_RETRY_DELAY,

View File

@@ -1,9 +1,9 @@
[package]
name = "agent2"
version = "0.1.0"
edition.workspace = true
publish.workspace = true
edition = "2021"
license = "GPL-3.0-or-later"
publish = false
[lib]
path = "src/agent2.rs"
@@ -13,32 +13,25 @@ workspace = true
[dependencies]
acp_thread.workspace = true
action_log.workspace = true
agent-client-protocol.workspace = true
agent_servers.workspace = true
agent_settings.workspace = true
anyhow.workspace = true
assistant_tool.workspace = true
assistant_tools.workspace = true
chrono.workspace = true
cloud_llm_client.workspace = true
collections.workspace = true
context_server.workspace = true
fs.workspace = true
futures.workspace = true
gpui.workspace = true
handlebars = { workspace = true, features = ["rust-embed"] }
html_to_markdown.workspace = true
http_client.workspace = true
indoc.workspace = true
itertools.workspace = true
language.workspace = true
language_model.workspace = true
language_models.workspace = true
log.workspace = true
open.workspace = true
paths.workspace = true
portable-pty.workspace = true
project.workspace = true
prompt_store.workspace = true
rust-embed.workspace = true
@@ -47,23 +40,16 @@ serde.workspace = true
serde_json.workspace = true
settings.workspace = true
smol.workspace = true
task.workspace = true
terminal.workspace = true
text.workspace = true
ui.workspace = true
util.workspace = true
uuid.workspace = true
watch.workspace = true
web_search.workspace = true
which.workspace = true
workspace-hack.workspace = true
[dev-dependencies]
ctor.workspace = true
client = { workspace = true, "features" = ["test-support"] }
clock = { workspace = true, "features" = ["test-support"] }
context_server = { workspace = true, "features" = ["test-support"] }
editor = { workspace = true, "features" = ["test-support"] }
env_logger.workspace = true
fs = { workspace = true, "features" = ["test-support"] }
gpui = { workspace = true, "features" = ["test-support"] }
@@ -71,14 +57,8 @@ gpui_tokio.workspace = true
language = { workspace = true, "features" = ["test-support"] }
language_model = { workspace = true, "features" = ["test-support"] }
lsp = { workspace = true, "features" = ["test-support"] }
pretty_assertions.workspace = true
project = { workspace = true, "features" = ["test-support"] }
reqwest_client.workspace = true
settings = { workspace = true, "features" = ["test-support"] }
tempfile.workspace = true
terminal = { workspace = true, "features" = ["test-support"] }
theme = { workspace = true, "features" = ["test-support"] }
tree-sitter-rust.workspace = true
unindent = { workspace = true }
worktree = { workspace = true, "features" = ["test-support"] }
zlog.workspace = true
pretty_assertions.workspace = true

View File

@@ -1,26 +1,17 @@
use crate::{AgentResponseEvent, Thread, templates::Templates};
use crate::{
ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DiagnosticsTool, EditFileTool,
FetchTool, FindPathTool, GrepTool, ListDirectoryTool, MovePathTool, NowTool, OpenTool,
ReadFileTool, TerminalTool, ThinkingTool, ToolCallAuthorization, UserMessageContent,
WebSearchTool,
};
use acp_thread::AgentModelSelector;
use crate::{templates::Templates, AgentResponseEvent, Thread};
use crate::{EditFileTool, FindPathTool, ReadFileTool, ThinkingTool, ToolCallAuthorization};
use acp_thread::ModelSelector;
use agent_client_protocol as acp;
use agent_settings::AgentSettings;
use anyhow::{Context as _, Result, anyhow};
use collections::{HashSet, IndexMap};
use fs::Fs;
use futures::{StreamExt, future};
use anyhow::{anyhow, Context as _, Result};
use futures::{future, StreamExt};
use gpui::{
App, AppContext, AsyncApp, Context, Entity, SharedString, Subscription, Task, WeakEntity,
};
use language_model::{LanguageModel, LanguageModelProvider, LanguageModelRegistry};
use language_model::{LanguageModel, LanguageModelRegistry};
use project::{Project, ProjectItem, ProjectPath, Worktree};
use prompt_store::{
ProjectContext, PromptId, PromptStore, RulesFileContext, UserRulesContext, WorktreeContext,
};
use settings::update_settings_file;
use std::cell::RefCell;
use std::collections::HashMap;
use std::path::Path;
@@ -53,104 +44,6 @@ struct Session {
_subscription: Subscription,
}
pub struct LanguageModels {
/// Access language model by ID
models: HashMap<acp_thread::AgentModelId, Arc<dyn LanguageModel>>,
/// Cached list for returning language model information
model_list: acp_thread::AgentModelList,
refresh_models_rx: watch::Receiver<()>,
refresh_models_tx: watch::Sender<()>,
}
impl LanguageModels {
fn new(cx: &App) -> Self {
let (refresh_models_tx, refresh_models_rx) = watch::channel(());
let mut this = Self {
models: HashMap::default(),
model_list: acp_thread::AgentModelList::Grouped(IndexMap::default()),
refresh_models_rx,
refresh_models_tx,
};
this.refresh_list(cx);
this
}
fn refresh_list(&mut self, cx: &App) {
let providers = LanguageModelRegistry::global(cx)
.read(cx)
.providers()
.into_iter()
.filter(|provider| provider.is_authenticated(cx))
.collect::<Vec<_>>();
let mut language_model_list = IndexMap::default();
let mut recommended_models = HashSet::default();
let mut recommended = Vec::new();
for provider in &providers {
for model in provider.recommended_models(cx) {
recommended_models.insert(model.id());
recommended.push(Self::map_language_model_to_info(&model, &provider));
}
}
if !recommended.is_empty() {
language_model_list.insert(
acp_thread::AgentModelGroupName("Recommended".into()),
recommended,
);
}
let mut models = HashMap::default();
for provider in providers {
let mut provider_models = Vec::new();
for model in provider.provided_models(cx) {
let model_info = Self::map_language_model_to_info(&model, &provider);
let model_id = model_info.id.clone();
if !recommended_models.contains(&model.id()) {
provider_models.push(model_info);
}
models.insert(model_id, model);
}
if !provider_models.is_empty() {
language_model_list.insert(
acp_thread::AgentModelGroupName(provider.name().0.clone()),
provider_models,
);
}
}
self.models = models;
self.model_list = acp_thread::AgentModelList::Grouped(language_model_list);
self.refresh_models_tx.send(()).ok();
}
fn watch(&self) -> watch::Receiver<()> {
self.refresh_models_rx.clone()
}
pub fn model_from_id(
&self,
model_id: &acp_thread::AgentModelId,
) -> Option<Arc<dyn LanguageModel>> {
self.models.get(model_id).cloned()
}
fn map_language_model_to_info(
model: &Arc<dyn LanguageModel>,
provider: &Arc<dyn LanguageModelProvider>,
) -> acp_thread::AgentModelInfo {
acp_thread::AgentModelInfo {
id: Self::model_id(model),
name: model.name().0,
icon: Some(provider.icon()),
}
}
fn model_id(model: &Arc<dyn LanguageModel>) -> acp_thread::AgentModelId {
acp_thread::AgentModelId(format!("{}/{}", model.provider_id().0, model.id().0).into())
}
}
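
The composite ID produced by `model_id` above is simply `provider/model`. A tiny illustration with hypothetical provider and model IDs (not part of the diff):

    // e.g. provider "fake" plus model "fake" yields the "fake/fake" ID that the
    // tests later in this file assert against.
    let provider_id = "fake";
    let model_id = "fake";
    let composite = format!("{}/{}", provider_id, model_id);
    assert_eq!(composite, "fake/fake");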
pub struct NativeAgent {
/// Session ID -> Session mapping
sessions: HashMap<acp::SessionId, Session>,
@@ -158,14 +51,10 @@ pub struct NativeAgent {
project_context: Rc<RefCell<ProjectContext>>,
project_context_needs_refresh: watch::Sender<()>,
_maintain_project_context: Task<Result<()>>,
context_server_registry: Entity<ContextServerRegistry>,
/// Shared templates for all threads
templates: Arc<Templates>,
/// Cached model information
models: LanguageModels,
project: Entity<Project>,
prompt_store: Option<Entity<PromptStore>>,
fs: Arc<dyn Fs>,
_subscriptions: Vec<Subscription>,
}
@@ -174,7 +63,6 @@ impl NativeAgent {
project: Entity<Project>,
templates: Arc<Templates>,
prompt_store: Option<Entity<PromptStore>>,
fs: Arc<dyn Fs>,
cx: &mut AsyncApp,
) -> Result<Entity<NativeAgent>> {
log::info!("Creating new NativeAgent");
@@ -184,13 +72,7 @@ impl NativeAgent {
.await;
cx.new(|cx| {
let mut subscriptions = vec![
cx.subscribe(&project, Self::handle_project_event),
cx.subscribe(
&LanguageModelRegistry::global(cx),
Self::handle_models_updated_event,
),
];
let mut subscriptions = vec![cx.subscribe(&project, Self::handle_project_event)];
if let Some(prompt_store) = prompt_store.as_ref() {
subscriptions.push(cx.subscribe(prompt_store, Self::handle_prompts_updated_event))
}
@@ -204,23 +86,14 @@ impl NativeAgent {
_maintain_project_context: cx.spawn(async move |this, cx| {
Self::maintain_project_context(this, project_context_needs_refresh_rx, cx).await
}),
context_server_registry: cx.new(|cx| {
ContextServerRegistry::new(project.read(cx).context_server_store(), cx)
}),
templates,
models: LanguageModels::new(cx),
project,
prompt_store,
fs,
_subscriptions: subscriptions,
}
})
}
pub fn models(&self) -> &LanguageModels {
&self.models
}
async fn maintain_project_context(
this: WeakEntity<Self>,
mut needs_refresh: watch::Receiver<()>,
@@ -416,104 +289,75 @@ impl NativeAgent {
) {
self.project_context_needs_refresh.send(()).ok();
}
fn handle_models_updated_event(
&mut self,
_registry: Entity<LanguageModelRegistry>,
_event: &language_model::Event,
cx: &mut Context<Self>,
) {
self.models.refresh_list(cx);
for session in self.sessions.values_mut() {
session.thread.update(cx, |thread, _| {
let model_id = LanguageModels::model_id(&thread.selected_model);
if let Some(model) = self.models.model_from_id(&model_id) {
thread.selected_model = model.clone();
}
});
}
}
}
/// Wrapper struct that implements the AgentConnection trait
#[derive(Clone)]
pub struct NativeAgentConnection(pub Entity<NativeAgent>);
impl AgentModelSelector for NativeAgentConnection {
fn list_models(&self, cx: &mut App) -> Task<Result<acp_thread::AgentModelList>> {
impl ModelSelector for NativeAgentConnection {
fn list_models(&self, cx: &mut AsyncApp) -> Task<Result<Vec<Arc<dyn LanguageModel>>>> {
log::debug!("NativeAgentConnection::list_models called");
let list = self.0.read(cx).models.model_list.clone();
Task::ready(if list.is_empty() {
Err(anyhow::anyhow!("No models available"))
} else {
Ok(list)
cx.spawn(async move |cx| {
cx.update(|cx| {
let registry = LanguageModelRegistry::read_global(cx);
let models = registry.available_models(cx).collect::<Vec<_>>();
log::info!("Found {} available models", models.len());
if models.is_empty() {
Err(anyhow::anyhow!("No models available"))
} else {
Ok(models)
}
})?
})
}
fn select_model(
&self,
session_id: acp::SessionId,
model_id: acp_thread::AgentModelId,
cx: &mut App,
model: Arc<dyn LanguageModel>,
cx: &mut AsyncApp,
) -> Task<Result<()>> {
log::info!("Setting model for session {}: {}", session_id, model_id);
let Some(thread) = self
.0
.read(cx)
.sessions
.get(&session_id)
.map(|session| session.thread.clone())
else {
return Task::ready(Err(anyhow!("Session not found")));
};
let Some(model) = self.0.read(cx).models.model_from_id(&model_id) else {
return Task::ready(Err(anyhow!("Invalid model ID {}", model_id)));
};
thread.update(cx, |thread, _cx| {
thread.selected_model = model.clone();
});
update_settings_file::<AgentSettings>(
self.0.read(cx).fs.clone(),
cx,
move |settings, _cx| {
settings.set_model(model);
},
log::info!(
"Setting model for session {}: {:?}",
session_id,
model.name()
);
let agent = self.0.clone();
Task::ready(Ok(()))
cx.spawn(async move |cx| {
agent.update(cx, |agent, cx| {
if let Some(session) = agent.sessions.get(&session_id) {
session.thread.update(cx, |thread, _cx| {
thread.selected_model = model;
});
Ok(())
} else {
Err(anyhow!("Session not found"))
}
})?
})
}
fn selected_model(
&self,
session_id: &acp::SessionId,
cx: &mut App,
) -> Task<Result<acp_thread::AgentModelInfo>> {
cx: &mut AsyncApp,
) -> Task<Result<Arc<dyn LanguageModel>>> {
let agent = self.0.clone();
let session_id = session_id.clone();
let Some(thread) = self
.0
.read(cx)
.sessions
.get(&session_id)
.map(|session| session.thread.clone())
else {
return Task::ready(Err(anyhow!("Session not found")));
};
let model = thread.read(cx).selected_model.clone();
let Some(provider) = LanguageModelRegistry::read_global(cx).provider(&model.provider_id())
else {
return Task::ready(Err(anyhow!("Provider not found")));
};
Task::ready(Ok(LanguageModels::map_language_model_to_info(
&model, &provider,
)))
}
fn watch(&self, cx: &mut App) -> watch::Receiver<()> {
self.0.read(cx).models.watch()
cx.spawn(async move |cx| {
let thread = agent
.read_with(cx, |agent, _| {
agent
.sessions
.get(&session_id)
.map(|session| session.thread.clone())
})?
.ok_or_else(|| anyhow::anyhow!("Session not found"))?;
let selected = thread.read_with(cx, |thread, _| thread.selected_model.clone())?;
Ok(selected)
})
}
}
@@ -537,13 +381,7 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
// Create AcpThread
let acp_thread = cx.update(|cx| {
cx.new(|cx| {
acp_thread::AcpThread::new(
"agent2",
self.clone(),
project.clone(),
session_id.clone(),
cx,
)
acp_thread::AcpThread::new("agent2", self.clone(), project.clone(), session_id.clone(), cx)
})
})?;
let action_log = cx.update(|cx| acp_thread.read(cx).action_log().clone())?;
@@ -561,44 +399,25 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
let default_model = registry
.default_model()
.and_then(|default_model| {
agent
.models
.model_from_id(&LanguageModels::model_id(&default_model.model))
.map(|configured| {
log::info!(
"Using configured default model: {:?} from provider: {:?}",
configured.model.name(),
configured.provider.name()
);
configured.model
})
.ok_or_else(|| {
log::warn!("No default model configured in settings");
anyhow!(
"No default model. Please configure a default model in settings."
)
anyhow!("No default model configured. Please configure a default model in settings.")
})?;
let thread = cx.new(|cx| {
let mut thread = Thread::new(
project.clone(),
agent.project_context.clone(),
agent.context_server_registry.clone(),
action_log.clone(),
agent.templates.clone(),
default_model,
cx,
);
thread.add_tool(CreateDirectoryTool::new(project.clone()));
thread.add_tool(CopyPathTool::new(project.clone()));
thread.add_tool(DiagnosticsTool::new(project.clone()));
thread.add_tool(MovePathTool::new(project.clone()));
thread.add_tool(ListDirectoryTool::new(project.clone()));
thread.add_tool(OpenTool::new(project.clone()));
let mut thread = Thread::new(project.clone(), agent.project_context.clone(), action_log.clone(), agent.templates.clone(), default_model);
thread.add_tool(ThinkingTool);
thread.add_tool(FindPathTool::new(project.clone()));
thread.add_tool(FetchTool::new(project.read(cx).client().http_client()));
thread.add_tool(GrepTool::new(project.clone()));
thread.add_tool(ReadFileTool::new(project.clone(), action_log));
thread.add_tool(EditFileTool::new(cx.entity()));
thread.add_tool(NowTool);
thread.add_tool(TerminalTool::new(project.clone(), cx));
// TODO: Needs to be conditional based on zed model or not
thread.add_tool(WebSearchTool);
thread
});
@@ -615,7 +434,7 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
acp_thread: acp_thread.downgrade(),
_subscription: cx.observe_release(&acp_thread, |this, acp_thread, _cx| {
this.sessions.remove(acp_thread.session_id());
}),
})
},
);
})?;
@@ -632,17 +451,15 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
Task::ready(Ok(()))
}
fn model_selector(&self) -> Option<Rc<dyn AgentModelSelector>> {
Some(Rc::new(self.clone()) as Rc<dyn AgentModelSelector>)
fn model_selector(&self) -> Option<Rc<dyn ModelSelector>> {
Some(Rc::new(self.clone()) as Rc<dyn ModelSelector>)
}
fn prompt(
&self,
id: Option<acp_thread::UserMessageId>,
params: acp::PromptRequest,
cx: &mut App,
) -> Task<Result<acp::PromptResponse>> {
let id = id.expect("UserMessageId is required");
let session_id = params.session_id.clone();
let agent = self.0.clone();
log::info!("Received prompt request for session: {}", session_id);
@@ -663,14 +480,10 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
})?;
log::debug!("Found session for: {}", session_id);
let content: Vec<UserMessageContent> = params
.prompt
.into_iter()
.map(Into::into)
.collect::<Vec<_>>();
log::info!("Converted prompt to message: {} chars", content.len());
log::debug!("Message id: {:?}", id);
log::debug!("Message content: {:?}", content);
// Convert prompt to message
let message = convert_prompt_to_message(params.prompt);
log::info!("Converted prompt to message: {} chars", message.len());
log::debug!("Message content: {}", message);
// Get model using the ModelSelector capability (always available for agent2)
// Get the selected model from the thread directly
@@ -679,7 +492,7 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
// Send to thread
log::info!("Sending message to thread with model: {:?}", model.name());
let mut response_stream =
thread.update(cx, |thread, cx| thread.send(id, content, cx))?;
thread.update(cx, |thread, cx| thread.send(model, message, cx))?;
// Handle response stream and forward to session.acp_thread
while let Some(result) = response_stream.next().await {
@@ -773,33 +586,44 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
}
});
}
fn session_editor(
&self,
session_id: &agent_client_protocol::SessionId,
cx: &mut App,
) -> Option<Rc<dyn acp_thread::AgentSessionEditor>> {
self.0.update(cx, |agent, _cx| {
agent
.sessions
.get(session_id)
.map(|session| Rc::new(NativeAgentSessionEditor(session.thread.clone())) as _)
})
}
}
struct NativeAgentSessionEditor(Entity<Thread>);
/// Convert ACP content blocks to a message string
fn convert_prompt_to_message(blocks: Vec<acp::ContentBlock>) -> String {
log::debug!("Converting {} content blocks to message", blocks.len());
let mut message = String::new();
impl acp_thread::AgentSessionEditor for NativeAgentSessionEditor {
fn truncate(&self, message_id: acp_thread::UserMessageId, cx: &mut App) -> Task<Result<()>> {
Task::ready(self.0.update(cx, |thread, _cx| thread.truncate(message_id)))
for block in blocks {
match block {
acp::ContentBlock::Text(text) => {
log::trace!("Processing text block: {} chars", text.text.len());
message.push_str(&text.text);
}
acp::ContentBlock::ResourceLink(link) => {
log::trace!("Processing resource link: {}", link.uri);
message.push_str(&format!(" @{} ", link.uri));
}
acp::ContentBlock::Image(_) => {
log::trace!("Processing image block");
message.push_str(" [image] ");
}
acp::ContentBlock::Audio(_) => {
log::trace!("Processing audio block");
message.push_str(" [audio] ");
}
acp::ContentBlock::Resource(resource) => {
log::trace!("Processing resource block: {:?}", resource.resource);
message.push_str(&format!(" [resource: {:?}] ", resource.resource));
}
}
}
message
}
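
To make the mapping above concrete, here is a small runnable sketch (hypothetical values, not part of the diff) that mirrors how the individual match arms flatten non-text blocks into the message string, without constructing the acp content types themselves:

    // Text blocks are appended verbatim; resource links become " @<uri> ";
    // images and audio collapse to fixed placeholders.
    let uri = "file:///src/main.rs";
    let mut message = String::new();
    message.push_str("Fix this");                 // ContentBlock::Text
    message.push_str(&format!(" @{} ", uri));     // ContentBlock::ResourceLink
    message.push_str(" [image] ");                // ContentBlock::Image
    assert_eq!(message, "Fix this @file:///src/main.rs  [image] ");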
#[cfg(test)]
mod tests {
use super::*;
use acp_thread::{AgentConnection, AgentModelGroupName, AgentModelId, AgentModelInfo};
use fs::FakeFs;
use gpui::TestAppContext;
use serde_json::json;
@@ -817,15 +641,9 @@ mod tests {
)
.await;
let project = Project::test(fs.clone(), [], cx).await;
let agent = NativeAgent::new(
project.clone(),
Templates::new(),
None,
fs.clone(),
&mut cx.to_async(),
)
.await
.unwrap();
let agent = NativeAgent::new(project.clone(), Templates::new(), None, &mut cx.to_async())
.await
.unwrap();
agent.read_with(cx, |agent, _| {
assert_eq!(agent.project_context.borrow().worktrees, vec![])
});
@@ -866,131 +684,13 @@ mod tests {
});
}
#[gpui::test]
async fn test_listing_models(cx: &mut TestAppContext) {
init_test(cx);
let fs = FakeFs::new(cx.executor());
fs.insert_tree("/", json!({ "a": {} })).await;
let project = Project::test(fs.clone(), [], cx).await;
let connection = NativeAgentConnection(
NativeAgent::new(
project.clone(),
Templates::new(),
None,
fs.clone(),
&mut cx.to_async(),
)
.await
.unwrap(),
);
let models = cx.update(|cx| connection.list_models(cx)).await.unwrap();
let acp_thread::AgentModelList::Grouped(models) = models else {
panic!("Unexpected model group");
};
assert_eq!(
models,
IndexMap::from_iter([(
AgentModelGroupName("Fake".into()),
vec![AgentModelInfo {
id: AgentModelId("fake/fake".into()),
name: "Fake".into(),
icon: Some(ui::IconName::ZedAssistant),
}]
)])
);
}
#[gpui::test]
async fn test_model_selection_persists_to_settings(cx: &mut TestAppContext) {
init_test(cx);
let fs = FakeFs::new(cx.executor());
fs.create_dir(paths::settings_file().parent().unwrap())
.await
.unwrap();
fs.insert_file(
paths::settings_file(),
json!({
"agent": {
"default_model": {
"provider": "foo",
"model": "bar"
}
}
})
.to_string()
.into_bytes(),
)
.await;
let project = Project::test(fs.clone(), [], cx).await;
// Create the agent and connection
let agent = NativeAgent::new(
project.clone(),
Templates::new(),
None,
fs.clone(),
&mut cx.to_async(),
)
.await
.unwrap();
let connection = NativeAgentConnection(agent.clone());
// Create a thread/session
let acp_thread = cx
.update(|cx| {
Rc::new(connection.clone()).new_thread(
project.clone(),
Path::new("/a"),
&mut cx.to_async(),
)
})
.await
.unwrap();
let session_id = cx.update(|cx| acp_thread.read(cx).session_id().clone());
// Select a model
let model_id = AgentModelId("fake/fake".into());
cx.update(|cx| connection.select_model(session_id.clone(), model_id.clone(), cx))
.await
.unwrap();
// Verify the thread has the selected model
agent.read_with(cx, |agent, _| {
let session = agent.sessions.get(&session_id).unwrap();
session.thread.read_with(cx, |thread, _| {
assert_eq!(thread.selected_model.id().0, "fake");
});
});
cx.run_until_parked();
// Verify settings file was updated
let settings_content = fs.load(paths::settings_file()).await.unwrap();
let settings_json: serde_json::Value = serde_json::from_str(&settings_content).unwrap();
// Check that the agent settings contain the selected model
assert_eq!(
settings_json["agent"]["default_model"]["model"],
json!("fake")
);
assert_eq!(
settings_json["agent"]["default_model"]["provider"],
json!("fake")
);
}
fn init_test(cx: &mut TestAppContext) {
env_logger::try_init().ok();
cx.update(|cx| {
let settings_store = SettingsStore::test(cx);
cx.set_global(settings_store);
Project::init_settings(cx);
agent_settings::init(cx);
language::init(cx);
LanguageModelRegistry::test(cx);
});
}
}

View File

@@ -1,24 +1,16 @@
use std::{path::Path, rc::Rc, sync::Arc};
use std::path::Path;
use std::rc::Rc;
use agent_servers::AgentServer;
use anyhow::Result;
use fs::Fs;
use gpui::{App, Entity, Task};
use project::Project;
use prompt_store::PromptStore;
use crate::{NativeAgent, NativeAgentConnection, templates::Templates};
use crate::{templates::Templates, NativeAgent, NativeAgentConnection};
#[derive(Clone)]
pub struct NativeAgentServer {
fs: Arc<dyn Fs>,
}
impl NativeAgentServer {
pub fn new(fs: Arc<dyn Fs>) -> Self {
Self { fs }
}
}
pub struct NativeAgentServer;
impl AgentServer for NativeAgentServer {
fn name(&self) -> &'static str {
@@ -49,7 +41,6 @@ impl AgentServer for NativeAgentServer {
_root_dir
);
let project = project.clone();
let fs = self.fs.clone();
let prompt_store = PromptStore::global(cx);
cx.spawn(async move |cx| {
log::debug!("Creating templates for native agent");
@@ -57,7 +48,7 @@ impl AgentServer for NativeAgentServer {
let prompt_store = prompt_store.await?;
log::debug!("Creating native agent entity");
let agent = NativeAgent::new(project, templates, Some(prompt_store), fs, cx).await?;
let agent = NativeAgent::new(project, templates, Some(prompt_store), cx).await?;
// Create the connection wrapper
let connection = NativeAgentConnection(agent);

View File

@@ -1,20 +1,17 @@
use super::*;
use acp_thread::{AgentConnection, AgentModelGroupName, AgentModelList, UserMessageId};
use action_log::ActionLog;
use acp_thread::AgentConnection;
use agent_client_protocol::{self as acp};
use agent_settings::AgentProfileId;
use anyhow::Result;
use assistant_tool::ActionLog;
use client::{Client, UserStore};
use fs::{FakeFs, Fs};
use fs::FakeFs;
use futures::channel::mpsc::UnboundedReceiver;
use gpui::{
App, AppContext, Entity, Task, TestAppContext, UpdateGlobal, http_client::FakeHttpClient,
};
use gpui::{http_client::FakeHttpClient, AppContext, Entity, Task, TestAppContext};
use indoc::indoc;
use language_model::{
LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId,
LanguageModelRegistry, LanguageModelToolResult, LanguageModelToolUse, Role, StopReason,
fake_provider::FakeLanguageModel,
fake_provider::FakeLanguageModel, LanguageModel, LanguageModelCompletionError,
LanguageModelCompletionEvent, LanguageModelId, LanguageModelRegistry, LanguageModelToolResult,
LanguageModelToolUse, MessageContent, Role, StopReason,
};
use project::Project;
use prompt_store::ProjectContext;
@@ -22,7 +19,6 @@ use reqwest_client::ReqwestClient;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::json;
use settings::SettingsStore;
use smol::stream::StreamExt;
use std::{cell::RefCell, path::Path, rc::Rc, sync::Arc, time::Duration};
use util::path;
@@ -33,23 +29,19 @@ use test_tools::*;
#[gpui::test]
#[ignore = "can't run on CI yet"]
async fn test_echo(cx: &mut TestAppContext) {
let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
let events = thread
.update(cx, |thread, cx| {
thread.send(UserMessageId::new(), ["Testing: Reply with 'Hello'"], cx)
thread.send(model.clone(), "Testing: Reply with 'Hello'", cx)
})
.collect()
.await;
thread.update(cx, |thread, _cx| {
assert_eq!(
thread.last_message().unwrap().to_markdown(),
indoc! {"
## Assistant
Hello
"}
)
thread.messages().last().unwrap().content,
vec![MessageContent::Text("Hello".to_string())]
);
});
assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
}
@@ -57,18 +49,18 @@ async fn test_echo(cx: &mut TestAppContext) {
#[gpui::test]
#[ignore = "can't run on CI yet"]
async fn test_thinking(cx: &mut TestAppContext) {
let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4Thinking).await;
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4Thinking).await;
let events = thread
.update(cx, |thread, cx| {
thread.send(
UserMessageId::new(),
[indoc! {"
model.clone(),
indoc! {"
Testing:
Generate a thinking step where you just think the word 'Think',
and have your final answer be 'Hello'
"}],
"},
cx,
)
})
@@ -76,10 +68,9 @@ async fn test_thinking(cx: &mut TestAppContext) {
.await;
thread.update(cx, |thread, _cx| {
assert_eq!(
thread.last_message().unwrap().to_markdown(),
thread.messages().last().unwrap().to_markdown(),
indoc! {"
## Assistant
## assistant
<think>Think</think>
Hello
"}
@@ -100,9 +91,7 @@ async fn test_system_prompt(cx: &mut TestAppContext) {
project_context.borrow_mut().shell = "test-shell".into();
thread.update(cx, |thread, _| thread.add_tool(EchoTool));
thread.update(cx, |thread, cx| {
thread.send(UserMessageId::new(), ["abc"], cx)
});
thread.update(cx, |thread, cx| thread.send(model.clone(), "abc", cx));
cx.run_until_parked();
let mut pending_completions = fake_model.pending_completions();
assert_eq!(
@@ -132,15 +121,15 @@ async fn test_system_prompt(cx: &mut TestAppContext) {
#[gpui::test]
#[ignore = "can't run on CI yet"]
async fn test_basic_tool_calls(cx: &mut TestAppContext) {
let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
// Test a tool call that's likely to complete *before* streaming stops.
let events = thread
.update(cx, |thread, cx| {
thread.add_tool(EchoTool);
thread.send(
UserMessageId::new(),
["Now test the echo tool with 'Hello'. Does it work? Say 'Yes' or 'No'."],
model.clone(),
"Now test the echo tool with 'Hello'. Does it work? Say 'Yes' or 'No'.",
cx,
)
})
@@ -154,11 +143,8 @@ async fn test_basic_tool_calls(cx: &mut TestAppContext) {
thread.remove_tool(&AgentTool::name(&EchoTool));
thread.add_tool(DelayTool);
thread.send(
UserMessageId::new(),
[
"Now call the delay tool with 200ms.",
"When the timer goes off, then you echo the output of the tool.",
],
model.clone(),
"Now call the delay tool with 200ms. When the timer goes off, then you echo the output of the tool.",
cx,
)
})
@@ -166,36 +152,31 @@ async fn test_basic_tool_calls(cx: &mut TestAppContext) {
.await;
assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
thread.update(cx, |thread, _cx| {
assert!(
thread
.last_message()
.unwrap()
.as_agent_message()
.unwrap()
.content
.iter()
.any(|content| {
if let AgentMessageContent::Text(text) = content {
text.contains("Ding")
} else {
false
}
}),
"{}",
thread.to_markdown()
);
assert!(thread
.messages()
.last()
.unwrap()
.content
.iter()
.any(|content| {
if let MessageContent::Text(text) = content {
text.contains("Ding")
} else {
false
}
}));
});
}
#[gpui::test]
#[ignore = "can't run on CI yet"]
async fn test_streaming_tool_calls(cx: &mut TestAppContext) {
let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
// Test a tool call that's likely to complete *before* streaming stops.
let mut events = thread.update(cx, |thread, cx| {
thread.add_tool(WordListTool);
thread.send(UserMessageId::new(), ["Test the word_list tool."], cx)
thread.send(model.clone(), "Test the word_list tool.", cx)
});
let mut saw_partial_tool_use = false;
@@ -203,10 +184,8 @@ async fn test_streaming_tool_calls(cx: &mut TestAppContext) {
if let Ok(AgentResponseEvent::ToolCall(tool_call)) = event {
thread.update(cx, |thread, _cx| {
// Look for a tool use in the thread's last message
let message = thread.last_message().unwrap();
let agent_message = message.as_agent_message().unwrap();
let last_content = agent_message.content.last().unwrap();
if let AgentMessageContent::ToolUse(last_tool_use) = last_content {
let last_content = thread.messages().last().unwrap().content.last().unwrap();
if let MessageContent::ToolUse(last_tool_use) = last_content {
assert_eq!(last_tool_use.name.as_ref(), "word_list");
if tool_call.status == acp::ToolCallStatus::Pending {
if !last_tool_use.is_input_complete
@@ -244,7 +223,7 @@ async fn test_tool_authorization(cx: &mut TestAppContext) {
let mut events = thread.update(cx, |thread, cx| {
thread.add_tool(ToolRequiringPermission);
thread.send(UserMessageId::new(), ["abc"], cx)
thread.send(model.clone(), "abc", cx)
});
cx.run_until_parked();
fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
@@ -288,14 +267,14 @@ async fn test_tool_authorization(cx: &mut TestAppContext) {
assert_eq!(
message.content,
vec![
language_model::MessageContent::ToolResult(LanguageModelToolResult {
MessageContent::ToolResult(LanguageModelToolResult {
tool_use_id: tool_call_auth_1.tool_call.id.0.to_string().into(),
tool_name: ToolRequiringPermission.name().into(),
is_error: false,
content: "Allowed".into(),
output: Some("Allowed".into())
}),
language_model::MessageContent::ToolResult(LanguageModelToolResult {
MessageContent::ToolResult(LanguageModelToolResult {
tool_use_id: tool_call_auth_2.tool_call.id.0.to_string().into(),
tool_name: ToolRequiringPermission.name().into(),
is_error: true,
@@ -304,67 +283,6 @@ async fn test_tool_authorization(cx: &mut TestAppContext) {
})
]
);
// Simulate yet another tool call.
fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
LanguageModelToolUse {
id: "tool_id_3".into(),
name: ToolRequiringPermission.name().into(),
raw_input: "{}".into(),
input: json!({}),
is_input_complete: true,
},
));
fake_model.end_last_completion_stream();
// Respond by always allowing tools.
let tool_call_auth_3 = next_tool_call_authorization(&mut events).await;
tool_call_auth_3
.response
.send(tool_call_auth_3.options[0].id.clone())
.unwrap();
cx.run_until_parked();
let completion = fake_model.pending_completions().pop().unwrap();
let message = completion.messages.last().unwrap();
assert_eq!(
message.content,
vec![language_model::MessageContent::ToolResult(
LanguageModelToolResult {
tool_use_id: tool_call_auth_3.tool_call.id.0.to_string().into(),
tool_name: ToolRequiringPermission.name().into(),
is_error: false,
content: "Allowed".into(),
output: Some("Allowed".into())
}
)]
);
// Simulate a final tool call, ensuring we don't trigger authorization.
fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
LanguageModelToolUse {
id: "tool_id_4".into(),
name: ToolRequiringPermission.name().into(),
raw_input: "{}".into(),
input: json!({}),
is_input_complete: true,
},
));
fake_model.end_last_completion_stream();
cx.run_until_parked();
let completion = fake_model.pending_completions().pop().unwrap();
let message = completion.messages.last().unwrap();
assert_eq!(
message.content,
vec![language_model::MessageContent::ToolResult(
LanguageModelToolResult {
tool_use_id: "tool_id_4".into(),
tool_name: ToolRequiringPermission.name().into(),
is_error: false,
content: "Allowed".into(),
output: Some("Allowed".into())
}
)]
);
}
#[gpui::test]
@@ -372,9 +290,7 @@ async fn test_tool_hallucination(cx: &mut TestAppContext) {
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
let fake_model = model.as_fake();
let mut events = thread.update(cx, |thread, cx| {
thread.send(UserMessageId::new(), ["abc"], cx)
});
let mut events = thread.update(cx, |thread, cx| thread.send(model.clone(), "abc", cx));
cx.run_until_parked();
fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
LanguageModelToolUse {
@@ -420,7 +336,7 @@ async fn expect_tool_call_update_fields(
.unwrap();
match event {
AgentResponseEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(update)) => {
return update;
return update
}
event => {
panic!("Unexpected event {event:?}");
@@ -459,19 +375,15 @@ async fn next_tool_call_authorization(
#[gpui::test]
#[ignore = "can't run on CI yet"]
async fn test_concurrent_tool_calls(cx: &mut TestAppContext) {
let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
// Test concurrent tool calls with different delay times
let events = thread
.update(cx, |thread, cx| {
thread.add_tool(DelayTool);
thread.send(
UserMessageId::new(),
[
"Call the delay tool twice in the same message.",
"Once with 100ms. Once with 300ms.",
"When both timers are complete, describe the outputs.",
],
model.clone(),
"Call the delay tool twice in the same message. Once with 100ms. Once with 300ms. When both timers are complete, describe the outputs.",
cx,
)
})
@@ -482,13 +394,12 @@ async fn test_concurrent_tool_calls(cx: &mut TestAppContext) {
assert_eq!(stop_reasons, vec![acp::StopReason::EndTurn]);
thread.update(cx, |thread, _cx| {
let last_message = thread.last_message().unwrap();
let agent_message = last_message.as_agent_message().unwrap();
let text = agent_message
let last_message = thread.messages().last().unwrap();
let text = last_message
.content
.iter()
.filter_map(|content| {
if let AgentMessageContent::Text(text) = content {
if let MessageContent::Text(text) = content {
Some(text.as_str())
} else {
None
@@ -500,93 +411,17 @@ async fn test_concurrent_tool_calls(cx: &mut TestAppContext) {
});
}
#[gpui::test]
async fn test_profiles(cx: &mut TestAppContext) {
let ThreadTest {
model, thread, fs, ..
} = setup(cx, TestModel::Fake).await;
let fake_model = model.as_fake();
thread.update(cx, |thread, _cx| {
thread.add_tool(DelayTool);
thread.add_tool(EchoTool);
thread.add_tool(InfiniteTool);
});
// Override profiles and wait for settings to be loaded.
fs.insert_file(
paths::settings_file(),
json!({
"agent": {
"profiles": {
"test-1": {
"name": "Test Profile 1",
"tools": {
EchoTool.name(): true,
DelayTool.name(): true,
}
},
"test-2": {
"name": "Test Profile 2",
"tools": {
InfiniteTool.name(): true,
}
}
}
}
})
.to_string()
.into_bytes(),
)
.await;
cx.run_until_parked();
// Test that test-1 profile (default) has echo and delay tools
thread.update(cx, |thread, cx| {
thread.set_profile(AgentProfileId("test-1".into()));
thread.send(UserMessageId::new(), ["test"], cx);
});
cx.run_until_parked();
let mut pending_completions = fake_model.pending_completions();
assert_eq!(pending_completions.len(), 1);
let completion = pending_completions.pop().unwrap();
let tool_names: Vec<String> = completion
.tools
.iter()
.map(|tool| tool.name.clone())
.collect();
assert_eq!(tool_names, vec![DelayTool.name(), EchoTool.name()]);
fake_model.end_last_completion_stream();
// Switch to test-2 profile, and verify that it has only the infinite tool.
thread.update(cx, |thread, cx| {
thread.set_profile(AgentProfileId("test-2".into()));
thread.send(UserMessageId::new(), ["test2"], cx)
});
cx.run_until_parked();
let mut pending_completions = fake_model.pending_completions();
assert_eq!(pending_completions.len(), 1);
let completion = pending_completions.pop().unwrap();
let tool_names: Vec<String> = completion
.tools
.iter()
.map(|tool| tool.name.clone())
.collect();
assert_eq!(tool_names, vec![InfiniteTool.name()]);
}
#[gpui::test]
#[ignore = "can't run on CI yet"]
async fn test_cancellation(cx: &mut TestAppContext) {
let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Sonnet4).await;
let mut events = thread.update(cx, |thread, cx| {
thread.add_tool(InfiniteTool);
thread.add_tool(EchoTool);
thread.send(
UserMessageId::new(),
["Call the echo tool, then call the infinite tool, then explain their output"],
model.clone(),
"Call the echo tool and then call the infinite tool, then explain their output",
cx,
)
});
@@ -631,20 +466,14 @@ async fn test_cancellation(cx: &mut TestAppContext) {
// Ensure we can still send a new message after cancellation.
let events = thread
.update(cx, |thread, cx| {
thread.send(
UserMessageId::new(),
["Testing: reply with 'Hello' then stop."],
cx,
)
thread.send(model.clone(), "Testing: reply with 'Hello' then stop.", cx)
})
.collect::<Vec<_>>()
.await;
thread.update(cx, |thread, _cx| {
let message = thread.last_message().unwrap();
let agent_message = message.as_agent_message().unwrap();
assert_eq!(
agent_message.content,
vec![AgentMessageContent::Text("Hello".to_string())]
thread.messages().last().unwrap().content,
vec![MessageContent::Text("Hello".to_string())]
);
});
assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
@@ -655,16 +484,13 @@ async fn test_refusal(cx: &mut TestAppContext) {
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
let fake_model = model.as_fake();
let events = thread.update(cx, |thread, cx| {
thread.send(UserMessageId::new(), ["Hello"], cx)
});
let events = thread.update(cx, |thread, cx| thread.send(model.clone(), "Hello", cx));
cx.run_until_parked();
thread.read_with(cx, |thread, _| {
assert_eq!(
thread.to_markdown(),
indoc! {"
## User
## user
Hello
"}
);
@@ -676,12 +502,9 @@ async fn test_refusal(cx: &mut TestAppContext) {
assert_eq!(
thread.to_markdown(),
indoc! {"
## User
## user
Hello
## Assistant
## assistant
Hey!
"}
);
@@ -697,85 +520,6 @@ async fn test_refusal(cx: &mut TestAppContext) {
});
}
#[gpui::test]
async fn test_truncate(cx: &mut TestAppContext) {
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
let fake_model = model.as_fake();
let message_id = UserMessageId::new();
thread.update(cx, |thread, cx| {
thread.send(message_id.clone(), ["Hello"], cx)
});
cx.run_until_parked();
thread.read_with(cx, |thread, _| {
assert_eq!(
thread.to_markdown(),
indoc! {"
## User
Hello
"}
);
});
fake_model.send_last_completion_stream_text_chunk("Hey!");
cx.run_until_parked();
thread.read_with(cx, |thread, _| {
assert_eq!(
thread.to_markdown(),
indoc! {"
## User
Hello
## Assistant
Hey!
"}
);
});
thread
.update(cx, |thread, _cx| thread.truncate(message_id))
.unwrap();
cx.run_until_parked();
thread.read_with(cx, |thread, _| {
assert_eq!(thread.to_markdown(), "");
});
// Ensure we can still send a new message after truncation.
thread.update(cx, |thread, cx| {
thread.send(UserMessageId::new(), ["Hi"], cx)
});
thread.update(cx, |thread, _cx| {
assert_eq!(
thread.to_markdown(),
indoc! {"
## User
Hi
"}
);
});
cx.run_until_parked();
fake_model.send_last_completion_stream_text_chunk("Ahoy!");
cx.run_until_parked();
thread.read_with(cx, |thread, _| {
assert_eq!(
thread.to_markdown(),
indoc! {"
## User
Hi
## Assistant
Ahoy!
"}
);
});
}
#[gpui::test]
async fn test_agent_connection(cx: &mut TestAppContext) {
cx.update(settings::init);
@@ -794,26 +538,19 @@ async fn test_agent_connection(cx: &mut TestAppContext) {
language_models::init(user_store.clone(), client.clone(), cx);
Project::init_settings(cx);
LanguageModelRegistry::test(cx);
agent_settings::init(cx);
});
cx.executor().forbid_parking();
// Create a project for new_thread
let fake_fs = cx.update(|cx| fs::FakeFs::new(cx.background_executor().clone()));
fake_fs.insert_tree(path!("/test"), json!({})).await;
let project = Project::test(fake_fs.clone(), [Path::new("/test")], cx).await;
let project = Project::test(fake_fs, [Path::new("/test")], cx).await;
let cwd = Path::new("/test");
// Create agent and connection
let agent = NativeAgent::new(
project.clone(),
templates.clone(),
None,
fake_fs.clone(),
&mut cx.to_async(),
)
.await
.unwrap();
let agent = NativeAgent::new(project.clone(), templates.clone(), None, &mut cx.to_async())
.await
.unwrap();
let connection = NativeAgentConnection(agent.clone());
// Test model_selector returns Some
@@ -826,22 +563,22 @@ async fn test_agent_connection(cx: &mut TestAppContext) {
// Test list_models
let listed_models = cx
.update(|cx| selector.list_models(cx))
.update(|cx| {
let mut async_cx = cx.to_async();
selector.list_models(&mut async_cx)
})
.await
.expect("list_models should succeed");
let AgentModelList::Grouped(listed_models) = listed_models else {
panic!("Unexpected model list type");
};
assert!(!listed_models.is_empty(), "should have at least one model");
assert_eq!(
listed_models[&AgentModelGroupName("Fake".into())][0].id.0,
"fake/fake"
);
assert_eq!(listed_models[0].id().0, "fake");
// Create a thread using new_thread
let connection_rc = Rc::new(connection.clone());
let acp_thread = cx
.update(|cx| connection_rc.new_thread(project, cwd, &mut cx.to_async()))
.update(|cx| {
let mut async_cx = cx.to_async();
connection_rc.new_thread(project, cwd, &mut async_cx)
})
.await
.expect("new_thread should succeed");
@@ -850,12 +587,12 @@ async fn test_agent_connection(cx: &mut TestAppContext) {
// Test selected_model returns the default
let model = cx
.update(|cx| selector.selected_model(&session_id, cx))
.update(|cx| {
let mut async_cx = cx.to_async();
selector.selected_model(&session_id, &mut async_cx)
})
.await
.expect("selected_model should succeed");
let model = cx
.update(|cx| agent.read(cx).models().model_from_id(&model.id))
.unwrap();
let model = model.as_fake();
assert_eq!(model.id().0, "fake", "should return default model");
@@ -889,7 +626,6 @@ async fn test_agent_connection(cx: &mut TestAppContext) {
let result = cx
.update(|cx| {
connection.prompt(
Some(acp_thread::UserMessageId::new()),
acp::PromptRequest {
session_id: session_id.clone(),
prompt: vec!["ghi".into()],
@@ -912,9 +648,7 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
thread.update(cx, |thread, _cx| thread.add_tool(ThinkingTool));
let fake_model = model.as_fake();
let mut events = thread.update(cx, |thread, cx| {
thread.send(UserMessageId::new(), ["Think"], cx)
});
let mut events = thread.update(cx, |thread, cx| thread.send(model.clone(), "Think", cx));
cx.run_until_parked();
// Simulate streaming partial input.
@@ -999,7 +733,6 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
id: acp::ToolCallId("1".into()),
fields: acp::ToolCallUpdateFields {
status: Some(acp::ToolCallStatus::Completed),
raw_output: Some("Finished thinking.".into()),
..Default::default()
},
}
@@ -1023,7 +756,6 @@ struct ThreadTest {
model: Arc<dyn LanguageModel>,
thread: Entity<Thread>,
project_context: Rc<RefCell<ProjectContext>>,
fs: Arc<FakeFs>,
}
enum TestModel {
@@ -1044,59 +776,28 @@ impl TestModel {
async fn setup(cx: &mut TestAppContext, model: TestModel) -> ThreadTest {
cx.executor().allow_parking();
let fs = FakeFs::new(cx.background_executor.clone());
fs.create_dir(paths::settings_file().parent().unwrap())
.await
.unwrap();
fs.insert_file(
paths::settings_file(),
json!({
"agent": {
"default_profile": "test-profile",
"profiles": {
"test-profile": {
"name": "Test Profile",
"tools": {
EchoTool.name(): true,
DelayTool.name(): true,
WordListTool.name(): true,
ToolRequiringPermission.name(): true,
InfiniteTool.name(): true,
}
}
}
}
})
.to_string()
.into_bytes(),
)
.await;
cx.update(|cx| {
settings::init(cx);
Project::init_settings(cx);
agent_settings::init(cx);
gpui_tokio::init(cx);
let http_client = ReqwestClient::user_agent("agent tests").unwrap();
cx.set_http_client(Arc::new(http_client));
client::init_settings(cx);
let client = Client::production(cx);
let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
language_model::init(client.clone(), cx);
language_models::init(user_store.clone(), client.clone(), cx);
watch_settings(fs.clone(), cx);
});
let templates = Templates::new();
let fs = FakeFs::new(cx.background_executor.clone());
fs.insert_tree(path!("/test"), json!({})).await;
let project = Project::test(fs.clone(), [path!("/test").as_ref()], cx).await;
let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
let model = cx
.update(|cx| {
gpui_tokio::init(cx);
let http_client = ReqwestClient::user_agent("agent tests").unwrap();
cx.set_http_client(Arc::new(http_client));
client::init_settings(cx);
let client = Client::production(cx);
let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
language_model::init(client.clone(), cx);
language_models::init(user_store.clone(), client.clone(), cx);
if let TestModel::Fake = model {
Task::ready(Arc::new(FakeLanguageModel::default()) as Arc<_>)
} else {
@@ -1119,25 +820,20 @@ async fn setup(cx: &mut TestAppContext, model: TestModel) -> ThreadTest {
.await;
let project_context = Rc::new(RefCell::new(ProjectContext::default()));
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let thread = cx.new(|cx| {
let thread = cx.new(|_| {
Thread::new(
project,
project_context.clone(),
context_server_registry,
action_log,
templates,
model.clone(),
cx,
)
});
ThreadTest {
model,
thread,
project_context,
fs,
}
}
@@ -1148,26 +844,3 @@ fn init_logger() {
env_logger::init();
}
}
fn watch_settings(fs: Arc<dyn Fs>, cx: &mut App) {
let fs = fs.clone();
cx.spawn({
async move |cx| {
let mut new_settings_content_rx = settings::watch_config_file(
cx.background_executor(),
fs,
paths::settings_file().clone(),
);
while let Some(new_settings_content) = new_settings_content_rx.next().await {
cx.update(|cx| {
SettingsStore::update_global(cx, |settings, cx| {
settings.set_user_settings(&new_settings_content, cx)
})
})
.ok();
}
}
})
.detach();
}

View File

@@ -110,9 +110,9 @@ impl AgentTool for ToolRequiringPermission {
event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<String>> {
let authorize = event_stream.authorize("Authorize?", cx);
let auth_check = event_stream.authorize("Authorize?".into());
cx.foreground_executor().spawn(async move {
authorize.await?;
auth_check.await?;
Ok("Allowed".to_string())
})
}

File diff suppressed because it is too large.

View File

@@ -1,35 +1,9 @@
mod context_server_registry;
mod copy_path_tool;
mod create_directory_tool;
mod delete_path_tool;
mod diagnostics_tool;
mod edit_file_tool;
mod fetch_tool;
mod find_path_tool;
mod grep_tool;
mod list_directory_tool;
mod move_path_tool;
mod now_tool;
mod open_tool;
mod read_file_tool;
mod terminal_tool;
mod thinking_tool;
mod web_search_tool;
pub use context_server_registry::*;
pub use copy_path_tool::*;
pub use create_directory_tool::*;
pub use delete_path_tool::*;
pub use diagnostics_tool::*;
pub use edit_file_tool::*;
pub use fetch_tool::*;
pub use find_path_tool::*;
pub use grep_tool::*;
pub use list_directory_tool::*;
pub use move_path_tool::*;
pub use now_tool::*;
pub use open_tool::*;
pub use read_file_tool::*;
pub use terminal_tool::*;
pub use thinking_tool::*;
pub use web_search_tool::*;

View File

@@ -1,231 +0,0 @@
use crate::{AgentToolOutput, AnyAgentTool, ToolCallEventStream};
use agent_client_protocol::ToolKind;
use anyhow::{Result, anyhow, bail};
use collections::{BTreeMap, HashMap};
use context_server::ContextServerId;
use gpui::{App, Context, Entity, SharedString, Task};
use project::context_server_store::{ContextServerStatus, ContextServerStore};
use std::sync::Arc;
use util::ResultExt;
pub struct ContextServerRegistry {
server_store: Entity<ContextServerStore>,
registered_servers: HashMap<ContextServerId, RegisteredContextServer>,
_subscription: gpui::Subscription,
}
struct RegisteredContextServer {
tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
load_tools: Task<Result<()>>,
}
impl ContextServerRegistry {
pub fn new(server_store: Entity<ContextServerStore>, cx: &mut Context<Self>) -> Self {
let mut this = Self {
server_store: server_store.clone(),
registered_servers: HashMap::default(),
_subscription: cx.subscribe(&server_store, Self::handle_context_server_store_event),
};
for server in server_store.read(cx).running_servers() {
this.reload_tools_for_server(server.id(), cx);
}
this
}
pub fn servers(
&self,
) -> impl Iterator<
Item = (
&ContextServerId,
&BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
),
> {
self.registered_servers
.iter()
.map(|(id, server)| (id, &server.tools))
}
fn reload_tools_for_server(&mut self, server_id: ContextServerId, cx: &mut Context<Self>) {
let Some(server) = self.server_store.read(cx).get_running_server(&server_id) else {
return;
};
let Some(client) = server.client() else {
return;
};
if !client.capable(context_server::protocol::ServerCapability::Tools) {
return;
}
let registered_server =
self.registered_servers
.entry(server_id.clone())
.or_insert(RegisteredContextServer {
tools: BTreeMap::default(),
load_tools: Task::ready(Ok(())),
});
registered_server.load_tools = cx.spawn(async move |this, cx| {
let response = client
.request::<context_server::types::requests::ListTools>(())
.await;
this.update(cx, |this, cx| {
let Some(registered_server) = this.registered_servers.get_mut(&server_id) else {
return;
};
registered_server.tools.clear();
if let Some(response) = response.log_err() {
for tool in response.tools {
let tool = Arc::new(ContextServerTool::new(
this.server_store.clone(),
server.id(),
tool,
));
registered_server.tools.insert(tool.name(), tool);
}
cx.notify();
}
})
});
}
fn handle_context_server_store_event(
&mut self,
_: Entity<ContextServerStore>,
event: &project::context_server_store::Event,
cx: &mut Context<Self>,
) {
match event {
project::context_server_store::Event::ServerStatusChanged { server_id, status } => {
match status {
ContextServerStatus::Starting => {}
ContextServerStatus::Running => {
self.reload_tools_for_server(server_id.clone(), cx);
}
ContextServerStatus::Stopped | ContextServerStatus::Error(_) => {
self.registered_servers.remove(&server_id);
cx.notify();
}
}
}
}
}
}
struct ContextServerTool {
store: Entity<ContextServerStore>,
server_id: ContextServerId,
tool: context_server::types::Tool,
}
impl ContextServerTool {
fn new(
store: Entity<ContextServerStore>,
server_id: ContextServerId,
tool: context_server::types::Tool,
) -> Self {
Self {
store,
server_id,
tool,
}
}
}
impl AnyAgentTool for ContextServerTool {
fn name(&self) -> SharedString {
self.tool.name.clone().into()
}
fn description(&self) -> SharedString {
self.tool.description.clone().unwrap_or_default().into()
}
fn kind(&self) -> ToolKind {
ToolKind::Other
}
fn initial_title(&self, _input: serde_json::Value) -> SharedString {
format!("Run MCP tool `{}`", self.tool.name).into()
}
fn input_schema(
&self,
format: language_model::LanguageModelToolSchemaFormat,
) -> Result<serde_json::Value> {
let mut schema = self.tool.input_schema.clone();
assistant_tool::adapt_schema_to_format(&mut schema, format)?;
Ok(match schema {
serde_json::Value::Null => {
serde_json::json!({ "type": "object", "properties": [] })
}
serde_json::Value::Object(map) if map.is_empty() => {
serde_json::json!({ "type": "object", "properties": [] })
}
_ => schema,
})
}
fn run(
self: Arc<Self>,
input: serde_json::Value,
_event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<AgentToolOutput>> {
let Some(server) = self.store.read(cx).get_running_server(&self.server_id) else {
return Task::ready(Err(anyhow!("Context server not found")));
};
let tool_name = self.tool.name.clone();
let server_clone = server.clone();
let input_clone = input.clone();
cx.spawn(async move |_cx| {
let Some(protocol) = server_clone.client() else {
bail!("Context server not initialized");
};
let arguments = if let serde_json::Value::Object(map) = input_clone {
Some(map.into_iter().collect())
} else {
None
};
log::trace!(
"Running tool: {} with arguments: {:?}",
tool_name,
arguments
);
let response = protocol
.request::<context_server::types::requests::CallTool>(
context_server::types::CallToolParams {
name: tool_name,
arguments,
meta: None,
},
)
.await?;
let mut result = String::new();
for content in response.content {
match content {
context_server::types::ToolResponseContent::Text { text } => {
result.push_str(&text);
}
context_server::types::ToolResponseContent::Image { .. } => {
log::warn!("Ignoring image content from tool response");
}
context_server::types::ToolResponseContent::Audio { .. } => {
log::warn!("Ignoring audio content from tool response");
}
context_server::types::ToolResponseContent::Resource { .. } => {
log::warn!("Ignoring resource content from tool response");
}
}
}
Ok(AgentToolOutput {
raw_output: result.clone().into(),
llm_output: result.into(),
})
})
}
}
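
The `input_schema` method above falls back to an empty object schema when an MCP tool declares none. A standalone sketch of that fallback using only `serde_json` (illustrative, not part of the diff):

    // A null or empty-object schema is replaced by a permissive empty-object
    // schema before being handed to the language model.
    let schema = serde_json::Value::Null;
    let normalized = match schema {
        serde_json::Value::Null => serde_json::json!({ "type": "object", "properties": [] }),
        serde_json::Value::Object(map) if map.is_empty() => {
            serde_json::json!({ "type": "object", "properties": [] })
        }
        other => other,
    };
    assert_eq!(normalized, serde_json::json!({ "type": "object", "properties": [] }));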

View File

@@ -1,118 +0,0 @@
use crate::{AgentTool, ToolCallEventStream};
use agent_client_protocol::ToolKind;
use anyhow::{Context as _, Result, anyhow};
use gpui::{App, AppContext, Entity, SharedString, Task};
use project::Project;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use util::markdown::MarkdownInlineCode;
/// Copies a file or directory in the project, and returns confirmation that the
/// copy succeeded.
///
/// Directory contents will be copied recursively (like `cp -r`).
///
/// This tool should be used when it's desirable to create a copy of a file or
/// directory without modifying the original. It's much more efficient than
/// doing this by separately reading and then writing the file or directory's
/// contents, so this tool should be preferred over that approach whenever
/// copying is the goal.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CopyPathToolInput {
/// The source path of the file or directory to copy.
/// If a directory is specified, its contents will be copied recursively (like `cp -r`).
///
/// <example>
/// If the project has the following files:
///
/// - directory1/a/something.txt
/// - directory2/a/things.txt
/// - directory3/a/other.txt
///
/// You can copy the first file by providing a source_path of "directory1/a/something.txt"
/// </example>
pub source_path: String,
/// The destination path where the file or directory should be copied to.
///
/// <example>
/// To copy "directory1/a/something.txt" to "directory2/b/copy.txt",
/// provide a destination_path of "directory2/b/copy.txt"
/// </example>
pub destination_path: String,
}
pub struct CopyPathTool {
project: Entity<Project>,
}
impl CopyPathTool {
pub fn new(project: Entity<Project>) -> Self {
Self { project }
}
}
impl AgentTool for CopyPathTool {
type Input = CopyPathToolInput;
type Output = String;
fn name(&self) -> SharedString {
"copy_path".into()
}
fn kind(&self) -> ToolKind {
ToolKind::Move
}
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> ui::SharedString {
if let Ok(input) = input {
let src = MarkdownInlineCode(&input.source_path);
let dest = MarkdownInlineCode(&input.destination_path);
format!("Copy {src} to {dest}").into()
} else {
"Copy path".into()
}
}
fn run(
self: Arc<Self>,
input: Self::Input,
_event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
let copy_task = self.project.update(cx, |project, cx| {
match project
.find_project_path(&input.source_path, cx)
.and_then(|project_path| project.entry_for_path(&project_path, cx))
{
Some(entity) => match project.find_project_path(&input.destination_path, cx) {
Some(project_path) => {
project.copy_entry(entity.id, None, project_path.path, cx)
}
None => Task::ready(Err(anyhow!(
"Destination path {} was outside the project.",
input.destination_path
))),
},
None => Task::ready(Err(anyhow!(
"Source path {} was not found in the project.",
input.source_path
))),
}
});
cx.background_spawn(async move {
let _ = copy_task.await.with_context(|| {
format!(
"Copying {} to {}",
input.source_path, input.destination_path
)
})?;
Ok(format!(
"Copied {} to {}",
input.source_path, input.destination_path
))
})
}
}

View File

@@ -1,89 +0,0 @@
use agent_client_protocol::ToolKind;
use anyhow::{Context as _, Result, anyhow};
use gpui::{App, Entity, SharedString, Task};
use project::Project;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use util::markdown::MarkdownInlineCode;
use crate::{AgentTool, ToolCallEventStream};
/// Creates a new directory at the specified path within the project. Returns
/// confirmation that the directory was created.
///
/// This tool creates a directory and all necessary parent directories (similar
/// to `mkdir -p`). It should be used whenever you need to create new
/// directories within the project.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CreateDirectoryToolInput {
/// The path of the new directory.
///
/// <example>
/// If the project has the following structure:
///
/// - directory1/
/// - directory2/
///
/// You can create a new directory by providing a path of "directory1/new_directory"
/// </example>
pub path: String,
}
pub struct CreateDirectoryTool {
project: Entity<Project>,
}
impl CreateDirectoryTool {
pub fn new(project: Entity<Project>) -> Self {
Self { project }
}
}
impl AgentTool for CreateDirectoryTool {
type Input = CreateDirectoryToolInput;
type Output = String;
fn name(&self) -> SharedString {
"create_directory".into()
}
fn kind(&self) -> ToolKind {
ToolKind::Read
}
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
if let Ok(input) = input {
format!("Create directory {}", MarkdownInlineCode(&input.path)).into()
} else {
"Create directory".into()
}
}
fn run(
self: Arc<Self>,
input: Self::Input,
_event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
let project_path = match self.project.read(cx).find_project_path(&input.path, cx) {
Some(project_path) => project_path,
None => {
return Task::ready(Err(anyhow!("Path to create was outside the project")));
}
};
let destination_path: Arc<str> = input.path.as_str().into();
let create_entry = self.project.update(cx, |project, cx| {
project.create_entry(project_path.clone(), true, cx)
});
cx.spawn(async move |_cx| {
create_entry
.await
.with_context(|| format!("Creating directory {destination_path}"))?;
Ok(format!("Created directory {destination_path}"))
})
}
}
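As a quick illustration of how a tool call's JSON arguments map onto the typed input before `run` is invoked, here is a hedged, self-contained sketch assuming `serde` and `serde_json` (the struct is redeclared locally so the example compiles on its own):
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct CreateDirectoryToolInput {
    path: String,
}

fn main() -> serde_json::Result<()> {
    // The agent runtime deserializes the model's arguments into the input type.
    let raw = r#"{ "path": "directory1/new_directory" }"#;
    let input: CreateDirectoryToolInput = serde_json::from_str(raw)?;
    assert_eq!(input.path, "directory1/new_directory");
    Ok(())
}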

View File

@@ -1,137 +0,0 @@
use crate::{AgentTool, ToolCallEventStream};
use action_log::ActionLog;
use agent_client_protocol::ToolKind;
use anyhow::{Context as _, Result, anyhow};
use futures::{SinkExt, StreamExt, channel::mpsc};
use gpui::{App, AppContext, Entity, SharedString, Task};
use project::{Project, ProjectPath};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
/// Deletes the file or directory (and the directory's contents, recursively) at
/// the specified path in the project, and returns confirmation of the deletion.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct DeletePathToolInput {
/// The path of the file or directory to delete.
///
/// <example>
/// If the project has the following files:
///
/// - directory1/a/something.txt
/// - directory2/a/things.txt
/// - directory3/a/other.txt
///
/// You can delete the first file by providing a path of "directory1/a/something.txt"
/// </example>
pub path: String,
}
pub struct DeletePathTool {
project: Entity<Project>,
action_log: Entity<ActionLog>,
}
impl DeletePathTool {
pub fn new(project: Entity<Project>, action_log: Entity<ActionLog>) -> Self {
Self {
project,
action_log,
}
}
}
impl AgentTool for DeletePathTool {
type Input = DeletePathToolInput;
type Output = String;
fn name(&self) -> SharedString {
"delete_path".into()
}
fn kind(&self) -> ToolKind {
ToolKind::Delete
}
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
if let Ok(input) = input {
format!("Delete “`{}`”", input.path).into()
} else {
"Delete path".into()
}
}
fn run(
self: Arc<Self>,
input: Self::Input,
_event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
let path = input.path;
let Some(project_path) = self.project.read(cx).find_project_path(&path, cx) else {
return Task::ready(Err(anyhow!(
"Couldn't delete {path} because that path isn't in this project."
)));
};
let Some(worktree) = self
.project
.read(cx)
.worktree_for_id(project_path.worktree_id, cx)
else {
return Task::ready(Err(anyhow!(
"Couldn't delete {path} because that path isn't in this project."
)));
};
let worktree_snapshot = worktree.read(cx).snapshot();
let (mut paths_tx, mut paths_rx) = mpsc::channel(256);
cx.background_spawn({
let project_path = project_path.clone();
async move {
for entry in
worktree_snapshot.traverse_from_path(true, false, false, &project_path.path)
{
if !entry.path.starts_with(&project_path.path) {
break;
}
paths_tx
.send(ProjectPath {
worktree_id: project_path.worktree_id,
path: entry.path.clone(),
})
.await?;
}
anyhow::Ok(())
}
})
.detach();
let project = self.project.clone();
let action_log = self.action_log.clone();
cx.spawn(async move |cx| {
while let Some(path) = paths_rx.next().await {
if let Ok(buffer) = project
.update(cx, |project, cx| project.open_buffer(path, cx))?
.await
{
action_log.update(cx, |action_log, cx| {
action_log.will_delete_buffer(buffer.clone(), cx)
})?;
}
}
let deletion_task = project
.update(cx, |project, cx| {
project.delete_file(project_path, false, cx)
})?
.with_context(|| {
format!("Couldn't delete {path} because that path isn't in this project.")
})?;
deletion_task
.await
.with_context(|| format!("Deleting {path}"))?;
Ok(format!("Deleted {path}"))
})
}
}
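The traversal above stops at the first entry that falls outside the target subtree. A dependency-free sketch of that prefix guard, assuming entries arrive in path-sorted order (which is what the `break` above implies):
use std::path::{Path, PathBuf};

fn entries_under(sorted_entries: &[PathBuf], root: &Path) -> Vec<PathBuf> {
    sorted_entries
        .iter()
        // Entries are sorted, so everything before `root` can be skipped...
        .skip_while(|entry| entry.as_path() < root)
        // ...and the walk ends at the first entry outside the subtree.
        .take_while(|entry| entry.starts_with(root))
        .cloned()
        .collect()
}

fn main() {
    let entries = vec![
        PathBuf::from("a/one.txt"),
        PathBuf::from("b"),
        PathBuf::from("b/two.txt"),
        PathBuf::from("c/three.txt"),
    ];
    assert_eq!(
        entries_under(&entries, Path::new("b")),
        vec![PathBuf::from("b"), PathBuf::from("b/two.txt")]
    );
}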

View File

@@ -1,163 +0,0 @@
use crate::{AgentTool, ToolCallEventStream};
use agent_client_protocol as acp;
use anyhow::{Result, anyhow};
use gpui::{App, Entity, Task};
use language::{DiagnosticSeverity, OffsetRangeExt};
use project::Project;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{fmt::Write, path::Path, sync::Arc};
use ui::SharedString;
use util::markdown::MarkdownInlineCode;
/// Get errors and warnings for the project or a specific file.
///
/// This tool can be invoked after a series of edits to determine if further edits are necessary, or if the user asks to fix errors or warnings in their codebase.
///
/// When a path is provided, shows all diagnostics for that specific file.
/// When no path is provided, shows a summary of error and warning counts for all files in the project.
///
/// <example>
/// To get diagnostics for a specific file:
/// {
/// "path": "src/main.rs"
/// }
///
/// To get a project-wide diagnostic summary:
/// {}
/// </example>
///
/// <guidelines>
/// - If you think you can fix a diagnostic, make 1-2 attempts and then give up.
/// - Don't remove code you've generated just because you can't fix an error. The user can help you fix it.
/// </guidelines>
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct DiagnosticsToolInput {
/// The path to get diagnostics for. If not provided, returns a project-wide summary.
///
/// This path should never be absolute, and the first component
/// of the path should always be a root directory in a project.
///
/// <example>
/// If the project has the following root directories:
///
/// - lorem
/// - ipsum
///
/// If you want to access diagnostics for `dolor.txt` in `ipsum`, you should use the path `ipsum/dolor.txt`.
/// </example>
pub path: Option<String>,
}
pub struct DiagnosticsTool {
project: Entity<Project>,
}
impl DiagnosticsTool {
pub fn new(project: Entity<Project>) -> Self {
Self { project }
}
}
impl AgentTool for DiagnosticsTool {
type Input = DiagnosticsToolInput;
type Output = String;
fn name(&self) -> SharedString {
"diagnostics".into()
}
fn kind(&self) -> acp::ToolKind {
acp::ToolKind::Read
}
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
if let Some(path) = input.ok().and_then(|input| match input.path {
Some(path) if !path.is_empty() => Some(path),
_ => None,
}) {
format!("Check diagnostics for {}", MarkdownInlineCode(&path)).into()
} else {
"Check project diagnostics".into()
}
}
fn run(
self: Arc<Self>,
input: Self::Input,
_event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
match input.path {
Some(path) if !path.is_empty() => {
let Some(project_path) = self.project.read(cx).find_project_path(&path, cx) else {
return Task::ready(Err(anyhow!("Could not find path {path} in project",)));
};
let buffer = self
.project
.update(cx, |project, cx| project.open_buffer(project_path, cx));
cx.spawn(async move |cx| {
let mut output = String::new();
let buffer = buffer.await?;
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?;
for (_, group) in snapshot.diagnostic_groups(None) {
let entry = &group.entries[group.primary_ix];
let range = entry.range.to_point(&snapshot);
let severity = match entry.diagnostic.severity {
DiagnosticSeverity::ERROR => "error",
DiagnosticSeverity::WARNING => "warning",
_ => continue,
};
writeln!(
output,
"{} at line {}: {}",
severity,
range.start.row + 1,
entry.diagnostic.message
)?;
}
if output.is_empty() {
Ok("File doesn't have errors or warnings!".to_string())
} else {
Ok(output)
}
})
}
_ => {
let project = self.project.read(cx);
let mut output = String::new();
let mut has_diagnostics = false;
for (project_path, _, summary) in project.diagnostic_summaries(true, cx) {
if summary.error_count > 0 || summary.warning_count > 0 {
let Some(worktree) = project.worktree_for_id(project_path.worktree_id, cx)
else {
continue;
};
has_diagnostics = true;
output.push_str(&format!(
"{}: {} error(s), {} warning(s)\n",
Path::new(worktree.read(cx).root_name())
.join(project_path.path)
.display(),
summary.error_count,
summary.warning_count
));
}
}
if has_diagnostics {
Task::ready(Ok(output))
} else {
Task::ready(Ok("No errors or warnings found in the project.".into()))
}
}
}
}
}
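A standalone sketch of the per-file report format produced above, with illustrative types (the real code reads severities and ranges from the buffer snapshot): only errors and warnings are emitted, with 1-indexed line numbers.
use std::fmt::Write as _;

enum Severity {
    Error,
    Warning,
    Hint,
}

fn render(diagnostics: &[(Severity, u32, &str)]) -> String {
    let mut output = String::new();
    for (severity, zero_indexed_row, message) in diagnostics {
        let severity = match severity {
            Severity::Error => "error",
            Severity::Warning => "warning",
            // Anything below warning is skipped, matching the tool above.
            _ => continue,
        };
        writeln!(output, "{} at line {}: {}", severity, zero_indexed_row + 1, message).unwrap();
    }
    output
}

fn main() {
    let out = render(&[
        (Severity::Error, 4, "mismatched types"),
        (Severity::Hint, 9, "unused import"),
    ]);
    assert_eq!(out, "error at line 5: mismatched types\n");
}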

View File

@@ -1,13 +1,12 @@
use crate::{AgentTool, Thread, ToolCallEventStream};
use acp_thread::Diff;
use agent_client_protocol::{self as acp, ToolCallLocation, ToolCallUpdateFields};
use anyhow::{Context as _, Result, anyhow};
use agent_client_protocol as acp;
use anyhow::{anyhow, Context as _, Result};
use assistant_tools::edit_agent::{EditAgent, EditAgentOutput, EditAgentOutputEvent, EditFormat};
use cloud_llm_client::CompletionIntent;
use collections::HashSet;
use gpui::{App, AppContext, AsyncApp, Entity, Task};
use indoc::formatdoc;
use language::ToPoint;
use language::language_settings::{self, FormatOnSave};
use language_model::LanguageModelToolResultContent;
use paths;
@@ -134,7 +133,7 @@ impl EditFileTool {
&self,
input: &EditFileToolInput,
event_stream: &ToolCallEventStream,
cx: &mut App,
cx: &App,
) -> Task<Result<()>> {
if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
return Task::ready(Ok(()));
@@ -148,9 +147,8 @@ impl EditFileTool {
.components()
.any(|component| component.as_os_str() == local_settings_folder.as_os_str())
{
return event_stream.authorize(
format!("{} (local settings)", input.display_description),
cx,
return cx.foreground_executor().spawn(
event_stream.authorize(format!("{} (local settings)", input.display_description)),
);
}
@@ -158,9 +156,9 @@ impl EditFileTool {
// so check for that edge case too.
if let Ok(canonical_path) = std::fs::canonicalize(&input.path) {
if canonical_path.starts_with(paths::config_dir()) {
return event_stream.authorize(
format!("{} (global settings)", input.display_description),
cx,
return cx.foreground_executor().spawn(
event_stream
.authorize(format!("{} (global settings)", input.display_description)),
);
}
}
@@ -175,7 +173,8 @@ impl EditFileTool {
if project_path.is_some() {
Task::ready(Ok(()))
} else {
event_stream.authorize(&input.display_description, cx)
cx.foreground_executor()
.spawn(event_stream.authorize(input.display_description.clone()))
}
}
}
@@ -226,16 +225,6 @@ impl AgentTool for EditFileTool {
Ok(path) => path,
Err(err) => return Task::ready(Err(anyhow!(err))),
};
let abs_path = project.read(cx).absolute_path(&project_path, cx);
if let Some(abs_path) = abs_path.clone() {
event_stream.update_fields(ToolCallUpdateFields {
locations: Some(vec![acp::ToolCallLocation {
path: abs_path,
line: None,
}]),
..Default::default()
});
}
let request = self.thread.update(cx, |thread, cx| {
thread.build_completion_request(CompletionIntent::ToolResults, cx)
@@ -294,38 +283,13 @@ impl AgentTool for EditFileTool {
let mut hallucinated_old_text = false;
let mut ambiguous_ranges = Vec::new();
let mut emitted_location = false;
while let Some(event) = events.next().await {
match event {
EditAgentOutputEvent::Edited(range) => {
if !emitted_location {
let line = buffer.update(cx, |buffer, _cx| {
range.start.to_point(&buffer.snapshot()).row
}).ok();
if let Some(abs_path) = abs_path.clone() {
event_stream.update_fields(ToolCallUpdateFields {
locations: Some(vec![ToolCallLocation { path: abs_path, line }]),
..Default::default()
});
}
emitted_location = true;
}
},
EditAgentOutputEvent::Edited => {},
EditAgentOutputEvent::UnresolvedEditRange => hallucinated_old_text = true,
EditAgentOutputEvent::AmbiguousEditRange(ranges) => ambiguous_ranges = ranges,
EditAgentOutputEvent::ResolvingEditRange(range) => {
diff.update(cx, |card, cx| card.reveal_range(range.clone(), cx))?;
// if !emitted_location {
// let line = buffer.update(cx, |buffer, _cx| {
// range.start.to_point(&buffer.snapshot()).row
// }).ok();
// if let Some(abs_path) = abs_path.clone() {
// event_stream.update_fields(ToolCallUpdateFields {
// locations: Some(vec![ToolCallLocation { path: abs_path, line }]),
// ..Default::default()
// });
// }
// }
diff.update(cx, |card, cx| card.reveal_range(range, cx))?;
}
}
}
@@ -490,9 +454,10 @@ fn resolve_path(
#[cfg(test)]
mod tests {
use crate::Templates;
use super::*;
use crate::{ContextServerRegistry, Templates};
use action_log::ActionLog;
use assistant_tool::ActionLog;
use client::TelemetrySettings;
use fs::Fs;
use gpui::{TestAppContext, UpdateGlobal};
@@ -510,20 +475,9 @@ mod tests {
fs.insert_tree("/root", json!({})).await;
let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await;
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let model = Arc::new(FakeLanguageModel::default());
let thread = cx.new(|cx| {
Thread::new(
project,
Rc::default(),
context_server_registry,
action_log,
Templates::new(),
model,
cx,
)
});
let thread =
cx.new(|_| Thread::new(project, Rc::default(), action_log, Templates::new(), model));
let result = cx
.update(|cx| {
let input = EditFileToolInput {
@@ -707,18 +661,14 @@ mod tests {
});
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let model = Arc::new(FakeLanguageModel::default());
let thread = cx.new(|cx| {
let thread = cx.new(|_| {
Thread::new(
project,
Rc::default(),
context_server_registry,
action_log.clone(),
Templates::new(),
model.clone(),
cx,
)
});
@@ -842,19 +792,15 @@ mod tests {
.unwrap();
let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await;
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let model = Arc::new(FakeLanguageModel::default());
let thread = cx.new(|cx| {
let thread = cx.new(|_| {
Thread::new(
project,
Rc::default(),
context_server_registry,
action_log.clone(),
Templates::new(),
model.clone(),
cx,
)
});
@@ -968,19 +914,15 @@ mod tests {
init_test(cx);
let fs = project::FakeFs::new(cx.executor());
let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await;
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let model = Arc::new(FakeLanguageModel::default());
let thread = cx.new(|cx| {
let thread = cx.new(|_| {
Thread::new(
project,
Rc::default(),
context_server_registry,
action_log.clone(),
Templates::new(),
model.clone(),
cx,
)
});
let tool = Arc::new(EditFileTool { thread });
@@ -1000,7 +942,7 @@ mod tests {
)
});
let event = stream_rx.expect_authorization().await;
let event = stream_rx.expect_tool_authorization().await;
assert_eq!(event.tool_call.title, "test 1 (local settings)");
// Test 2: Path outside project should require confirmation
@@ -1017,7 +959,7 @@ mod tests {
)
});
let event = stream_rx.expect_authorization().await;
let event = stream_rx.expect_tool_authorization().await;
assert_eq!(event.tool_call.title, "test 2");
// Test 3: Relative path without .zed should not require confirmation
@@ -1050,7 +992,7 @@ mod tests {
cx,
)
});
let event = stream_rx.expect_authorization().await;
let event = stream_rx.expect_tool_authorization().await;
assert_eq!(event.tool_call.title, "test 4 (local settings)");
// Test 5: When always_allow_tool_actions is enabled, no confirmation needed
@@ -1099,19 +1041,15 @@ mod tests {
let fs = project::FakeFs::new(cx.executor());
fs.insert_tree("/project", json!({})).await;
let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let model = Arc::new(FakeLanguageModel::default());
let thread = cx.new(|cx| {
let thread = cx.new(|_| {
Thread::new(
project,
Rc::default(),
context_server_registry,
action_log.clone(),
Templates::new(),
model.clone(),
cx,
)
});
let tool = Arc::new(EditFileTool { thread });
@@ -1150,7 +1088,7 @@ mod tests {
});
if should_confirm {
stream_rx.expect_authorization().await;
stream_rx.expect_tool_authorization().await;
} else {
auth.await.unwrap();
assert!(
@@ -1210,18 +1148,14 @@ mod tests {
.await;
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let model = Arc::new(FakeLanguageModel::default());
let thread = cx.new(|cx| {
let thread = cx.new(|_| {
Thread::new(
project.clone(),
Rc::default(),
context_server_registry.clone(),
action_log.clone(),
Templates::new(),
model.clone(),
cx,
)
});
let tool = Arc::new(EditFileTool { thread });
@@ -1258,7 +1192,7 @@ mod tests {
});
if should_confirm {
stream_rx.expect_authorization().await;
stream_rx.expect_tool_authorization().await;
} else {
auth.await.unwrap();
assert!(
@@ -1291,18 +1225,14 @@ mod tests {
.await;
let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let model = Arc::new(FakeLanguageModel::default());
let thread = cx.new(|cx| {
let thread = cx.new(|_| {
Thread::new(
project.clone(),
Rc::default(),
context_server_registry.clone(),
action_log.clone(),
Templates::new(),
model.clone(),
cx,
)
});
let tool = Arc::new(EditFileTool { thread });
@@ -1346,7 +1276,7 @@ mod tests {
});
if should_confirm {
stream_rx.expect_authorization().await;
stream_rx.expect_tool_authorization().await;
} else {
auth.await.unwrap();
assert!(
@@ -1375,18 +1305,14 @@ mod tests {
.await;
let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let model = Arc::new(FakeLanguageModel::default());
let thread = cx.new(|cx| {
let thread = cx.new(|_| {
Thread::new(
project.clone(),
Rc::default(),
context_server_registry.clone(),
action_log.clone(),
Templates::new(),
model.clone(),
cx,
)
});
let tool = Arc::new(EditFileTool { thread });
@@ -1413,7 +1339,7 @@ mod tests {
)
});
stream_rx.expect_authorization().await;
stream_rx.expect_tool_authorization().await;
// Test outside path with different modes
let (stream_tx, mut stream_rx) = ToolCallEventStream::test();
@@ -1429,7 +1355,7 @@ mod tests {
)
});
stream_rx.expect_authorization().await;
stream_rx.expect_tool_authorization().await;
// Test normal path with different modes
let (stream_tx, mut stream_rx) = ToolCallEventStream::test();
@@ -1456,18 +1382,14 @@ mod tests {
let fs = project::FakeFs::new(cx.executor());
let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let context_server_registry =
cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let model = Arc::new(FakeLanguageModel::default());
let thread = cx.new(|cx| {
let thread = cx.new(|_| {
Thread::new(
project.clone(),
Rc::default(),
context_server_registry,
action_log.clone(),
Templates::new(),
model.clone(),
cx,
)
});
let tool = Arc::new(EditFileTool { thread });
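One detail worth noting from the hunks above: edits that touch a `.zed` local-settings folder (or the global config directory) always go through `event_stream.authorize`. A minimal standalone sketch of that path check, assuming the local settings folder is literally named `.zed`:
use std::ffi::OsStr;
use std::path::Path;

fn touches_local_settings(path: &Path) -> bool {
    // Any `.zed` component in the edited path forces explicit user approval.
    path.components()
        .any(|component| component.as_os_str() == OsStr::new(".zed"))
}

fn main() {
    assert!(touches_local_settings(Path::new("project/.zed/settings.json")));
    assert!(!touches_local_settings(Path::new("project/src/main.rs")));
}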

View File

@@ -1,155 +0,0 @@
use std::rc::Rc;
use std::sync::Arc;
use std::{borrow::Cow, cell::RefCell};
use agent_client_protocol as acp;
use anyhow::{Context as _, Result, bail};
use futures::AsyncReadExt as _;
use gpui::{App, AppContext as _, Task};
use html_to_markdown::{TagHandler, convert_html_to_markdown, markdown};
use http_client::{AsyncBody, HttpClientWithUrl};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use ui::SharedString;
use util::markdown::MarkdownEscaped;
use crate::{AgentTool, ToolCallEventStream};
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy)]
enum ContentType {
Html,
Plaintext,
Json,
}
/// Fetches a URL and returns the content as Markdown.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct FetchToolInput {
/// The URL to fetch.
url: String,
}
pub struct FetchTool {
http_client: Arc<HttpClientWithUrl>,
}
impl FetchTool {
pub fn new(http_client: Arc<HttpClientWithUrl>) -> Self {
Self { http_client }
}
async fn build_message(http_client: Arc<HttpClientWithUrl>, url: &str) -> Result<String> {
let url = if !url.starts_with("https://") && !url.starts_with("http://") {
Cow::Owned(format!("https://{url}"))
} else {
Cow::Borrowed(url)
};
let mut response = http_client.get(&url, AsyncBody::default(), true).await?;
let mut body = Vec::new();
response
.body_mut()
.read_to_end(&mut body)
.await
.context("error reading response body")?;
if response.status().is_client_error() {
let text = String::from_utf8_lossy(body.as_slice());
bail!(
"status error {}, response: {text:?}",
response.status().as_u16()
);
}
let Some(content_type) = response.headers().get("content-type") else {
bail!("missing Content-Type header");
};
let content_type = content_type
.to_str()
.context("invalid Content-Type header")?;
let content_type = if content_type.starts_with("text/plain") {
ContentType::Plaintext
} else if content_type.starts_with("application/json") {
ContentType::Json
} else {
ContentType::Html
};
match content_type {
ContentType::Html => {
let mut handlers: Vec<TagHandler> = vec![
Rc::new(RefCell::new(markdown::WebpageChromeRemover)),
Rc::new(RefCell::new(markdown::ParagraphHandler)),
Rc::new(RefCell::new(markdown::HeadingHandler)),
Rc::new(RefCell::new(markdown::ListHandler)),
Rc::new(RefCell::new(markdown::TableHandler::new())),
Rc::new(RefCell::new(markdown::StyledTextHandler)),
];
if url.contains("wikipedia.org") {
use html_to_markdown::structure::wikipedia;
handlers.push(Rc::new(RefCell::new(wikipedia::WikipediaChromeRemover)));
handlers.push(Rc::new(RefCell::new(wikipedia::WikipediaInfoboxHandler)));
handlers.push(Rc::new(
RefCell::new(wikipedia::WikipediaCodeHandler::new()),
));
} else {
handlers.push(Rc::new(RefCell::new(markdown::CodeHandler)));
}
convert_html_to_markdown(&body[..], &mut handlers)
}
ContentType::Plaintext => Ok(std::str::from_utf8(&body)?.to_owned()),
ContentType::Json => {
let json: serde_json::Value = serde_json::from_slice(&body)?;
Ok(format!(
"```json\n{}\n```",
serde_json::to_string_pretty(&json)?
))
}
}
}
}
impl AgentTool for FetchTool {
type Input = FetchToolInput;
type Output = String;
fn name(&self) -> SharedString {
"fetch".into()
}
fn kind(&self) -> acp::ToolKind {
acp::ToolKind::Fetch
}
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
match input {
Ok(input) => format!("Fetch {}", MarkdownEscaped(&input.url)).into(),
Err(_) => "Fetch URL".into(),
}
}
fn run(
self: Arc<Self>,
input: Self::Input,
_event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
let text = cx.background_spawn({
let http_client = self.http_client.clone();
async move { Self::build_message(http_client, &input.url).await }
});
cx.foreground_executor().spawn(async move {
let text = text.await?;
if text.trim().is_empty() {
bail!("no textual content found");
}
Ok(text)
})
}
}
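The scheme defaulting and content-type dispatch above can be summarized in a small dependency-free sketch (names here are illustrative):
#[derive(Debug, PartialEq)]
enum ContentType {
    Html,
    Plaintext,
    Json,
}

fn normalize_url(url: &str) -> String {
    // The tool assumes https:// when the model omits the scheme.
    if url.starts_with("http://") || url.starts_with("https://") {
        url.to_string()
    } else {
        format!("https://{url}")
    }
}

fn classify(content_type: &str) -> ContentType {
    if content_type.starts_with("text/plain") {
        ContentType::Plaintext
    } else if content_type.starts_with("application/json") {
        ContentType::Json
    } else {
        // Everything else is run through the HTML-to-Markdown converter.
        ContentType::Html
    }
}

fn main() {
    assert_eq!(normalize_url("example.com"), "https://example.com");
    assert_eq!(classify("application/json; charset=utf-8"), ContentType::Json);
    assert_eq!(classify("text/html; charset=utf-8"), ContentType::Html);
}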

View File

@@ -1,6 +1,6 @@
use crate::{AgentTool, ToolCallEventStream};
use agent_client_protocol as acp;
use anyhow::{Result, anyhow};
use anyhow::{anyhow, Result};
use gpui::{App, AppContext, Entity, SharedString, Task};
use language_model::LanguageModelToolResultContent;
use project::Project;
@@ -139,6 +139,9 @@ impl AgentTool for FindPathTool {
})
.collect(),
),
raw_output: Some(serde_json::json!({
"paths": &matches,
})),
..Default::default()
});

File diff suppressed because it is too large

View File

@@ -1,664 +0,0 @@
use crate::{AgentTool, ToolCallEventStream};
use agent_client_protocol::ToolKind;
use anyhow::{Result, anyhow};
use gpui::{App, Entity, SharedString, Task};
use project::{Project, WorktreeSettings};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::Settings;
use std::fmt::Write;
use std::{path::Path, sync::Arc};
use util::markdown::MarkdownInlineCode;
/// Lists files and directories in a given path. Prefer the `grep` or
/// `find_path` tools when searching the codebase.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct ListDirectoryToolInput {
/// The fully-qualified path of the directory to list in the project.
///
/// This path should never be absolute, and the first component
/// of the path should always be a root directory in a project.
///
/// <example>
/// If the project has the following root directories:
///
/// - directory1
/// - directory2
///
/// You can list the contents of `directory1` by using the path `directory1`.
/// </example>
///
/// <example>
/// If the project has the following root directories:
///
/// - foo
/// - bar
///
/// If you want to list the contents of the directory `foo/baz`, you should use the path `foo/baz`.
/// </example>
pub path: String,
}
pub struct ListDirectoryTool {
project: Entity<Project>,
}
impl ListDirectoryTool {
pub fn new(project: Entity<Project>) -> Self {
Self { project }
}
}
impl AgentTool for ListDirectoryTool {
type Input = ListDirectoryToolInput;
type Output = String;
fn name(&self) -> SharedString {
"list_directory".into()
}
fn kind(&self) -> ToolKind {
ToolKind::Read
}
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
if let Ok(input) = input {
let path = MarkdownInlineCode(&input.path);
format!("List the {path} directory's contents").into()
} else {
"List directory".into()
}
}
fn run(
self: Arc<Self>,
input: Self::Input,
_event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
// Sometimes models will return these even though we tell it to give a path and not a glob.
// When this happens, just list the root worktree directories.
if matches!(input.path.as_str(), "." | "" | "./" | "*") {
let output = self
.project
.read(cx)
.worktrees(cx)
.filter_map(|worktree| {
worktree.read(cx).root_entry().and_then(|entry| {
if entry.is_dir() {
entry.path.to_str()
} else {
None
}
})
})
.collect::<Vec<_>>()
.join("\n");
return Task::ready(Ok(output));
}
let Some(project_path) = self.project.read(cx).find_project_path(&input.path, cx) else {
return Task::ready(Err(anyhow!("Path {} not found in project", input.path)));
};
let Some(worktree) = self
.project
.read(cx)
.worktree_for_id(project_path.worktree_id, cx)
else {
return Task::ready(Err(anyhow!("Worktree not found")));
};
// Check if the directory whose contents we're listing is itself excluded or private
let global_settings = WorktreeSettings::get_global(cx);
if global_settings.is_path_excluded(&project_path.path) {
return Task::ready(Err(anyhow!(
"Cannot list directory because its path matches the user's global `file_scan_exclusions` setting: {}",
&input.path
)));
}
if global_settings.is_path_private(&project_path.path) {
return Task::ready(Err(anyhow!(
"Cannot list directory because its path matches the user's global `private_files` setting: {}",
&input.path
)));
}
let worktree_settings = WorktreeSettings::get(Some((&project_path).into()), cx);
if worktree_settings.is_path_excluded(&project_path.path) {
return Task::ready(Err(anyhow!(
"Cannot list directory because its path matches the user's worktree`file_scan_exclusions` setting: {}",
&input.path
)));
}
if worktree_settings.is_path_private(&project_path.path) {
return Task::ready(Err(anyhow!(
"Cannot list directory because its path matches the user's worktree `private_paths` setting: {}",
&input.path
)));
}
let worktree_snapshot = worktree.read(cx).snapshot();
let worktree_root_name = worktree.read(cx).root_name().to_string();
let Some(entry) = worktree_snapshot.entry_for_path(&project_path.path) else {
return Task::ready(Err(anyhow!("Path not found: {}", input.path)));
};
if !entry.is_dir() {
return Task::ready(Err(anyhow!("{} is not a directory.", input.path)));
}
let worktree_snapshot = worktree.read(cx).snapshot();
let mut folders = Vec::new();
let mut files = Vec::new();
for entry in worktree_snapshot.child_entries(&project_path.path) {
// Skip private and excluded files and directories
if global_settings.is_path_private(&entry.path)
|| global_settings.is_path_excluded(&entry.path)
{
continue;
}
if self
.project
.read(cx)
.find_project_path(&entry.path, cx)
.map(|project_path| {
let worktree_settings = WorktreeSettings::get(Some((&project_path).into()), cx);
worktree_settings.is_path_excluded(&project_path.path)
|| worktree_settings.is_path_private(&project_path.path)
})
.unwrap_or(false)
{
continue;
}
let full_path = Path::new(&worktree_root_name)
.join(&entry.path)
.display()
.to_string();
if entry.is_dir() {
folders.push(full_path);
} else {
files.push(full_path);
}
}
let mut output = String::new();
if !folders.is_empty() {
writeln!(output, "# Folders:\n{}", folders.join("\n")).unwrap();
}
if !files.is_empty() {
writeln!(output, "\n# Files:\n{}", files.join("\n")).unwrap();
}
if output.is_empty() {
writeln!(output, "{} is empty.", input.path).unwrap();
}
Task::ready(Ok(output))
}
}
#[cfg(test)]
mod tests {
use super::*;
use gpui::{TestAppContext, UpdateGlobal};
use indoc::indoc;
use project::{FakeFs, Project, WorktreeSettings};
use serde_json::json;
use settings::SettingsStore;
use util::path;
fn platform_paths(path_str: &str) -> String {
if cfg!(target_os = "windows") {
path_str.replace("/", "\\")
} else {
path_str.to_string()
}
}
fn init_test(cx: &mut TestAppContext) {
cx.update(|cx| {
let settings_store = SettingsStore::test(cx);
cx.set_global(settings_store);
language::init(cx);
Project::init_settings(cx);
});
}
#[gpui::test]
async fn test_list_directory_separates_files_and_dirs(cx: &mut TestAppContext) {
init_test(cx);
let fs = FakeFs::new(cx.executor());
fs.insert_tree(
path!("/project"),
json!({
"src": {
"main.rs": "fn main() {}",
"lib.rs": "pub fn hello() {}",
"models": {
"user.rs": "struct User {}",
"post.rs": "struct Post {}"
},
"utils": {
"helper.rs": "pub fn help() {}"
}
},
"tests": {
"integration_test.rs": "#[test] fn test() {}"
},
"README.md": "# Project",
"Cargo.toml": "[package]"
}),
)
.await;
let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;
let tool = Arc::new(ListDirectoryTool::new(project));
// Test listing root directory
let input = ListDirectoryToolInput {
path: "project".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
assert_eq!(
output,
platform_paths(indoc! {"
# Folders:
project/src
project/tests
# Files:
project/Cargo.toml
project/README.md
"})
);
// Test listing src directory
let input = ListDirectoryToolInput {
path: "project/src".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
assert_eq!(
output,
platform_paths(indoc! {"
# Folders:
project/src/models
project/src/utils
# Files:
project/src/lib.rs
project/src/main.rs
"})
);
// Test listing directory with only files
let input = ListDirectoryToolInput {
path: "project/tests".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
assert!(!output.contains("# Folders:"));
assert!(output.contains("# Files:"));
assert!(output.contains(&platform_paths("project/tests/integration_test.rs")));
}
#[gpui::test]
async fn test_list_directory_empty_directory(cx: &mut TestAppContext) {
init_test(cx);
let fs = FakeFs::new(cx.executor());
fs.insert_tree(
path!("/project"),
json!({
"empty_dir": {}
}),
)
.await;
let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;
let tool = Arc::new(ListDirectoryTool::new(project));
let input = ListDirectoryToolInput {
path: "project/empty_dir".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
assert_eq!(output, "project/empty_dir is empty.\n");
}
#[gpui::test]
async fn test_list_directory_error_cases(cx: &mut TestAppContext) {
init_test(cx);
let fs = FakeFs::new(cx.executor());
fs.insert_tree(
path!("/project"),
json!({
"file.txt": "content"
}),
)
.await;
let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;
let tool = Arc::new(ListDirectoryTool::new(project));
// Test non-existent path
let input = ListDirectoryToolInput {
path: "project/nonexistent".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await;
assert!(output.unwrap_err().to_string().contains("Path not found"));
// Test trying to list a file instead of directory
let input = ListDirectoryToolInput {
path: "project/file.txt".into(),
};
let output = cx
.update(|cx| tool.run(input, ToolCallEventStream::test().0, cx))
.await;
assert!(
output
.unwrap_err()
.to_string()
.contains("is not a directory")
);
}
#[gpui::test]
async fn test_list_directory_security(cx: &mut TestAppContext) {
init_test(cx);
let fs = FakeFs::new(cx.executor());
fs.insert_tree(
path!("/project"),
json!({
"normal_dir": {
"file1.txt": "content",
"file2.txt": "content"
},
".mysecrets": "SECRET_KEY=abc123",
".secretdir": {
"config": "special configuration",
"secret.txt": "secret content"
},
".mymetadata": "custom metadata",
"visible_dir": {
"normal.txt": "normal content",
"special.privatekey": "private key content",
"data.mysensitive": "sensitive data",
".hidden_subdir": {
"hidden_file.txt": "hidden content"
}
}
}),
)
.await;
// Configure settings explicitly
cx.update(|cx| {
SettingsStore::update_global(cx, |store, cx| {
store.update_user_settings::<WorktreeSettings>(cx, |settings| {
settings.file_scan_exclusions = Some(vec![
"**/.secretdir".to_string(),
"**/.mymetadata".to_string(),
"**/.hidden_subdir".to_string(),
]);
settings.private_files = Some(vec![
"**/.mysecrets".to_string(),
"**/*.privatekey".to_string(),
"**/*.mysensitive".to_string(),
]);
});
});
});
let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;
let tool = Arc::new(ListDirectoryTool::new(project));
// Listing root directory should exclude private and excluded files
let input = ListDirectoryToolInput {
path: "project".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
// Should include normal directories
assert!(output.contains("normal_dir"), "Should list normal_dir");
assert!(output.contains("visible_dir"), "Should list visible_dir");
// Should NOT include excluded or private files
assert!(
!output.contains(".secretdir"),
"Should not list .secretdir (file_scan_exclusions)"
);
assert!(
!output.contains(".mymetadata"),
"Should not list .mymetadata (file_scan_exclusions)"
);
assert!(
!output.contains(".mysecrets"),
"Should not list .mysecrets (private_files)"
);
// Trying to list an excluded directory should fail
let input = ListDirectoryToolInput {
path: "project/.secretdir".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await;
assert!(
output
.unwrap_err()
.to_string()
.contains("file_scan_exclusions"),
"Error should mention file_scan_exclusions"
);
// Listing a directory should exclude private files within it
let input = ListDirectoryToolInput {
path: "project/visible_dir".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
// Should include normal files
assert!(output.contains("normal.txt"), "Should list normal.txt");
// Should NOT include private files
assert!(
!output.contains("privatekey"),
"Should not list .privatekey files (private_files)"
);
assert!(
!output.contains("mysensitive"),
"Should not list .mysensitive files (private_files)"
);
// Should NOT include subdirectories that match exclusions
assert!(
!output.contains(".hidden_subdir"),
"Should not list .hidden_subdir (file_scan_exclusions)"
);
}
#[gpui::test]
async fn test_list_directory_with_multiple_worktree_settings(cx: &mut TestAppContext) {
init_test(cx);
let fs = FakeFs::new(cx.executor());
// Create first worktree with its own private files
fs.insert_tree(
path!("/worktree1"),
json!({
".zed": {
"settings.json": r#"{
"file_scan_exclusions": ["**/fixture.*"],
"private_files": ["**/secret.rs", "**/config.toml"]
}"#
},
"src": {
"main.rs": "fn main() { println!(\"Hello from worktree1\"); }",
"secret.rs": "const API_KEY: &str = \"secret_key_1\";",
"config.toml": "[database]\nurl = \"postgres://localhost/db1\""
},
"tests": {
"test.rs": "mod tests { fn test_it() {} }",
"fixture.sql": "CREATE TABLE users (id INT, name VARCHAR(255));"
}
}),
)
.await;
// Create second worktree with different private files
fs.insert_tree(
path!("/worktree2"),
json!({
".zed": {
"settings.json": r#"{
"file_scan_exclusions": ["**/internal.*"],
"private_files": ["**/private.js", "**/data.json"]
}"#
},
"lib": {
"public.js": "export function greet() { return 'Hello from worktree2'; }",
"private.js": "const SECRET_TOKEN = \"private_token_2\";",
"data.json": "{\"api_key\": \"json_secret_key\"}"
},
"docs": {
"README.md": "# Public Documentation",
"internal.md": "# Internal Secrets and Configuration"
}
}),
)
.await;
// Set global settings
cx.update(|cx| {
SettingsStore::update_global(cx, |store, cx| {
store.update_user_settings::<WorktreeSettings>(cx, |settings| {
settings.file_scan_exclusions =
Some(vec!["**/.git".to_string(), "**/node_modules".to_string()]);
settings.private_files = Some(vec!["**/.env".to_string()]);
});
});
});
let project = Project::test(
fs.clone(),
[path!("/worktree1").as_ref(), path!("/worktree2").as_ref()],
cx,
)
.await;
// Wait for worktrees to be fully scanned
cx.executor().run_until_parked();
let tool = Arc::new(ListDirectoryTool::new(project));
// Test listing worktree1/src - should exclude secret.rs and config.toml based on local settings
let input = ListDirectoryToolInput {
path: "worktree1/src".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
assert!(output.contains("main.rs"), "Should list main.rs");
assert!(
!output.contains("secret.rs"),
"Should not list secret.rs (local private_files)"
);
assert!(
!output.contains("config.toml"),
"Should not list config.toml (local private_files)"
);
// Test listing worktree1/tests - should exclude fixture.sql based on local settings
let input = ListDirectoryToolInput {
path: "worktree1/tests".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
assert!(output.contains("test.rs"), "Should list test.rs");
assert!(
!output.contains("fixture.sql"),
"Should not list fixture.sql (local file_scan_exclusions)"
);
// Test listing worktree2/lib - should exclude private.js and data.json based on local settings
let input = ListDirectoryToolInput {
path: "worktree2/lib".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
assert!(output.contains("public.js"), "Should list public.js");
assert!(
!output.contains("private.js"),
"Should not list private.js (local private_files)"
);
assert!(
!output.contains("data.json"),
"Should not list data.json (local private_files)"
);
// Test listing worktree2/docs - should exclude internal.md based on local settings
let input = ListDirectoryToolInput {
path: "worktree2/docs".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await
.unwrap();
assert!(output.contains("README.md"), "Should list README.md");
assert!(
!output.contains("internal.md"),
"Should not list internal.md (local file_scan_exclusions)"
);
// Test trying to list an excluded directory directly
let input = ListDirectoryToolInput {
path: "worktree1/src/secret.rs".into(),
};
let output = cx
.update(|cx| tool.clone().run(input, ToolCallEventStream::test().0, cx))
.await;
assert!(
output
.unwrap_err()
.to_string()
.contains("Cannot list directory"),
);
}
}
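The listing itself is rendered with the simple format exercised by the tests above; a dependency-free sketch:
fn render_listing(folders: &[&str], files: &[&str], path: &str) -> String {
    use std::fmt::Write as _;
    let mut output = String::new();
    if !folders.is_empty() {
        writeln!(output, "# Folders:\n{}", folders.join("\n")).unwrap();
    }
    if !files.is_empty() {
        writeln!(output, "\n# Files:\n{}", files.join("\n")).unwrap();
    }
    if output.is_empty() {
        // Nothing survived exclusion/private-file filtering.
        writeln!(output, "{path} is empty.").unwrap();
    }
    output
}

fn main() {
    let out = render_listing(&["project/src"], &["project/Cargo.toml"], "project");
    assert_eq!(out, "# Folders:\nproject/src\n\n# Files:\nproject/Cargo.toml\n");
    assert_eq!(render_listing(&[], &[], "project/empty_dir"), "project/empty_dir is empty.\n");
}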

View File

@@ -1,123 +0,0 @@
use crate::{AgentTool, ToolCallEventStream};
use agent_client_protocol::ToolKind;
use anyhow::{Context as _, Result, anyhow};
use gpui::{App, AppContext, Entity, SharedString, Task};
use project::Project;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{path::Path, sync::Arc};
use util::markdown::MarkdownInlineCode;
/// Moves or renames a file or directory in the project, and returns confirmation
/// that the move succeeded.
///
/// If the source and destination directories are the same, but the filename is
/// different, this performs a rename. Otherwise, it performs a move.
///
/// This tool should be used when it's desirable to move or rename a file or
/// directory without changing its contents at all.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct MovePathToolInput {
/// The source path of the file or directory to move/rename.
///
/// <example>
/// If the project has the following files:
///
/// - directory1/a/something.txt
/// - directory2/a/things.txt
/// - directory3/a/other.txt
///
/// You can move the first file by providing a source_path of "directory1/a/something.txt"
/// </example>
pub source_path: String,
/// The destination path where the file or directory should be moved/renamed to.
/// If the paths are the same except for the filename, then this will be a rename.
///
/// <example>
/// To move "directory1/a/something.txt" to "directory2/b/renamed.txt",
/// provide a destination_path of "directory2/b/renamed.txt"
/// </example>
pub destination_path: String,
}
pub struct MovePathTool {
project: Entity<Project>,
}
impl MovePathTool {
pub fn new(project: Entity<Project>) -> Self {
Self { project }
}
}
impl AgentTool for MovePathTool {
type Input = MovePathToolInput;
type Output = String;
fn name(&self) -> SharedString {
"move_path".into()
}
fn kind(&self) -> ToolKind {
ToolKind::Move
}
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
if let Ok(input) = input {
let src = MarkdownInlineCode(&input.source_path);
let dest = MarkdownInlineCode(&input.destination_path);
let src_path = Path::new(&input.source_path);
let dest_path = Path::new(&input.destination_path);
match dest_path
.file_name()
.and_then(|os_str| os_str.to_os_string().into_string().ok())
{
Some(filename) if src_path.parent() == dest_path.parent() => {
let filename = MarkdownInlineCode(&filename);
format!("Rename {src} to {filename}").into()
}
_ => format!("Move {src} to {dest}").into(),
}
} else {
"Move path".into()
}
}
fn run(
self: Arc<Self>,
input: Self::Input,
_event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
let rename_task = self.project.update(cx, |project, cx| {
match project
.find_project_path(&input.source_path, cx)
.and_then(|project_path| project.entry_for_path(&project_path, cx))
{
Some(entity) => match project.find_project_path(&input.destination_path, cx) {
Some(project_path) => project.rename_entry(entity.id, project_path.path, cx),
None => Task::ready(Err(anyhow!(
"Destination path {} was outside the project.",
input.destination_path
))),
},
None => Task::ready(Err(anyhow!(
"Source path {} was not found in the project.",
input.source_path
))),
}
});
cx.background_spawn(async move {
let _ = rename_task.await.with_context(|| {
format!("Moving {} to {}", input.source_path, input.destination_path)
})?;
Ok(format!(
"Moved {} to {}",
input.source_path, input.destination_path
))
})
}
}
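A standalone sketch of the rename-vs-move distinction used for the title above: a different file name under the same parent directory is reported as a rename, everything else as a move.
use std::path::Path;

fn describe(source: &str, destination: &str) -> String {
    let src = Path::new(source);
    let dest = Path::new(destination);
    match dest.file_name().and_then(|name| name.to_str()) {
        // Same parent, new file name: a rename.
        Some(filename) if src.parent() == dest.parent() => {
            format!("Rename {source} to {filename}")
        }
        // Otherwise the entry is moving between directories.
        _ => format!("Move {source} to {destination}"),
    }
}

fn main() {
    assert_eq!(describe("dir/a.txt", "dir/b.txt"), "Rename dir/a.txt to b.txt");
    assert_eq!(describe("dir1/a.txt", "dir2/a.txt"), "Move dir1/a.txt to dir2/a.txt");
}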

View File

@@ -1,59 +0,0 @@
use std::sync::Arc;
use agent_client_protocol as acp;
use anyhow::Result;
use chrono::{Local, Utc};
use gpui::{App, SharedString, Task};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::{AgentTool, ToolCallEventStream};
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum Timezone {
/// Use UTC for the datetime.
Utc,
/// Use local time for the datetime.
Local,
}
/// Returns the current datetime in RFC 3339 format.
/// Only use this tool when the user specifically asks for it or the current task would benefit from knowing the current datetime.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct NowToolInput {
/// The timezone to use for the datetime.
timezone: Timezone,
}
pub struct NowTool;
impl AgentTool for NowTool {
type Input = NowToolInput;
type Output = String;
fn name(&self) -> SharedString {
"now".into()
}
fn kind(&self) -> acp::ToolKind {
acp::ToolKind::Other
}
fn initial_title(&self, _input: Result<Self::Input, serde_json::Value>) -> SharedString {
"Get current time".into()
}
fn run(
self: Arc<Self>,
input: Self::Input,
_event_stream: ToolCallEventStream,
_cx: &mut App,
) -> Task<Result<String>> {
let now = match input.timezone {
Timezone::Utc => Utc::now().to_rfc3339(),
Timezone::Local => Local::now().to_rfc3339(),
};
Task::ready(Ok(format!("The current datetime is {now}.")))
}
}

View File

@@ -1,170 +0,0 @@
use crate::AgentTool;
use agent_client_protocol::ToolKind;
use anyhow::{Context as _, Result};
use gpui::{App, AppContext, Entity, SharedString, Task};
use project::Project;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{path::PathBuf, sync::Arc};
use util::markdown::MarkdownEscaped;
/// This tool opens a file or URL with the default application associated with
/// it on the user's operating system:
///
/// - On macOS, it's equivalent to the `open` command
/// - On Windows, it's equivalent to `start`
/// - On Linux, it uses something like `xdg-open`, `gio open`, `gnome-open`, `kde-open`, `wslview` as appropriate
///
/// For example, it can open a web browser with a URL, open a PDF file with the
/// default PDF viewer, etc.
///
/// You MUST ONLY use this tool when the user has explicitly requested opening
/// something. You MUST NEVER assume that the user would like for you to use
/// this tool.
#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)]
pub struct OpenToolInput {
/// The path or URL to open with the default application.
path_or_url: String,
}
pub struct OpenTool {
project: Entity<Project>,
}
impl OpenTool {
pub fn new(project: Entity<Project>) -> Self {
Self { project }
}
}
impl AgentTool for OpenTool {
type Input = OpenToolInput;
type Output = String;
fn name(&self) -> SharedString {
"open".into()
}
fn kind(&self) -> ToolKind {
ToolKind::Execute
}
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
if let Ok(input) = input {
format!("Open `{}`", MarkdownEscaped(&input.path_or_url)).into()
} else {
"Open file or URL".into()
}
}
fn run(
self: Arc<Self>,
input: Self::Input,
event_stream: crate::ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
// If path_or_url turns out to be a path in the project, make it absolute.
let abs_path = to_absolute_path(&input.path_or_url, self.project.clone(), cx);
let authorize = event_stream.authorize(self.initial_title(Ok(input.clone())), cx);
cx.background_spawn(async move {
authorize.await?;
match abs_path {
Some(path) => open::that(path),
None => open::that(&input.path_or_url),
}
.context("Failed to open URL or file path")?;
Ok(format!("Successfully opened {}", input.path_or_url))
})
}
}
fn to_absolute_path(
potential_path: &str,
project: Entity<Project>,
cx: &mut App,
) -> Option<PathBuf> {
let project = project.read(cx);
project
.find_project_path(PathBuf::from(potential_path), cx)
.and_then(|project_path| project.absolute_path(&project_path, cx))
}
#[cfg(test)]
mod tests {
use super::*;
use gpui::TestAppContext;
use project::{FakeFs, Project};
use settings::SettingsStore;
use std::path::Path;
use tempfile::TempDir;
#[gpui::test]
async fn test_to_absolute_path(cx: &mut TestAppContext) {
init_test(cx);
let temp_dir = TempDir::new().expect("Failed to create temp directory");
let temp_path = temp_dir.path().to_string_lossy().to_string();
let fs = FakeFs::new(cx.executor());
fs.insert_tree(
&temp_path,
serde_json::json!({
"src": {
"main.rs": "fn main() {}",
"lib.rs": "pub fn lib_fn() {}"
},
"docs": {
"readme.md": "# Project Documentation"
}
}),
)
.await;
// Use the temp_path as the root directory, not just its filename
let project = Project::test(fs.clone(), [temp_dir.path()], cx).await;
// Test cases where the function should return Some
cx.update(|cx| {
// Project-relative paths should return Some
// Create paths using the last segment of the temp path to simulate a project-relative path
let root_dir_name = Path::new(&temp_path)
.file_name()
.unwrap_or_else(|| std::ffi::OsStr::new("temp"))
.to_string_lossy();
assert!(
to_absolute_path(&format!("{root_dir_name}/src/main.rs"), project.clone(), cx)
.is_some(),
"Failed to resolve main.rs path"
);
assert!(
to_absolute_path(
&format!("{root_dir_name}/docs/readme.md",),
project.clone(),
cx,
)
.is_some(),
"Failed to resolve readme.md path"
);
// External URL should return None
let result = to_absolute_path("https://example.com", project.clone(), cx);
assert_eq!(result, None, "External URLs should return None");
// Path outside project
let result = to_absolute_path("../invalid/path", project.clone(), cx);
assert_eq!(result, None, "Paths outside the project should return None");
});
}
fn init_test(cx: &mut TestAppContext) {
cx.update(|cx| {
let settings_store = SettingsStore::test(cx);
cx.set_global(settings_store);
language::init(cx);
Project::init_settings(cx);
});
}
}

View File

@@ -1,16 +1,16 @@
use action_log::ActionLog;
use agent_client_protocol::{self as acp, ToolCallUpdateFields};
use anyhow::{Context as _, Result, anyhow};
use assistant_tool::outline;
use gpui::{App, Entity, SharedString, Task};
use agent_client_protocol::{self as acp};
use anyhow::{anyhow, Context, Result};
use assistant_tool::{outline, ActionLog};
use gpui::{Entity, Task};
use indoc::formatdoc;
use language::Point;
use language::{Anchor, Point};
use language_model::{LanguageModelImage, LanguageModelToolResultContent};
use project::{AgentLocation, ImageItem, Project, WorktreeSettings, image_store};
use project::{image_store, AgentLocation, ImageItem, Project, WorktreeSettings};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::Settings;
use std::sync::Arc;
use ui::{App, SharedString};
use crate::{AgentTool, ToolCallEventStream};
@@ -97,7 +97,7 @@ impl AgentTool for ReadFileTool {
fn run(
self: Arc<Self>,
input: Self::Input,
event_stream: ToolCallEventStream,
_event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<LanguageModelToolResultContent>> {
let Some(project_path) = self.project.read(cx).find_project_path(&input.path, cx) else {
@@ -166,9 +166,7 @@ impl AgentTool for ReadFileTool {
cx.spawn(async move |cx| {
let buffer = cx
.update(|cx| {
project.update(cx, |project, cx| {
project.open_buffer(project_path.clone(), cx)
})
project.update(cx, |project, cx| project.open_buffer(project_path, cx))
})?
.await?;
if buffer.read_with(cx, |buffer, _| {
@@ -180,10 +178,19 @@ impl AgentTool for ReadFileTool {
anyhow::bail!("{file_path} not found");
}
let mut anchor = None;
project.update(cx, |project, cx| {
project.set_agent_location(
Some(AgentLocation {
buffer: buffer.downgrade(),
position: Anchor::MIN,
}),
cx,
);
})?;
// Check if specific line ranges are provided
let result = if input.start_line.is_some() || input.end_line.is_some() {
if input.start_line.is_some() || input.end_line.is_some() {
let mut anchor = None;
let result = buffer.read_with(cx, |buffer, _cx| {
let text = buffer.text();
// .max(1) because despite instructions to be 1-indexed, sometimes the model passes 0.
@@ -207,6 +214,18 @@ impl AgentTool for ReadFileTool {
log.buffer_read(buffer.clone(), cx);
})?;
if let Some(anchor) = anchor {
project.update(cx, |project, cx| {
project.set_agent_location(
Some(AgentLocation {
buffer: buffer.downgrade(),
position: anchor,
}),
cx,
);
})?;
}
Ok(result.into())
} else {
// No line ranges specified, so check file size to see if it's too big.
@@ -217,7 +236,7 @@ impl AgentTool for ReadFileTool {
let result = buffer.read_with(cx, |buffer, _cx| buffer.text())?;
action_log.update(cx, |log, cx| {
log.buffer_read(buffer.clone(), cx);
log.buffer_read(buffer, cx);
})?;
Ok(result.into())
@@ -225,8 +244,7 @@ impl AgentTool for ReadFileTool {
// File is too big, so return the outline
// and a suggestion to read again with line numbers.
let outline =
outline::file_outline(project.clone(), file_path, action_log, None, cx)
.await?;
outline::file_outline(project, file_path, action_log, None, cx).await?;
Ok(formatdoc! {"
This file was too big to read all at once.
@@ -243,28 +261,7 @@ impl AgentTool for ReadFileTool {
}
.into())
}
};
project.update(cx, |project, cx| {
if let Some(abs_path) = project.absolute_path(&project_path, cx) {
project.set_agent_location(
Some(AgentLocation {
buffer: buffer.downgrade(),
position: anchor.unwrap_or(text::Anchor::MIN),
}),
cx,
);
event_stream.update_fields(ToolCallUpdateFields {
locations: Some(vec![acp::ToolCallLocation {
path: abs_path,
line: input.start_line.map(|line| line.saturating_sub(1)),
}]),
..Default::default()
});
}
})?;
result
}
})
}
}
@@ -273,7 +270,7 @@ impl AgentTool for ReadFileTool {
mod test {
use super::*;
use gpui::{AppContext, TestAppContext, UpdateGlobal as _};
use language::{Language, LanguageConfig, LanguageMatcher, tree_sitter_rust};
use language::{tree_sitter_rust, Language, LanguageConfig, LanguageMatcher};
use project::{FakeFs, Project};
use serde_json::json;
use settings::SettingsStore;

View File

@@ -1,473 +0,0 @@
use agent_client_protocol as acp;
use anyhow::Result;
use futures::{FutureExt as _, future::Shared};
use gpui::{App, AppContext, Entity, SharedString, Task};
use project::{Project, terminals::TerminalKind};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{
path::{Path, PathBuf},
sync::Arc,
};
use util::{ResultExt, get_system_shell, markdown::MarkdownInlineCode};
use crate::{AgentTool, ToolCallEventStream};
const COMMAND_OUTPUT_LIMIT: usize = 16 * 1024;
/// Executes a shell one-liner and returns the combined output.
///
/// This tool spawns a process using the user's shell, reads from stdout and stderr (preserving the order of writes), and returns a string with the combined output result.
///
/// The output will already be shown to the user; only repeat it when necessary, and avoid being redundant.
///
/// Make sure you use the `cd` parameter to navigate to one of the root directories of the project. NEVER do it as part of the `command` itself, otherwise it will error.
///
/// Do not use this tool for commands that run indefinitely, such as servers (like `npm run start`, `npm run dev`, `python -m http.server`, etc) or file watchers that don't terminate on their own.
///
/// Remember that each invocation of this tool will spawn a new shell process, so you can't rely on any state from previous invocations.
#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)]
pub struct TerminalToolInput {
/// The one-liner command to execute.
command: String,
/// Working directory for the command. This must be one of the root directories of the project.
cd: String,
}
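// Illustrative sketch, not part of the diff: the doc comment above doubles as the tool's
// prompt, and TerminalToolInput is the JSON payload the model must supply. Field values
// below are made up; "zed" stands in for one of the project's root directories, and
// serde_json is assumed to be available as a dependency.
fn _example_terminal_tool_input() -> TerminalToolInput {
    serde_json::from_value(serde_json::json!({
        "command": "cargo check --workspace",
        "cd": "zed"
    }))
    .expect("hypothetical payload deserializes")
}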
pub struct TerminalTool {
project: Entity<Project>,
determine_shell: Shared<Task<String>>,
}
impl TerminalTool {
pub fn new(project: Entity<Project>, cx: &mut App) -> Self {
let determine_shell = cx.background_spawn(async move {
if cfg!(windows) {
return get_system_shell();
}
if which::which("bash").is_ok() {
log::info!("agent selected bash for terminal tool");
"bash".into()
} else {
let shell = get_system_shell();
log::info!("agent selected {shell} for terminal tool");
shell
}
});
Self {
project,
determine_shell: determine_shell.shared(),
}
}
}
impl AgentTool for TerminalTool {
type Input = TerminalToolInput;
type Output = String;
fn name(&self) -> SharedString {
"terminal".into()
}
fn kind(&self) -> acp::ToolKind {
acp::ToolKind::Execute
}
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
if let Ok(input) = input {
let mut lines = input.command.lines();
let first_line = lines.next().unwrap_or_default();
let remaining_line_count = lines.count();
match remaining_line_count {
0 => MarkdownInlineCode(&first_line).to_string().into(),
1 => MarkdownInlineCode(&format!(
"{} - {} more line",
first_line, remaining_line_count
))
.to_string()
.into(),
n => MarkdownInlineCode(&format!("{} - {} more lines", first_line, n))
.to_string()
.into(),
}
} else {
"Run terminal command".into()
}
}
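// Illustrative, not from the diff: for a three-line command such as
// "echo a\necho b\necho c", first_line is "echo a" and remaining_line_count is 2,
// so the title renders as the inline code `echo a - 2 more lines`; a single-line
// command renders as plain inline code, e.g. `echo a`.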
fn run(
self: Arc<Self>,
input: Self::Input,
event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
let language_registry = self.project.read(cx).languages().clone();
let working_dir = match working_dir(&input, &self.project, cx) {
Ok(dir) => dir,
Err(err) => return Task::ready(Err(err)),
};
let program = self.determine_shell.clone();
let command = if cfg!(windows) {
format!("$null | & {{{}}}", input.command.replace("\"", "'"))
} else if let Some(cwd) = working_dir
.as_ref()
.and_then(|cwd| cwd.as_os_str().to_str())
{
// Make sure once we're *inside* the shell, we cd into `cwd`
format!("(cd {cwd}; {}) </dev/null", input.command)
} else {
format!("({}) </dev/null", input.command)
};
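// Sketch of what the Unix branch above produces (command and path are illustrative,
// not from the diff): for input.command = "git status" and cwd = "/home/me/zed",
// the shell is invoked roughly as: bash -c "(cd /home/me/zed; git status) </dev/null".
// On Windows, stdin is fed $null and the command runs through the system shell instead.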
let args = vec!["-c".into(), command];
let env = match &working_dir {
Some(dir) => self.project.update(cx, |project, cx| {
project.directory_environment(dir.as_path().into(), cx)
}),
None => Task::ready(None).shared(),
};
let env = cx.spawn(async move |_| {
let mut env = env.await.unwrap_or_default();
if cfg!(unix) {
env.insert("PAGER".into(), "cat".into());
}
env
});
let authorize = event_stream.authorize(self.initial_title(Ok(input.clone())), cx);
cx.spawn({
async move |cx| {
authorize.await?;
let program = program.await;
let env = env.await;
let terminal = self
.project
.update(cx, |project, cx| {
project.create_terminal(
TerminalKind::Task(task::SpawnInTerminal {
command: Some(program),
args,
cwd: working_dir.clone(),
env,
..Default::default()
}),
cx,
)
})?
.await?;
let acp_terminal = cx.new(|cx| {
acp_thread::Terminal::new(
input.command.clone(),
working_dir.clone(),
terminal.clone(),
language_registry,
cx,
)
})?;
event_stream.update_terminal(acp_terminal.clone());
let exit_status = terminal
.update(cx, |terminal, cx| terminal.wait_for_completed_task(cx))?
.await;
let (content, content_line_count) = terminal.read_with(cx, |terminal, _| {
(terminal.get_content(), terminal.total_lines())
})?;
let (processed_content, finished_with_empty_output) = process_content(
&content,
&input.command,
exit_status.map(portable_pty::ExitStatus::from),
);
acp_terminal
.update(cx, |terminal, cx| {
terminal.finish(
exit_status,
content.len(),
processed_content.len(),
content_line_count,
finished_with_empty_output,
cx,
);
})
.log_err();
Ok(processed_content)
}
})
}
}
fn process_content(
content: &str,
command: &str,
exit_status: Option<portable_pty::ExitStatus>,
) -> (String, bool) {
let should_truncate = content.len() > COMMAND_OUTPUT_LIMIT;
let content = if should_truncate {
let mut end_ix = COMMAND_OUTPUT_LIMIT.min(content.len());
while !content.is_char_boundary(end_ix) {
end_ix -= 1;
}
// Don't truncate mid-line, clear the remainder of the last line
end_ix = content[..end_ix].rfind('\n').unwrap_or(end_ix);
&content[..end_ix]
} else {
content
};
let content = content.trim();
let is_empty = content.is_empty();
let content = format!("```\n{content}\n```");
let content = if should_truncate {
format!(
"Command output too long. The first {} bytes:\n\n{content}",
content.len(),
)
} else {
content
};
let content = match exit_status {
Some(exit_status) if exit_status.success() => {
if is_empty {
"Command executed successfully.".to_string()
} else {
content.to_string()
}
}
Some(exit_status) => {
if is_empty {
format!(
"Command \"{command}\" failed with exit code {}.",
exit_status.exit_code()
)
} else {
format!(
"Command \"{command}\" failed with exit code {}.\n\n{content}",
exit_status.exit_code()
)
}
}
None => {
format!(
"Command failed or was interrupted.\nPartial output captured:\n\n{}",
content,
)
}
};
(content, is_empty)
}
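// Illustrative sketch, not part of the diff: process_content reports
// "Command executed successfully." for a successful command with empty output, and keeps
// whatever partial output was captured when no exit status is available.
#[cfg(test)]
mod process_content_sketch {
    use super::*;

    #[test]
    fn interrupted_command_keeps_partial_output() {
        let (message, was_empty) = process_content("partial output", "sleep 999", None);
        assert!(!was_empty);
        assert!(message.starts_with("Command failed or was interrupted."));
        assert!(message.contains("partial output"));
    }
}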
fn working_dir(
input: &TerminalToolInput,
project: &Entity<Project>,
cx: &mut App,
) -> Result<Option<PathBuf>> {
let project = project.read(cx);
let cd = &input.cd;
if cd == "." || cd == "" {
// Accept "." or "" as meaning "the one worktree" if we only have one worktree.
let mut worktrees = project.worktrees(cx);
match worktrees.next() {
Some(worktree) => {
anyhow::ensure!(
worktrees.next().is_none(),
"'.' is ambiguous in multi-root workspaces. Please specify a root directory explicitly.",
);
Ok(Some(worktree.read(cx).abs_path().to_path_buf()))
}
None => Ok(None),
}
} else {
let input_path = Path::new(cd);
if input_path.is_absolute() {
// Absolute paths are allowed, but only if they're in one of the project's worktrees.
if project
.worktrees(cx)
.any(|worktree| input_path.starts_with(&worktree.read(cx).abs_path()))
{
return Ok(Some(input_path.into()));
}
} else {
if let Some(worktree) = project.worktree_for_root_name(cd, cx) {
return Ok(Some(worktree.read(cx).abs_path().to_path_buf()));
}
}
anyhow::bail!("`cd` directory {cd:?} was not in any of the project's worktrees.");
}
}
#[cfg(test)]
mod tests {
use agent_settings::AgentSettings;
use editor::EditorSettings;
use fs::RealFs;
use gpui::{BackgroundExecutor, TestAppContext};
use pretty_assertions::assert_eq;
use serde_json::json;
use settings::{Settings, SettingsStore};
use terminal::terminal_settings::TerminalSettings;
use theme::ThemeSettings;
use util::test::TempTree;
use crate::AgentResponseEvent;
use super::*;
fn init_test(executor: &BackgroundExecutor, cx: &mut TestAppContext) {
zlog::init_test();
executor.allow_parking();
cx.update(|cx| {
let settings_store = SettingsStore::test(cx);
cx.set_global(settings_store);
language::init(cx);
Project::init_settings(cx);
ThemeSettings::register(cx);
TerminalSettings::register(cx);
EditorSettings::register(cx);
AgentSettings::register(cx);
});
}
#[gpui::test]
async fn test_interactive_command(executor: BackgroundExecutor, cx: &mut TestAppContext) {
if cfg!(windows) {
return;
}
init_test(&executor, cx);
let fs = Arc::new(RealFs::new(None, executor));
let tree = TempTree::new(json!({
"project": {},
}));
let project: Entity<Project> =
Project::test(fs, [tree.path().join("project").as_path()], cx).await;
let input = TerminalToolInput {
command: "cat".to_owned(),
cd: tree
.path()
.join("project")
.as_path()
.to_string_lossy()
.to_string(),
};
let (event_stream_tx, mut event_stream_rx) = ToolCallEventStream::test();
let result = cx
.update(|cx| Arc::new(TerminalTool::new(project, cx)).run(input, event_stream_tx, cx));
let auth = event_stream_rx.expect_authorization().await;
auth.response.send(auth.options[0].id.clone()).unwrap();
event_stream_rx.expect_terminal().await;
assert_eq!(result.await.unwrap(), "Command executed successfully.");
}
#[gpui::test]
async fn test_working_directory(executor: BackgroundExecutor, cx: &mut TestAppContext) {
if cfg!(windows) {
return;
}
init_test(&executor, cx);
let fs = Arc::new(RealFs::new(None, executor));
let tree = TempTree::new(json!({
"project": {},
"other-project": {},
}));
let project: Entity<Project> =
Project::test(fs, [tree.path().join("project").as_path()], cx).await;
let check = |input, expected, cx: &mut TestAppContext| {
let (stream_tx, mut stream_rx) = ToolCallEventStream::test();
let result = cx.update(|cx| {
Arc::new(TerminalTool::new(project.clone(), cx)).run(input, stream_tx, cx)
});
cx.run_until_parked();
let event = stream_rx.try_next();
if let Ok(Some(Ok(AgentResponseEvent::ToolCallAuthorization(auth)))) = event {
auth.response.send(auth.options[0].id.clone()).unwrap();
}
cx.spawn(async move |_| {
let output = result.await;
assert_eq!(output.ok(), expected);
})
};
check(
TerminalToolInput {
command: "pwd".into(),
cd: ".".into(),
},
Some(format!(
"```\n{}\n```",
tree.path().join("project").display()
)),
cx,
)
.await;
check(
TerminalToolInput {
command: "pwd".into(),
cd: "other-project".into(),
},
None, // other-project is a dir, but *not* a worktree (yet)
cx,
)
.await;
// Absolute path above the worktree root
check(
TerminalToolInput {
command: "pwd".into(),
cd: tree.path().to_string_lossy().into(),
},
None,
cx,
)
.await;
project
.update(cx, |project, cx| {
project.create_worktree(tree.path().join("other-project"), true, cx)
})
.await
.unwrap();
check(
TerminalToolInput {
command: "pwd".into(),
cd: "other-project".into(),
},
Some(format!(
"```\n{}\n```",
tree.path().join("other-project").display()
)),
cx,
)
.await;
check(
TerminalToolInput {
command: "pwd".into(),
cd: ".".into(),
},
None,
cx,
)
.await;
}
}

View File

@@ -1,112 +0,0 @@
use std::sync::Arc;
use crate::{AgentTool, ToolCallEventStream};
use agent_client_protocol as acp;
use anyhow::{Result, anyhow};
use cloud_llm_client::WebSearchResponse;
use gpui::{App, AppContext, Task};
use language_model::{
LanguageModelProviderId, LanguageModelToolResultContent, ZED_CLOUD_PROVIDER_ID,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use ui::prelude::*;
use web_search::WebSearchRegistry;
/// Search the web for information using your query.
/// Use this when you need real-time information, facts, or data that might not be in your training. \
/// Results will include snippets and links from relevant web pages.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct WebSearchToolInput {
/// The search term or question to query on the web.
query: String,
}
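// Illustrative sketch, not part of the diff: the JSON payload the model would send for
// this tool. The query string is made up, and serde_json is assumed to be available.
fn _example_web_search_input() -> WebSearchToolInput {
    serde_json::from_value(serde_json::json!({
        "query": "latest stable Rust release"
    }))
    .expect("hypothetical payload deserializes")
}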
#[derive(Debug, Serialize, Deserialize)]
#[serde(transparent)]
pub struct WebSearchToolOutput(WebSearchResponse);
impl From<WebSearchToolOutput> for LanguageModelToolResultContent {
fn from(value: WebSearchToolOutput) -> Self {
serde_json::to_string(&value.0)
.expect("Failed to serialize WebSearchResponse")
.into()
}
}
pub struct WebSearchTool;
impl AgentTool for WebSearchTool {
type Input = WebSearchToolInput;
type Output = WebSearchToolOutput;
fn name(&self) -> SharedString {
"web_search".into()
}
fn kind(&self) -> acp::ToolKind {
acp::ToolKind::Fetch
}
fn initial_title(&self, _input: Result<Self::Input, serde_json::Value>) -> SharedString {
"Searching the Web".into()
}
/// We currently only support Zed Cloud as a provider.
fn supported_provider(&self, provider: &LanguageModelProviderId) -> bool {
provider == &ZED_CLOUD_PROVIDER_ID
}
fn run(
self: Arc<Self>,
input: Self::Input,
event_stream: ToolCallEventStream,
cx: &mut App,
) -> Task<Result<Self::Output>> {
let Some(provider) = WebSearchRegistry::read_global(cx).active_provider() else {
return Task::ready(Err(anyhow!("Web search is not available.")));
};
let search_task = provider.search(input.query, cx);
cx.background_spawn(async move {
let response = match search_task.await {
Ok(response) => response,
Err(err) => {
event_stream.update_fields(acp::ToolCallUpdateFields {
title: Some("Web Search Failed".to_string()),
..Default::default()
});
return Err(err);
}
};
let result_text = if response.results.len() == 1 {
"1 result".to_string()
} else {
format!("{} results", response.results.len())
};
event_stream.update_fields(acp::ToolCallUpdateFields {
title: Some(format!("Searched the web: {result_text}")),
content: Some(
response
.results
.iter()
.map(|result| acp::ToolCallContent::Content {
content: acp::ContentBlock::ResourceLink(acp::ResourceLink {
name: result.title.clone(),
uri: result.url.clone(),
title: Some(result.title.clone()),
description: Some(result.text.clone()),
mime_type: None,
annotations: None,
size: None,
}),
})
.collect(),
),
..Default::default()
});
Ok(WebSearchToolOutput(response))
})
}
}

View File

@@ -467,7 +467,6 @@ impl AgentConnection for AcpConnection {
fn prompt(
&self,
_id: Option<acp_thread::UserMessageId>,
params: acp::PromptRequest,
cx: &mut App,
) -> Task<Result<acp::PromptResponse>> {

View File

@@ -171,7 +171,6 @@ impl AgentConnection for AcpConnection {
fn prompt(
&self,
_id: Option<acp_thread::UserMessageId>,
params: acp::PromptRequest,
cx: &mut App,
) -> Task<Result<acp::PromptResponse>> {

View File

@@ -210,7 +210,6 @@ impl AgentConnection for ClaudeAgentConnection {
fn prompt(
&self,
_id: Option<acp_thread::UserMessageId>,
params: acp::PromptRequest,
cx: &mut App,
) -> Task<Result<acp::PromptResponse>> {
@@ -424,7 +423,7 @@ impl ClaudeAgentSession {
if !turn_state.borrow().is_cancelled() {
thread
.update(cx, |thread, cx| {
thread.push_user_content_block(None, text.into(), cx)
thread.push_user_content_block(text.into(), cx)
})
.log_err();
}

View File

@@ -48,20 +48,6 @@ pub struct AgentProfileSettings {
pub context_servers: IndexMap<Arc<str>, ContextServerPreset>,
}
impl AgentProfileSettings {
pub fn is_tool_enabled(&self, tool_name: &str) -> bool {
self.tools.get(tool_name) == Some(&true)
}
pub fn is_context_server_tool_enabled(&self, server_id: &str, tool_name: &str) -> bool {
self.enable_all_context_servers
|| self
.context_servers
.get(server_id)
.map_or(false, |preset| preset.tools.get(tool_name) == Some(&true))
}
}
#[derive(Debug, Clone, Default)]
pub struct ContextServerPreset {
pub tools: IndexMap<Arc<str>, bool>,

View File

@@ -442,6 +442,10 @@ impl Settings for AgentSettings {
&mut settings.inline_alternatives,
value.inline_alternatives.clone(),
);
merge(
&mut settings.always_allow_tool_actions,
value.always_allow_tool_actions,
);
merge(
&mut settings.notify_when_agent_waiting,
value.notify_when_agent_waiting,
@@ -503,20 +507,6 @@ impl Settings for AgentSettings {
}
}
debug_assert_eq!(
sources.default.always_allow_tool_actions.unwrap_or(false),
false,
"For security, agent.always_allow_tool_actions should always be false in default.json. If it's true, that is a bug that should be fixed!"
);
// For security reasons, only trust the user's global settings for whether to always allow tool actions.
// If this could be overridden locally, an attacker could (e.g. by committing to source control and
// convincing you to switch branches) modify your project-local settings to disable the agent's safety checks.
settings.always_allow_tool_actions = sources
.user
.and_then(|setting| setting.always_allow_tool_actions)
.unwrap_or(false);
Ok(settings)
}

View File

@@ -17,7 +17,6 @@ test-support = ["gpui/test-support", "language/test-support"]
[dependencies]
acp_thread.workspace = true
action_log.workspace = true
agent-client-protocol.workspace = true
agent.workspace = true
agent2.workspace = true

View File

@@ -1,9 +1,6 @@
mod completion_provider;
mod message_editor;
mod model_selector;
mod model_selector_popover;
mod message_history;
mod thread_view;
pub use model_selector::AcpModelSelector;
pub use model_selector_popover::AcpModelSelectorPopover;
pub use message_history::MessageHistory;
pub use thread_view::AcpThreadView;

View File

@@ -1,20 +1,18 @@
use std::ops::Range;
use std::path::{Path, PathBuf};
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use acp_thread::MentionUri;
use anyhow::{Context as _, Result};
use anyhow::Result;
use collections::HashMap;
use editor::display_map::CreaseId;
use editor::{CompletionProvider, Editor, ExcerptId};
use file_icons::FileIcons;
use futures::future::try_join_all;
use gpui::{App, Entity, Task, WeakEntity};
use language::{Buffer, CodeLabel, HighlightId};
use lsp::CompletionContext;
use parking_lot::Mutex;
use project::{Completion, CompletionIntent, CompletionResponse, Project, ProjectPath, WorktreeId};
use project::{Completion, CompletionIntent, CompletionResponse, ProjectPath, WorktreeId};
use rope::Point;
use text::{Anchor, ToPoint};
use ui::prelude::*;
@@ -25,67 +23,21 @@ use crate::context_picker::file_context_picker::{extract_file_name_and_directory
#[derive(Default)]
pub struct MentionSet {
paths_by_crease_id: HashMap<CreaseId, MentionUri>,
paths_by_crease_id: HashMap<CreaseId, ProjectPath>,
}
impl MentionSet {
pub fn insert(&mut self, crease_id: CreaseId, path: PathBuf) {
self.paths_by_crease_id
.insert(crease_id, MentionUri::File(path));
pub fn insert(&mut self, crease_id: CreaseId, path: ProjectPath) {
self.paths_by_crease_id.insert(crease_id, path);
}
pub fn path_for_crease_id(&self, crease_id: CreaseId) -> Option<ProjectPath> {
self.paths_by_crease_id.get(&crease_id).cloned()
}
pub fn drain(&mut self) -> impl Iterator<Item = CreaseId> {
self.paths_by_crease_id.drain().map(|(id, _)| id)
}
pub fn clear(&mut self) {
self.paths_by_crease_id.clear();
}
pub fn contents(
&self,
project: Entity<Project>,
cx: &mut App,
) -> Task<Result<HashMap<CreaseId, Mention>>> {
let contents = self
.paths_by_crease_id
.iter()
.map(|(crease_id, uri)| match uri {
MentionUri::File(path) => {
let crease_id = *crease_id;
let uri = uri.clone();
let path = path.to_path_buf();
let buffer_task = project.update(cx, |project, cx| {
let path = project
.find_project_path(path, cx)
.context("Failed to find project path")?;
anyhow::Ok(project.open_buffer(path, cx))
});
cx.spawn(async move |cx| {
let buffer = buffer_task?.await?;
let content = buffer.read_with(cx, |buffer, _cx| buffer.text())?;
anyhow::Ok((crease_id, Mention { uri, content }))
})
}
_ => {
// TODO
unimplemented!()
}
})
.collect::<Vec<_>>();
cx.spawn(async move |_cx| {
let contents = try_join_all(contents).await?.into_iter().collect();
anyhow::Ok(contents)
})
}
}
pub struct Mention {
pub uri: MentionUri,
pub content: String,
}
pub struct ContextPickerCompletionProvider {
@@ -116,7 +68,6 @@ impl ContextPickerCompletionProvider {
source_range: Range<Anchor>,
editor: Entity<Editor>,
mention_set: Arc<Mutex<MentionSet>>,
project: Entity<Project>,
cx: &App,
) -> Completion {
let (file_name, directory) =
@@ -161,7 +112,6 @@ impl ContextPickerCompletionProvider {
new_text_len - 1,
editor,
mention_set,
project,
)),
}
}
@@ -209,7 +159,6 @@ impl CompletionProvider for ContextPickerCompletionProvider {
return Task::ready(Ok(Vec::new()));
};
let project = workspace.read(cx).project().clone();
let snapshot = buffer.read(cx).snapshot();
let source_range = snapshot.anchor_before(state.source_range.start)
..snapshot.anchor_after(state.source_range.end);
@@ -246,7 +195,6 @@ impl CompletionProvider for ContextPickerCompletionProvider {
source_range.clone(),
editor.clone(),
mention_set.clone(),
project.clone(),
cx,
)
})
@@ -306,7 +254,6 @@ fn confirm_completion_callback(
content_len: usize,
editor: Entity<Editor>,
mention_set: Arc<Mutex<MentionSet>>,
project: Entity<Project>,
) -> Arc<dyn Fn(CompletionIntent, &mut Window, &mut App) -> bool + Send + Sync> {
Arc::new(move |_, window, cx| {
let crease_text = crease_text.clone();
@@ -314,7 +261,6 @@ fn confirm_completion_callback(
let editor = editor.clone();
let project_path = project_path.clone();
let mention_set = mention_set.clone();
let project = project.clone();
window.defer(cx, move |window, cx| {
let crease_id = crate::context_picker::insert_crease_for_mention(
excerpt_id,
@@ -326,13 +272,8 @@ fn confirm_completion_callback(
window,
cx,
);
let Some(path) = project.read(cx).absolute_path(&project_path, cx) else {
return;
};
if let Some(crease_id) = crease_id {
mention_set.lock().insert(crease_id, path);
mention_set.lock().insert(crease_id, project_path);
}
});
false

View File

@@ -1,456 +0,0 @@
use crate::acp::completion_provider::ContextPickerCompletionProvider;
use crate::acp::completion_provider::MentionSet;
use acp_thread::MentionUri;
use agent_client_protocol as acp;
use anyhow::Result;
use collections::HashSet;
use editor::{
AnchorRangeExt, ContextMenuOptions, ContextMenuPlacement, Editor, EditorElement, EditorMode,
EditorStyle, MultiBuffer,
};
use file_icons::FileIcons;
use gpui::{
AppContext, Context, Entity, EventEmitter, FocusHandle, Focusable, Task, TextStyle, WeakEntity,
};
use language::Buffer;
use language::Language;
use parking_lot::Mutex;
use project::{CompletionIntent, Project};
use settings::Settings;
use std::path::Path;
use std::rc::Rc;
use std::sync::Arc;
use theme::ThemeSettings;
use ui::{
ActiveTheme, App, IconName, InteractiveElement, IntoElement, ParentElement, Render,
SharedString, Styled, TextSize, Window, div,
};
use util::ResultExt;
use workspace::Workspace;
use zed_actions::agent::Chat;
pub struct MessageEditor {
editor: Entity<Editor>,
project: Entity<Project>,
mention_set: Arc<Mutex<MentionSet>>,
}
pub enum MessageEditorEvent {
Send,
Cancel,
}
impl EventEmitter<MessageEditorEvent> for MessageEditor {}
impl MessageEditor {
pub fn new(
workspace: WeakEntity<Workspace>,
project: Entity<Project>,
mode: EditorMode,
window: &mut Window,
cx: &mut Context<Self>,
) -> Self {
let language = Language::new(
language::LanguageConfig {
completion_query_characters: HashSet::from_iter(['.', '-', '_', '@']),
..Default::default()
},
None,
);
let mention_set = Arc::new(Mutex::new(MentionSet::default()));
let editor = cx.new(|cx| {
let buffer = cx.new(|cx| Buffer::local("", cx).with_language(Arc::new(language), cx));
let buffer = cx.new(|cx| MultiBuffer::singleton(buffer, cx));
let mut editor = Editor::new(mode, buffer, None, window, cx);
editor.set_placeholder_text("Message the agent @ to include files", cx);
editor.set_show_indent_guides(false, cx);
editor.set_soft_wrap();
editor.set_use_modal_editing(true);
editor.set_completion_provider(Some(Rc::new(ContextPickerCompletionProvider::new(
mention_set.clone(),
workspace,
cx.weak_entity(),
))));
editor.set_context_menu_options(ContextMenuOptions {
min_entries_visible: 12,
max_entries_visible: 12,
placement: Some(ContextMenuPlacement::Above),
});
editor
});
Self {
editor,
project,
mention_set,
}
}
pub fn is_empty(&self, cx: &App) -> bool {
self.editor.read(cx).is_empty(cx)
}
pub fn contents(&self, cx: &mut Context<Self>) -> Task<Result<Vec<acp::ContentBlock>>> {
let contents = self.mention_set.lock().contents(self.project.clone(), cx);
let editor = self.editor.clone();
cx.spawn(async move |_, cx| {
let contents = contents.await?;
editor.update(cx, |editor, cx| {
let mut ix = 0;
let mut chunks: Vec<acp::ContentBlock> = Vec::new();
let text = editor.text(cx);
editor.display_map.update(cx, |map, cx| {
let snapshot = map.snapshot(cx);
for (crease_id, crease) in snapshot.crease_snapshot.creases() {
// Skip creases that have been edited out of the message buffer.
if !crease.range().start.is_valid(&snapshot.buffer_snapshot) {
continue;
}
if let Some(mention) = contents.get(&crease_id) {
let crease_range = crease.range().to_offset(&snapshot.buffer_snapshot);
if crease_range.start > ix {
chunks.push(text[ix..crease_range.start].into());
}
chunks.push(acp::ContentBlock::Resource(acp::EmbeddedResource {
annotations: None,
resource: acp::EmbeddedResourceResource::TextResourceContents(
acp::TextResourceContents {
mime_type: None,
text: mention.content.clone(),
uri: mention.uri.to_uri(),
},
),
}));
ix = crease_range.end;
}
}
if ix < text.len() {
let last_chunk = text[ix..].trim_end();
if !last_chunk.is_empty() {
chunks.push(last_chunk.into());
}
}
});
chunks
})
})
}
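// Illustrative sketch, not from the diff: the shape of the block built above for one
// mention crease. Text around creases becomes plain text blocks; each crease becomes an
// embedded resource carrying the mentioned file's contents and mention URI
// (both values below are hypothetical).
fn _example_mention_block() -> acp::ContentBlock {
    acp::ContentBlock::Resource(acp::EmbeddedResource {
        annotations: None,
        resource: acp::EmbeddedResourceResource::TextResourceContents(
            acp::TextResourceContents {
                mime_type: None,
                text: "fn main() {}".into(),
                uri: "file:///project/main.rs".into(),
            },
        ),
    })
}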
pub fn clear(&mut self, window: &mut Window, cx: &mut Context<Self>) {
self.editor.update(cx, |editor, cx| {
editor.clear(window, cx);
editor.remove_creases(self.mention_set.lock().drain(), cx)
});
}
fn chat(&mut self, _: &Chat, _: &mut Window, cx: &mut Context<Self>) {
cx.emit(MessageEditorEvent::Send)
}
fn cancel(&mut self, _: &menu::Cancel, _: &mut Window, cx: &mut Context<Self>) {
cx.emit(MessageEditorEvent::Cancel)
}
pub fn insert_dragged_files(
&self,
paths: Vec<project::ProjectPath>,
window: &mut Window,
cx: &mut Context<Self>,
) {
let buffer = self.editor.read(cx).buffer().clone();
let Some((&excerpt_id, _, _)) = buffer.read(cx).snapshot(cx).as_singleton() else {
return;
};
let Some(buffer) = buffer.read(cx).as_singleton() else {
return;
};
for path in paths {
let Some(entry) = self.project.read(cx).entry_for_path(&path, cx) else {
continue;
};
let Some(abs_path) = self.project.read(cx).absolute_path(&path, cx) else {
continue;
};
let anchor = buffer.update(cx, |buffer, _cx| buffer.anchor_before(buffer.len()));
let path_prefix = abs_path
.file_name()
.unwrap_or(path.path.as_os_str())
.display()
.to_string();
let completion = ContextPickerCompletionProvider::completion_for_path(
path,
&path_prefix,
false,
entry.is_dir(),
excerpt_id,
anchor..anchor,
self.editor.clone(),
self.mention_set.clone(),
self.project.clone(),
cx,
);
self.editor.update(cx, |message_editor, cx| {
message_editor.edit(
[(
multi_buffer::Anchor::max()..multi_buffer::Anchor::max(),
completion.new_text,
)],
cx,
);
});
if let Some(confirm) = completion.confirm.clone() {
confirm(CompletionIntent::Complete, window, cx);
}
}
}
pub fn set_mode(&mut self, mode: EditorMode, cx: &mut Context<Self>) {
self.editor.update(cx, |editor, cx| {
editor.set_mode(mode);
cx.notify()
});
}
pub fn set_message(
&mut self,
message: &[acp::ContentBlock],
window: &mut Window,
cx: &mut Context<Self>,
) {
let mut text = String::new();
let mut mentions = Vec::new();
for chunk in message {
match chunk {
acp::ContentBlock::Text(text_content) => {
text.push_str(&text_content.text);
}
acp::ContentBlock::Resource(acp::EmbeddedResource {
resource: acp::EmbeddedResourceResource::TextResourceContents(resource),
..
}) => {
if let Some(mention) = MentionUri::parse(&resource.uri).log_err() {
    // Derive the absolute path and display name from the parsed mention.
    let MentionUri::File(abs_path) = &mention else { continue };
    let filename: SharedString = abs_path
        .file_name()
        .map(|name| name.to_string_lossy().into_owned())
        .unwrap_or_default()
        .into();
    let Some(project_path) =
        self.project.read(cx).project_path_for_absolute_path(abs_path, cx)
    else {
        continue;
    };
    let start = text.len();
    text.push_str(&mention.as_link().to_string());
    let end = text.len();
    mentions.push((start..end, project_path, filename));
}
}
acp::ContentBlock::Image(_)
| acp::ContentBlock::Audio(_)
| acp::ContentBlock::Resource(_)
| acp::ContentBlock::ResourceLink(_) => {}
}
}
let snapshot = self.editor.update(cx, |editor, cx| {
editor.set_text(text, window, cx);
editor.buffer().read(cx).snapshot(cx)
});
self.mention_set.lock().clear();
for (range, project_path, filename) in mentions {
let crease_icon_path = if project_path.path.is_dir() {
FileIcons::get_folder_icon(false, cx)
.unwrap_or_else(|| IconName::Folder.path().into())
} else {
FileIcons::get_icon(Path::new(project_path.path.as_ref()), cx)
.unwrap_or_else(|| IconName::File.path().into())
};
let anchor = snapshot.anchor_before(range.start);
if let Some(project_path) = self.project.read(cx).absolute_path(&project_path, cx) {
let crease_id = crate::context_picker::insert_crease_for_mention(
anchor.excerpt_id,
anchor.text_anchor,
range.end - range.start,
filename,
crease_icon_path,
self.editor.clone(),
window,
cx,
);
if let Some(crease_id) = crease_id {
self.mention_set.lock().insert(crease_id, project_path);
}
}
}
cx.notify();
}
#[cfg(test)]
pub fn set_text(&mut self, text: &str, window: &mut Window, cx: &mut Context<Self>) {
self.editor.update(cx, |editor, cx| {
editor.set_text(text, window, cx);
});
}
}
impl Focusable for MessageEditor {
fn focus_handle(&self, cx: &App) -> FocusHandle {
self.editor.focus_handle(cx)
}
}
impl Render for MessageEditor {
fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
div()
.key_context("MessageEditor")
.on_action(cx.listener(Self::chat))
.on_action(cx.listener(Self::cancel))
.flex_1()
.child({
let settings = ThemeSettings::get_global(cx);
let font_size = TextSize::Small
.rems(cx)
.to_pixels(settings.agent_font_size(cx));
let line_height = settings.buffer_line_height.value() * font_size;
let text_style = TextStyle {
color: cx.theme().colors().text,
font_family: settings.buffer_font.family.clone(),
font_fallbacks: settings.buffer_font.fallbacks.clone(),
font_features: settings.buffer_font.features.clone(),
font_size: font_size.into(),
line_height: line_height.into(),
..Default::default()
};
EditorElement::new(
&self.editor,
EditorStyle {
background: cx.theme().colors().editor_background,
local_player: cx.theme().players().local(),
text: text_style,
syntax: cx.theme().syntax().clone(),
..Default::default()
},
)
})
}
}
#[cfg(test)]
mod tests {
use std::path::Path;
use agent_client_protocol as acp;
use editor::EditorMode;
use fs::FakeFs;
use gpui::{AppContext, TestAppContext};
use lsp::{CompletionContext, CompletionTriggerKind};
use project::{CompletionIntent, Project};
use serde_json::json;
use util::path;
use workspace::Workspace;
use crate::acp::{message_editor::MessageEditor, thread_view::tests::init_test};
#[gpui::test]
async fn test_at_mention_removal(cx: &mut TestAppContext) {
init_test(cx);
let fs = FakeFs::new(cx.executor());
fs.insert_tree("/project", json!({"file": ""})).await;
let project = Project::test(fs, [Path::new(path!("/project"))], cx).await;
let (workspace, cx) =
cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
let message_editor = cx.update(|window, cx| {
cx.new(|cx| {
MessageEditor::new(
workspace.downgrade(),
project.clone(),
EditorMode::AutoHeight {
min_lines: 1,
max_lines: None,
},
window,
cx,
)
})
});
let editor = message_editor.update(cx, |message_editor, _| message_editor.editor.clone());
cx.run_until_parked();
let excerpt_id = editor.update(cx, |editor, cx| {
editor
.buffer()
.read(cx)
.excerpt_ids()
.into_iter()
.next()
.unwrap()
});
let completions = editor.update_in(cx, |editor, window, cx| {
editor.set_text("Hello @", window, cx);
let buffer = editor.buffer().read(cx).as_singleton().unwrap();
let completion_provider = editor.completion_provider().unwrap();
completion_provider.completions(
excerpt_id,
&buffer,
text::Anchor::MAX,
CompletionContext {
trigger_kind: CompletionTriggerKind::TRIGGER_CHARACTER,
trigger_character: Some("@".into()),
},
window,
cx,
)
});
let [_, completion]: [_; 2] = completions
.await
.unwrap()
.into_iter()
.flat_map(|response| response.completions)
.collect::<Vec<_>>()
.try_into()
.unwrap();
editor.update_in(cx, |editor, window, cx| {
let snapshot = editor.buffer().read(cx).snapshot(cx);
let start = snapshot
.anchor_in_excerpt(excerpt_id, completion.replace_range.start)
.unwrap();
let end = snapshot
.anchor_in_excerpt(excerpt_id, completion.replace_range.end)
.unwrap();
editor.edit([(start..end, completion.new_text)], cx);
(completion.confirm.unwrap())(CompletionIntent::Complete, window, cx);
});
cx.run_until_parked();
// Backspace over the inserted crease (and the following space).
editor.update_in(cx, |editor, window, cx| {
editor.backspace(&Default::default(), window, cx);
editor.backspace(&Default::default(), window, cx);
});
let content = message_editor
.update_in(cx, |message_editor, _window, cx| {
message_editor.contents(cx)
})
.await
.unwrap();
// We don't send a resource link for the deleted crease.
assert!(matches!(content.as_slice(), [acp::ContentBlock::Text { .. }]));
}
}

View File

@@ -0,0 +1,92 @@
pub struct MessageHistory<T> {
items: Vec<T>,
current: Option<usize>,
}
impl<T> Default for MessageHistory<T> {
fn default() -> Self {
MessageHistory {
items: Vec::new(),
current: None,
}
}
}
impl<T> MessageHistory<T> {
pub fn push(&mut self, message: T) {
self.current.take();
self.items.push(message);
}
pub fn reset_position(&mut self) {
self.current.take();
}
pub fn prev(&mut self) -> Option<&T> {
if self.items.is_empty() {
return None;
}
let new_ix = self
.current
.get_or_insert(self.items.len())
.saturating_sub(1);
self.current = Some(new_ix);
self.items.get(new_ix)
}
pub fn next(&mut self) -> Option<&T> {
let current = self.current.as_mut()?;
*current += 1;
self.items.get(*current).or_else(|| {
self.current.take();
None
})
}
#[cfg(test)]
pub fn items(&self) -> &[T] {
&self.items
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_prev_next() {
let mut history = MessageHistory::default();
// Test empty history
assert_eq!(history.prev(), None);
assert_eq!(history.next(), None);
// Add some messages
history.push("first");
history.push("second");
history.push("third");
// Test prev navigation
assert_eq!(history.prev(), Some(&"third"));
assert_eq!(history.prev(), Some(&"second"));
assert_eq!(history.prev(), Some(&"first"));
assert_eq!(history.prev(), Some(&"first"));
assert_eq!(history.next(), Some(&"second"));
// Test mixed navigation
history.push("fourth");
assert_eq!(history.prev(), Some(&"fourth"));
assert_eq!(history.prev(), Some(&"third"));
assert_eq!(history.next(), Some(&"fourth"));
assert_eq!(history.next(), None);
// Test that push resets navigation
history.prev();
history.prev();
history.push("fifth");
assert_eq!(history.prev(), Some(&"fifth"));
}
}

View File

@@ -1,472 +0,0 @@
use std::{cmp::Reverse, rc::Rc, sync::Arc};
use acp_thread::{AgentModelInfo, AgentModelList, AgentModelSelector};
use agent_client_protocol as acp;
use anyhow::Result;
use collections::IndexMap;
use futures::FutureExt;
use fuzzy::{StringMatchCandidate, match_strings};
use gpui::{Action, AsyncWindowContext, BackgroundExecutor, DismissEvent, Task, WeakEntity};
use ordered_float::OrderedFloat;
use picker::{Picker, PickerDelegate};
use ui::{
AnyElement, App, Context, IntoElement, ListItem, ListItemSpacing, SharedString, Window,
prelude::*, rems,
};
use util::ResultExt;
pub type AcpModelSelector = Picker<AcpModelPickerDelegate>;
pub fn acp_model_selector(
session_id: acp::SessionId,
selector: Rc<dyn AgentModelSelector>,
window: &mut Window,
cx: &mut Context<AcpModelSelector>,
) -> AcpModelSelector {
let delegate = AcpModelPickerDelegate::new(session_id, selector, window, cx);
Picker::list(delegate, window, cx)
.show_scrollbar(true)
.width(rems(20.))
.max_height(Some(rems(20.).into()))
}
enum AcpModelPickerEntry {
Separator(SharedString),
Model(AgentModelInfo),
}
pub struct AcpModelPickerDelegate {
session_id: acp::SessionId,
selector: Rc<dyn AgentModelSelector>,
filtered_entries: Vec<AcpModelPickerEntry>,
models: Option<AgentModelList>,
selected_index: usize,
selected_model: Option<AgentModelInfo>,
_refresh_models_task: Task<()>,
}
impl AcpModelPickerDelegate {
fn new(
session_id: acp::SessionId,
selector: Rc<dyn AgentModelSelector>,
window: &mut Window,
cx: &mut Context<AcpModelSelector>,
) -> Self {
let mut rx = selector.watch(cx);
let refresh_models_task = cx.spawn_in(window, {
let session_id = session_id.clone();
async move |this, cx| {
async fn refresh(
this: &WeakEntity<Picker<AcpModelPickerDelegate>>,
session_id: &acp::SessionId,
cx: &mut AsyncWindowContext,
) -> Result<()> {
let (models_task, selected_model_task) = this.update(cx, |this, cx| {
(
this.delegate.selector.list_models(cx),
this.delegate.selector.selected_model(session_id, cx),
)
})?;
let (models, selected_model) = futures::join!(models_task, selected_model_task);
this.update_in(cx, |this, window, cx| {
this.delegate.models = models.ok();
this.delegate.selected_model = selected_model.ok();
this.delegate.update_matches(this.query(cx), window, cx)
})?
.await;
Ok(())
}
refresh(&this, &session_id, cx).await.log_err();
while let Ok(()) = rx.recv().await {
refresh(&this, &session_id, cx).await.log_err();
}
}
});
Self {
session_id,
selector,
filtered_entries: Vec::new(),
models: None,
selected_model: None,
selected_index: 0,
_refresh_models_task: refresh_models_task,
}
}
pub fn active_model(&self) -> Option<&AgentModelInfo> {
self.selected_model.as_ref()
}
}
impl PickerDelegate for AcpModelPickerDelegate {
type ListItem = AnyElement;
fn match_count(&self) -> usize {
self.filtered_entries.len()
}
fn selected_index(&self) -> usize {
self.selected_index
}
fn set_selected_index(&mut self, ix: usize, _: &mut Window, cx: &mut Context<Picker<Self>>) {
self.selected_index = ix.min(self.filtered_entries.len().saturating_sub(1));
cx.notify();
}
fn can_select(
&mut self,
ix: usize,
_window: &mut Window,
_cx: &mut Context<Picker<Self>>,
) -> bool {
match self.filtered_entries.get(ix) {
Some(AcpModelPickerEntry::Model(_)) => true,
Some(AcpModelPickerEntry::Separator(_)) | None => false,
}
}
fn placeholder_text(&self, _window: &mut Window, _cx: &mut App) -> Arc<str> {
"Select a model…".into()
}
fn update_matches(
&mut self,
query: String,
window: &mut Window,
cx: &mut Context<Picker<Self>>,
) -> Task<()> {
cx.spawn_in(window, async move |this, cx| {
let filtered_models = match this
.read_with(cx, |this, cx| {
this.delegate.models.clone().map(move |models| {
fuzzy_search(models, query, cx.background_executor().clone())
})
})
.ok()
.flatten()
{
Some(task) => task.await,
None => AgentModelList::Flat(vec![]),
};
this.update_in(cx, |this, window, cx| {
this.delegate.filtered_entries =
info_list_to_picker_entries(filtered_models).collect();
// Finds the currently selected model in the list
let new_index = this
.delegate
.selected_model
.as_ref()
.and_then(|selected| {
this.delegate.filtered_entries.iter().position(|entry| {
if let AcpModelPickerEntry::Model(model_info) = entry {
model_info.id == selected.id
} else {
false
}
})
})
.unwrap_or(0);
this.set_selected_index(new_index, Some(picker::Direction::Down), true, window, cx);
cx.notify();
})
.ok();
})
}
fn confirm(&mut self, _secondary: bool, window: &mut Window, cx: &mut Context<Picker<Self>>) {
if let Some(AcpModelPickerEntry::Model(model_info)) =
self.filtered_entries.get(self.selected_index)
{
self.selector
.select_model(self.session_id.clone(), model_info.id.clone(), cx)
.detach_and_log_err(cx);
self.selected_model = Some(model_info.clone());
let current_index = self.selected_index;
self.set_selected_index(current_index, window, cx);
cx.emit(DismissEvent);
}
}
fn dismissed(&mut self, _: &mut Window, cx: &mut Context<Picker<Self>>) {
cx.emit(DismissEvent);
}
fn render_match(
&self,
ix: usize,
selected: bool,
_: &mut Window,
cx: &mut Context<Picker<Self>>,
) -> Option<Self::ListItem> {
match self.filtered_entries.get(ix)? {
AcpModelPickerEntry::Separator(title) => Some(
div()
.px_2()
.pb_1()
.when(ix > 1, |this| {
this.mt_1()
.pt_2()
.border_t_1()
.border_color(cx.theme().colors().border_variant)
})
.child(
Label::new(title)
.size(LabelSize::XSmall)
.color(Color::Muted),
)
.into_any_element(),
),
AcpModelPickerEntry::Model(model_info) => {
let is_selected = Some(model_info) == self.selected_model.as_ref();
let model_icon_color = if is_selected {
Color::Accent
} else {
Color::Muted
};
Some(
ListItem::new(ix)
.inset(true)
.spacing(ListItemSpacing::Sparse)
.toggle_state(selected)
.start_slot::<Icon>(model_info.icon.map(|icon| {
Icon::new(icon)
.color(model_icon_color)
.size(IconSize::Small)
}))
.child(
h_flex()
.w_full()
.pl_0p5()
.gap_1p5()
.w(px(240.))
.child(Label::new(model_info.name.clone()).truncate()),
)
.end_slot(div().pr_3().when(is_selected, |this| {
this.child(
Icon::new(IconName::Check)
.color(Color::Accent)
.size(IconSize::Small),
)
}))
.into_any_element(),
)
}
}
}
fn render_footer(
&self,
_: &mut Window,
cx: &mut Context<Picker<Self>>,
) -> Option<gpui::AnyElement> {
Some(
h_flex()
.w_full()
.border_t_1()
.border_color(cx.theme().colors().border_variant)
.p_1()
.gap_4()
.justify_between()
.child(
Button::new("configure", "Configure")
.icon(IconName::Settings)
.icon_size(IconSize::Small)
.icon_color(Color::Muted)
.icon_position(IconPosition::Start)
.on_click(|_, window, cx| {
window.dispatch_action(
zed_actions::agent::OpenSettings.boxed_clone(),
cx,
);
}),
)
.into_any(),
)
}
}
fn info_list_to_picker_entries(
model_list: AgentModelList,
) -> impl Iterator<Item = AcpModelPickerEntry> {
match model_list {
AgentModelList::Flat(list) => {
itertools::Either::Left(list.into_iter().map(AcpModelPickerEntry::Model))
}
AgentModelList::Grouped(index_map) => {
itertools::Either::Right(index_map.into_iter().flat_map(|(group_name, models)| {
std::iter::once(AcpModelPickerEntry::Separator(group_name.0))
.chain(models.into_iter().map(AcpModelPickerEntry::Model))
}))
}
}
}
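// Illustrative sketch, not from the diff: group headers interleave with their models in
// picker order, so a single-group list flattens to [Separator, Model, ...]. The model and
// group names below are made up.
fn _example_flatten_grouped_models() {
    let grouped = AgentModelList::Grouped(IndexMap::from_iter([(
        acp_thread::AgentModelGroupName("zed".into()),
        vec![acp_thread::AgentModelInfo {
            id: acp_thread::AgentModelId("claude-sonnet".into()),
            name: "Claude Sonnet".into(),
            icon: None,
        }],
    )]));
    let entries: Vec<AcpModelPickerEntry> = info_list_to_picker_entries(grouped).collect();
    assert!(matches!(entries[0], AcpModelPickerEntry::Separator(_)));
    assert!(matches!(entries[1], AcpModelPickerEntry::Model(_)));
}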
async fn fuzzy_search(
model_list: AgentModelList,
query: String,
executor: BackgroundExecutor,
) -> AgentModelList {
async fn fuzzy_search_list(
model_list: Vec<AgentModelInfo>,
query: &str,
executor: BackgroundExecutor,
) -> Vec<AgentModelInfo> {
let candidates = model_list
.iter()
.enumerate()
.map(|(ix, model)| {
StringMatchCandidate::new(ix, &format!("{}/{}", model.id, model.name))
})
.collect::<Vec<_>>();
let mut matches = match_strings(
&candidates,
&query,
false,
true,
100,
&Default::default(),
executor,
)
.await;
matches.sort_unstable_by_key(|mat| {
let candidate = &candidates[mat.candidate_id];
(Reverse(OrderedFloat(mat.score)), candidate.id)
});
matches
.into_iter()
.map(|mat| model_list[mat.candidate_id].clone())
.collect()
}
match model_list {
AgentModelList::Flat(model_list) => {
AgentModelList::Flat(fuzzy_search_list(model_list, &query, executor).await)
}
AgentModelList::Grouped(index_map) => {
let groups =
futures::future::join_all(index_map.into_iter().map(|(group_name, models)| {
fuzzy_search_list(models, &query, executor.clone())
.map(|results| (group_name, results))
}))
.await;
AgentModelList::Grouped(IndexMap::from_iter(
groups
.into_iter()
.filter(|(_, results)| !results.is_empty()),
))
}
}
}
#[cfg(test)]
mod tests {
use gpui::TestAppContext;
use super::*;
fn create_model_list(grouped_models: Vec<(&str, Vec<&str>)>) -> AgentModelList {
AgentModelList::Grouped(IndexMap::from_iter(grouped_models.into_iter().map(
|(group, models)| {
(
acp_thread::AgentModelGroupName(group.to_string().into()),
models
.into_iter()
.map(|model| acp_thread::AgentModelInfo {
id: acp_thread::AgentModelId(model.to_string().into()),
name: model.to_string().into(),
icon: None,
})
.collect::<Vec<_>>(),
)
},
)))
}
fn assert_models_eq(result: AgentModelList, expected: Vec<(&str, Vec<&str>)>) {
let AgentModelList::Grouped(groups) = result else {
panic!("Expected LanguageModelInfoList::Grouped, got {:?}", result);
};
assert_eq!(
groups.len(),
expected.len(),
"Number of groups doesn't match"
);
for (i, (expected_group, expected_models)) in expected.iter().enumerate() {
let (actual_group, actual_models) = groups.get_index(i).unwrap();
assert_eq!(
actual_group.0.as_ref(),
*expected_group,
"Group at position {} doesn't match expected group",
i
);
assert_eq!(
actual_models.len(),
expected_models.len(),
"Number of models in group {} doesn't match",
expected_group
);
for (j, expected_model_name) in expected_models.iter().enumerate() {
assert_eq!(
actual_models[j].name, *expected_model_name,
"Model at position {} in group {} doesn't match expected model",
j, expected_group
);
}
}
}
#[gpui::test]
async fn test_fuzzy_match(cx: &mut TestAppContext) {
let models = create_model_list(vec![
(
"zed",
vec![
"Claude 3.7 Sonnet",
"Claude 3.7 Sonnet Thinking",
"gpt-4.1",
"gpt-4.1-nano",
],
),
("openai", vec!["gpt-3.5-turbo", "gpt-4.1", "gpt-4.1-nano"]),
("ollama", vec!["mistral", "deepseek"]),
]);
// Results should preserve models order whenever possible.
// In the case below, `zed/gpt-4.1` and `openai/gpt-4.1` have identical
// similarity scores, but `zed/gpt-4.1` was higher in the models list,
// so it should appear first in the results.
let results = fuzzy_search(models.clone(), "41".into(), cx.executor()).await;
assert_models_eq(
results,
vec![
("zed", vec!["gpt-4.1", "gpt-4.1-nano"]),
("openai", vec!["gpt-4.1", "gpt-4.1-nano"]),
],
);
// Fuzzy search
let results = fuzzy_search(models.clone(), "4n".into(), cx.executor()).await;
assert_models_eq(
results,
vec![
("zed", vec!["gpt-4.1-nano"]),
("openai", vec!["gpt-4.1-nano"]),
],
);
}
}

View File

@@ -1,85 +0,0 @@
use std::rc::Rc;
use acp_thread::AgentModelSelector;
use agent_client_protocol as acp;
use gpui::{Entity, FocusHandle};
use picker::popover_menu::PickerPopoverMenu;
use ui::{
ButtonLike, Context, IntoElement, PopoverMenuHandle, SharedString, Tooltip, Window, prelude::*,
};
use zed_actions::agent::ToggleModelSelector;
use crate::acp::{AcpModelSelector, model_selector::acp_model_selector};
pub struct AcpModelSelectorPopover {
selector: Entity<AcpModelSelector>,
menu_handle: PopoverMenuHandle<AcpModelSelector>,
focus_handle: FocusHandle,
}
impl AcpModelSelectorPopover {
pub(crate) fn new(
session_id: acp::SessionId,
selector: Rc<dyn AgentModelSelector>,
menu_handle: PopoverMenuHandle<AcpModelSelector>,
focus_handle: FocusHandle,
window: &mut Window,
cx: &mut Context<Self>,
) -> Self {
Self {
selector: cx.new(move |cx| acp_model_selector(session_id, selector, window, cx)),
menu_handle,
focus_handle,
}
}
pub fn toggle(&self, window: &mut Window, cx: &mut Context<Self>) {
self.menu_handle.toggle(window, cx);
}
}
impl Render for AcpModelSelectorPopover {
fn render(&mut self, window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
let model = self.selector.read(cx).delegate.active_model();
let model_name = model
.as_ref()
.map(|model| model.name.clone())
.unwrap_or_else(|| SharedString::from("Select a Model"));
let model_icon = model.as_ref().and_then(|model| model.icon);
let focus_handle = self.focus_handle.clone();
PickerPopoverMenu::new(
self.selector.clone(),
ButtonLike::new("active-model")
.when_some(model_icon, |this, icon| {
this.child(Icon::new(icon).color(Color::Muted).size(IconSize::XSmall))
})
.child(
Label::new(model_name)
.color(Color::Muted)
.size(LabelSize::Small)
.ml_0p5(),
)
.child(
Icon::new(IconName::ChevronDown)
.color(Color::Muted)
.size(IconSize::XSmall),
),
move |window, cx| {
Tooltip::for_action_in(
"Change Model",
&ToggleModelSelector,
&focus_handle,
window,
cx,
)
},
gpui::Corner::BottomRight,
cx,
)
.with_handle(self.menu_handle.clone())
.render(window, cx)
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +1,9 @@
use crate::{Keep, KeepAll, OpenAgentDiff, Reject, RejectAll};
use acp_thread::{AcpThread, AcpThreadEvent};
use action_log::ActionLog;
use agent::{Thread, ThreadEvent, ThreadSummary};
use agent_settings::AgentSettings;
use anyhow::Result;
use assistant_tool::ActionLog;
use buffer_diff::DiffHunkStatus;
use collections::{HashMap, HashSet};
use editor::{
@@ -1521,8 +1521,7 @@ impl AgentDiff {
self.update_reviewing_editors(workspace, window, cx);
}
}
AcpThreadEvent::EntriesRemoved(_)
| AcpThreadEvent::Stopped
AcpThreadEvent::Stopped
| AcpThreadEvent::ToolAuthorizationRequired
| AcpThreadEvent::Error
| AcpThreadEvent::ServerExited(_) => {}

File diff suppressed because it is too large Load Diff

View File

@@ -64,8 +64,6 @@ actions!(
NewTextThread,
/// Toggles the context picker interface for adding files, symbols, or other context.
ToggleContextPicker,
/// Toggles the menu to create new agent threads.
ToggleNewThreadMenu,
/// Toggles the navigation menu for switching between threads and views.
ToggleNavigationMenu,
/// Toggles the options menu for agent settings and preferences.
@@ -157,11 +155,11 @@ enum ExternalAgent {
}
impl ExternalAgent {
pub fn server(&self, fs: Arc<dyn fs::Fs>) -> Rc<dyn agent_servers::AgentServer> {
pub fn server(&self) -> Rc<dyn agent_servers::AgentServer> {
match self {
ExternalAgent::Gemini => Rc::new(agent_servers::Gemini),
ExternalAgent::ClaudeCode => Rc::new(agent_servers::ClaudeCode),
ExternalAgent::NativeAgent => Rc::new(agent2::NativeAgentServer::new(fs)),
ExternalAgent::NativeAgent => Rc::new(agent2::NativeAgentServer),
}
}
}

View File

@@ -2,7 +2,7 @@ mod agent_notification;
mod burn_mode_tooltip;
mod context_pill;
mod end_trial_upsell;
// mod new_thread_button;
mod new_thread_button;
mod onboarding_modal;
pub mod preview;
@@ -10,5 +10,5 @@ pub use agent_notification::*;
pub use burn_mode_tooltip::*;
pub use context_pill::*;
pub use end_trial_upsell::*;
// pub use new_thread_button::*;
pub use new_thread_button::*;
pub use onboarding_modal::*;

View File

@@ -11,7 +11,7 @@ pub struct NewThreadButton {
}
impl NewThreadButton {
fn new(id: impl Into<ElementId>, label: impl Into<SharedString>, icon: IconName) -> Self {
pub fn new(id: impl Into<ElementId>, label: impl Into<SharedString>, icon: IconName) -> Self {
Self {
id: id.into(),
label: label.into(),
@@ -21,12 +21,12 @@ impl NewThreadButton {
}
}
fn keybinding(mut self, keybinding: Option<ui::KeyBinding>) -> Self {
pub fn keybinding(mut self, keybinding: Option<ui::KeyBinding>) -> Self {
self.keybinding = keybinding;
self
}
fn on_click<F>(mut self, handler: F) -> Self
pub fn on_click<F>(mut self, handler: F) -> Self
where
F: Fn(&mut Window, &mut App) + 'static,
{

View File

@@ -58,7 +58,9 @@ impl Assets {
pub fn load_test_fonts(&self, cx: &App) {
cx.text_system()
.add_fonts(vec![
self.load("fonts/lilex/Lilex-Regular.ttf").unwrap().unwrap(),
self.load("fonts/plex-mono/ZedPlexMono-Regular.ttf")
.unwrap()
.unwrap(),
])
.unwrap()
}

View File

@@ -12,10 +12,12 @@ workspace = true
path = "src/assistant_tool.rs"
[dependencies]
action_log.workspace = true
anyhow.workspace = true
buffer_diff.workspace = true
clock.workspace = true
collections.workspace = true
derive_more.workspace = true
futures.workspace = true
gpui.workspace = true
icons.workspace = true
language.workspace = true
@@ -28,6 +30,7 @@ serde.workspace = true
serde_json.workspace = true
text.workspace = true
util.workspace = true
watch.workspace = true
workspace.workspace = true
workspace-hack.workspace = true

View File

@@ -17,6 +17,8 @@ use util::{
pub struct ActionLog {
/// Buffers that we want to notify the model about when they change.
tracked_buffers: BTreeMap<Entity<Buffer>, TrackedBuffer>,
/// Has the model edited a file since it last checked diagnostics?
edited_since_project_diagnostics_check: bool,
/// The project this action log is associated with
project: Entity<Project>,
}
@@ -26,6 +28,7 @@ impl ActionLog {
pub fn new(project: Entity<Project>) -> Self {
Self {
tracked_buffers: BTreeMap::default(),
edited_since_project_diagnostics_check: false,
project,
}
}
@@ -34,6 +37,16 @@ impl ActionLog {
&self.project
}
/// Notifies a diagnostics check
pub fn checked_project_diagnostics(&mut self) {
self.edited_since_project_diagnostics_check = false;
}
/// Returns true if any files have been edited since the last project diagnostics check
pub fn has_edited_files_since_project_diagnostics_check(&self) -> bool {
self.edited_since_project_diagnostics_check
}
pub fn latest_snapshot(&self, buffer: &Entity<Buffer>) -> Option<text::BufferSnapshot> {
Some(self.tracked_buffers.get(buffer)?.snapshot.clone())
}
@@ -530,11 +543,14 @@ impl ActionLog {
/// Mark a buffer as created by agent, so we can refresh it in the context
pub fn buffer_created(&mut self, buffer: Entity<Buffer>, cx: &mut Context<Self>) {
self.edited_since_project_diagnostics_check = true;
self.track_buffer_internal(buffer.clone(), true, cx);
}
/// Mark a buffer as edited by agent, so we can refresh it in the context
pub fn buffer_edited(&mut self, buffer: Entity<Buffer>, cx: &mut Context<Self>) {
self.edited_since_project_diagnostics_check = true;
let tracked_buffer = self.track_buffer_internal(buffer.clone(), false, cx);
if let TrackedBufferStatus::Deleted = tracked_buffer.status {
tracked_buffer.status = TrackedBufferStatus::Modified;
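// Illustrative usage sketch, not from the diff, assuming an `action_log: Entity<ActionLog>`,
// a `buffer: Entity<Buffer>`, and a gpui context `cx`: an agent edit sets the new flag, and
// a diagnostics check clears it again.
//
//     action_log.update(cx, |log, cx| log.buffer_edited(buffer.clone(), cx));
//     assert!(action_log.read(cx).has_edited_files_since_project_diagnostics_check());
//     action_log.update(cx, |log, _cx| log.checked_project_diagnostics());
//     assert!(!action_log.read(cx).has_edited_files_since_project_diagnostics_check());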

View File

@@ -1,3 +1,4 @@
mod action_log;
pub mod outline;
mod tool_registry;
mod tool_schema;
@@ -9,7 +10,6 @@ use std::fmt::Formatter;
use std::ops::Deref;
use std::sync::Arc;
use action_log::ActionLog;
use anyhow::Result;
use gpui::AnyElement;
use gpui::AnyWindowHandle;
@@ -25,6 +25,7 @@ use language_model::LanguageModelToolSchemaFormat;
use project::Project;
use workspace::Workspace;
pub use crate::action_log::*;
pub use crate::tool_registry::*;
pub use crate::tool_schema::*;
pub use crate::tool_working_set::*;

View File

@@ -1,4 +1,4 @@
use action_log::ActionLog;
use crate::ActionLog;
use anyhow::{Context as _, Result};
use gpui::{AsyncApp, Entity};
use language::{OutlineItem, ParseStatus};

View File

@@ -15,7 +15,6 @@ path = "src/assistant_tools.rs"
eval = []
[dependencies]
action_log.workspace = true
agent_settings.workspace = true
anyhow.workspace = true
assistant_tool.workspace = true

View File

@@ -1,7 +1,6 @@
use crate::schema::json_schema_for;
use action_log::ActionLog;
use anyhow::{Context as _, Result, anyhow};
use assistant_tool::{Tool, ToolResult};
use assistant_tool::{ActionLog, Tool, ToolResult};
use gpui::AnyWindowHandle;
use gpui::{App, AppContext, Entity, Task};
use language_model::LanguageModel;

Some files were not shown because too many files have changed in this diff.