Compare commits


114 Commits

Author SHA1 Message Date
Nate Butler
7f0ffa0109 Add some random static text to test the thread 2025-04-29 17:59:42 -04:00
Nate Butler
95d8409900 make it gooo 2025-04-29 15:41:27 -04:00
Nate Butler
d27fdd96f2 WIP 2025-04-29 15:19:51 -04:00
Nate Butler
c8e909afc6 wip 2025-04-29 14:56:39 -04:00
Nate Butler
abad6d9be9 Use the same property order for ThreadStore::new and load 2025-04-29 13:58:13 -04:00
Nate Butler
9c50d19841 wip 2025-04-29 13:06:19 -04:00
Nate Butler
17b98d068a Update usage banner scope 2025-04-29 11:53:23 -04:00
Anthony Eid
6386336eee debugger: Fix bug where active debug line highlights weren't cleared (#29562)
## Context
The bug occurred because we stopped propagating the
`BreakpointStoreEvent::SetDebugLine` whenever a new debug line highlight
had been set. This was done to prevent multiple panes from having
editors focus on the debug line. However, it stopped the event from
propagating to editors that needed to clear their debug line highlights.

I fixed this by introducing two phases:
1. Clear all debug line highlights
2. Set the active debug line highlight in a single editor

I also added a test to prevent regressions from occurring.

Release Notes:

- N/A
2025-04-29 15:15:45 +00:00
Marshall Bowers
c168fc335c collab: Add mode column to subscription_usage_meters table (#29603)
This PR adds a `mode` column to the `subscription_usage_meters` table in
the LLM database.

Release Notes:

- N/A
2025-04-29 14:58:34 +00:00
Marshall Bowers
b2df395918 language_models: Change default fast model for Zed provider (#29600)
This PR changes the default fast model for the Zed provider from Claude
3.5 Haiku to Claude 3.5 Sonnet.

We don't offer Claude 3.5 Haiku to users.

Closes https://github.com/zed-industries/zed/issues/29505.

Release Notes:

- agent: Changed the default fast model for the Zed provider to Claude
3.5 Sonnet.
2025-04-29 14:46:27 +00:00
Richard Feldman
2b431d3e9d Re-add code block formatting instructions (#29574)
Re-enabled instructions about code block formatting.

In practice, the model doesn't seem to use these very often, but there's
no negative effect on evals. In a future PR, I'll experiment with adding
more evals around the model actually using the code blocks.

2 runs before: (`--repetitions=8`)
```
=================================================================
                            AGGREGATE
=================================================================


4 examples failed to run!
Average programmatic score: 37%
Average diff score: 66%
Average thread score: 93%


-----------------------------------------------------------------
                     CUMULATIVE TOOL METRICS
-----------------------------------------------------------------

┌──────────────────────────────┬──────────┬──────────┬──────────┐
│             Tool             │   Uses   │ Failures │   Rate   │
├──────────────────────────────┼──────────┼──────────┼──────────┤
│edit_file                     │   398    │    53    │   13%    │
│terminal                      │    11    │    1     │    9%    │
│create_file                   │    40    │    2     │    5%    │
│read_file                     │   245    │    8     │    3%    │
│find_path                     │    48    │    0     │    0%    │
│list_directory                │    13    │    0     │    0%    │
│grep                          │   133    │    0     │    0%    │
│thinking                      │    18    │    0     │    0%    │
│diagnostics                   │   130    │    0     │    0%    │
```

```
=================================================================
                            AGGREGATE
=================================================================


1 examples failed to run!
Average programmatic score: 41%
Average diff score: 68%
Average thread score: 96%


-----------------------------------------------------------------
                     CUMULATIVE TOOL METRICS
-----------------------------------------------------------------

┌──────────────────────────────┬──────────┬──────────┬──────────┐
│             Tool             │   Uses   │ Failures │   Rate   │
├──────────────────────────────┼──────────┼──────────┼──────────┤
│fetch                         │    1     │    1     │   100%   │
│edit_file                     │   553    │    63    │   11%    │
│read_file                     │   349    │    3     │    1%    │
│diagnostics                   │   158    │    0     │    0%    │
│find_path                     │    70    │    0     │    0%    │
│list_directory                │    10    │    0     │    0%    │
│thinking                      │    45    │    0     │    0%    │
│grep                          │   213    │    0     │    0%    │
│create_file                   │    24    │    0     │    0%    │
│terminal                      │    17    │    0     │    0%    │
└──────────────────────────────┴──────────┴──────────┴──────────┘
```

1 run after this change:

```
=================================================================
                            AGGREGATE
=================================================================

Average programmatic score: 42%
Average diff score: 74%
Average thread score: 100%


-----------------------------------------------------------------
                     CUMULATIVE TOOL METRICS
-----------------------------------------------------------------

┌──────────────────────────────┬──────────┬──────────┬──────────┐
│             Tool             │   Uses   │ Failures │   Rate   │
├──────────────────────────────┼──────────┼──────────┼──────────┤
│edit_file                     │   534    │    92    │   17%    │
│read_file                     │   325    │    6     │    2%    │
│list_directory                │    6     │    0     │    0%    │
│thinking                      │    12    │    0     │    0%    │
│create_file                   │    16    │    0     │    0%    │
│diagnostics                   │    49    │    0     │    0%    │
│grep                          │   234    │    0     │    0%    │
│find_path                     │    65    │    0     │    0%    │
│terminal                      │    38    │    0     │    0%    │
└──────────────────────────────┴──────────┴──────────┴──────────┘
```


Release Notes:

- N/A
2025-04-29 10:37:31 -04:00
Bennet Bo Fenner
4812c9094b agent: Support images via @file and the file context picker (#29596)
Release Notes:

- agent: Add support for @mentioning images
- agent: Add support for including images via file context picker

---------

Co-authored-by: Oleksiy Syvokon <oleksiy.syvokon@gmail.com>
2025-04-29 16:26:27 +02:00
Danilo Leal
fcef101227 agent: Only show expand message editor when focused on it (#29595)
Simplifying the UI as much as possible.

Release Notes:

- N/A
2025-04-29 09:38:53 -03:00
Danilo Leal
7e25460708 agent: Add message editor UI improvements (#29594)
Probably the most relevant change in this PR is the commented-out (still
pending) line-number diffs. Aside from this, the commits are pretty
descriptive.

Release Notes:

- N/A
2025-04-29 09:38:44 -03:00
Danilo Leal
9b37206147 extensions_ui: Add design changes to expose the filters more (#29582)
Closes https://github.com/zed-industries/zed/issues/28086

The main motivator for this change is to have the "MCP Servers" filter
more clearly visible. And because of this, all other filters end up more
visible, as they're not in a dropdown menu anymore. Ended up pushing
some other small changes here and there as well. This is our final
product:

<img
src="https://github.com/user-attachments/assets/16ac78b6-72d9-4a8a-801b-b4b992221331"
width="700"/>

Release Notes:

- N/A
2025-04-29 07:54:39 -03:00
Conrad Irwin
756fcd0733 Git tweaks (#28791)
Release Notes:

- git: Add a `git_panel.sort_by_path` setting to mix untracked/tracked
files in the diff list.
- git: Remove the "•" placeholder for "Tracked". The commit button still says
"Commit Tracked" by default, and the placeholder was misinterpreted to mean
"partially staged". Hovering over the button will show you which files
are tracked (in addition to the yellow square-with-a-dot-in-it).
- Increase the default value of `expand_excerpt_lines` from 3 to 5. This
makes it faster to see more context in the git diff view.

---------

Co-authored-by: Birk Skyum <birk.skyum@pm.me>
Co-authored-by: Peter Tripp <peter@zed.dev>
2025-04-28 23:42:23 -06:00
Peter Tripp
3fd37799b4 freebsd: Fix failure to build (#29587)
main was failing to build on FreeBSD.
[joblink](https://github.com/zed-industries/zed/actions/runs/14721383651/job/41315738893)

```
  error[E0425]: cannot find value `platform` in this scope
     --> crates/terminal/src/terminal_settings.rs:298:36
      |
  298 |         let shell_name = format!("{platform}Exec");
      |                                    ^^^^^^^^ not found in this scope
  
  error[E0425]: cannot find value `platform` in this scope
     --> crates/terminal/src/terminal_settings.rs:304:46
      |
  304 |             .read_value(&name(&format!("env.{platform}")))
      |                                              ^^^^^^^^ not found in this scope
```

CC: @P1n3appl3 

Release Notes:

- N/A
2025-04-28 23:55:49 -04:00
Conrad Irwin
ab180855de Debug console tweaks (#29586)
Closes #ISSUE

Release Notes:

- N/A

---------

Co-authored-by: Anthony Eid <hello@anthonyeid.me>
Co-authored-by: Cole Miller <m@cole-miller.net>
2025-04-28 21:53:57 -06:00
Michael Sloan
2beefc8158 Fix gemini model token limits (#29584)
Release Notes:

- N/A
2025-04-29 03:12:59 +00:00
Marshall Bowers
5092f0f18b collab: Sync model request overages to Stripe (#29583)
This PR adds syncing of model request overages to Stripe.

Release Notes:

- N/A
2025-04-28 23:06:30 -04:00
Ben Kunkle
3a212e72a4 Fix data loss when project settings opened with ".zed" in file_scan_exclusions (#29578)
Closes #28640

Before creating an entry for a file opened with `open_local_file`, make
sure it doesn't exist, in addition to checking that it isn't already
tracked in the workspace

Release Notes:

- Fixed an issue where the project settings file would be truncated when
opened with `zed: open project settings` if the ".zed" directory was
excluded from the files scanned in a workspace (in
"file_scan_exclusions")
2025-04-28 22:25:40 -04:00
Peter Tripp
4dc8ce8cf7 ollama: Add Qwen3 and Gemma3 (default to 16K context) (#29580)
If you have the VRAM you can increase the context by adding this to your
settings.json:

```json
  "language_models": {
    "ollama": {
      "available_models": [
        { "max_tokens": 65536, "name": "qwen3", "display_name": "Qwen3-64k" }
      ]
    }
  },
```

Release Notes:

- ollama: Add support for Qwen3. Defaults to 16K token context. See:
[Assistant Configuration
Docs](https://zed.dev/docs/assistant/configuration#ollama-context) to
increase.
2025-04-28 21:44:28 -04:00
Marshall Bowers
2cc5a0de26 zed_extension_api: Fork new version of extension API (#29579)
This PR forks a new version of the `zed_extension_api` in preparation
for new changes.

Release Notes:

- N/A
2025-04-29 01:24:13 +00:00
Max Brunsfeld
bc665b2a76 Ensure thread's model is initialized once settings are loaded
Also, avoid showing the token threshold warning when the thread has no model.

Co-authored-by: Michael Sloan <mgsloan@gmail.com>
2025-04-28 17:27:56 -07:00
Max Brunsfeld
17903a0999 Associate each thread with a model (#29573)
This PR makes it possible to use different LLM models in the agent
panels of two different projects, simultaneously. It also properly
restores a thread's original model when restoring it from the history,
rather than having it use the default model. As before, newly-created
threads will use the current default model.

Release Notes:

- Enabled different project windows to use different models in the agent
panel
- Enhanced the agent panel so that when revisiting old threads, their
original model will be used.

---------

Co-authored-by: Richard Feldman <oss@rtfeldman.com>
2025-04-28 23:43:16 +00:00
Smit Barmase
5102c4c002 gpui: Fix markdown wrapped background not correctly rendering (#29571)
Closes #29532

Fixes a case where the markdown background was not rendering correctly.
This regression was introduced in
https://github.com/zed-industries/zed/pull/26454.

<img
src="https://github.com/user-attachments/assets/1e64930f-c98e-4042-a20e-46eed03293d6"
alt="image" width="400" />


Release Notes:

- Fixed an issue where markdown code blocks did not wrap correctly.
2025-04-29 04:48:24 +05:30
Smit Barmase
2139219832 editor: Fix selection and bracket pair highlight not appearing on collab updates (#29558)
This PR fixes a bug where selection and bracket pair highlights would not
update when new text was added via collab.

Release Notes:

- Fixed an issue where selection and bracket pair highlights would not
update when new text was added via collab.

---------

Co-authored-by: Ben Kunkle <ben@zed.dev>
2025-04-29 04:33:18 +05:30
Marshall Bowers
9abeedf0c6 collab: Rename symbols for existing Stripe synchronization (#29570)
This PR renames the symbols for the existing Stripe synchronization.

This will make things clearer once the new synchronization job for the
new billing is added.

Release Notes:

- N/A
2025-04-28 22:37:18 +00:00
Mikayla Maki
1d7c86bf0d Simplify the SerializableItem::cleanup implementation (#29567)
Release Notes:

- N/A

---------

Co-authored-by: Julia Ryan <juliaryan3.14@gmail.com>
2025-04-28 22:15:24 +00:00
Marshall Bowers
17703310ae collab: Avoid creating duplicate Stripe customers (#29566)
This PR makes it so we check for an existing Stripe customer by email
address before attempting to create a new one.

This should avoid the case where we end up creating multiple Stripe
customers for the same user.

Release Notes:

- N/A
2025-04-28 22:04:05 +00:00
Danilo Leal
bbe8d6a654 agent: Cancel pending in-edit user message upon new message submit (#29565)
Previously, if you clicked on a user message to edit it, and then, while
the user message still had a pending editor, sent a new message via the
textarea, the whole thread would be grayed out because we hadn't
dismissed the to-be-edited pending user message. That's now fixed.

Release Notes:

- agent: Fixed a bug that would make the whole thread be grayed out upon
sending a new message while a user message had a pending edit.
2025-04-28 18:51:41 -03:00
Michael Sloan
bbc66748dd Make thread context wait on detailed summary + remove "Summarizing context..." (#29564)
This moves summarization task management out of `context_store`. The
code there was draining a Vec of tasks to block on, but this is no
longer a good fit for message_editor's context loading. It needs to be
able to repeatedly await on the thread summarization tasks involved in
the context.

Discussed with Danilo, and he thinks it'd be good to remove the current
"Summarizing context" anyway since it causes layout shift. If message
send is blocked on summarizing, the pulsing context pill is sufficient
for now. This UI change made this overall change more straightforward.

Release Notes:

- N/A
2025-04-28 21:21:20 +00:00
Oleksiy Syvokon
99df1190a9 agent: Include grep-related instructions in the prompt only if the tool is available (#29536)
This change updates the system prompt to conditionally include
`grep`-related instructions based on whether the `grep` tool is enabled.

Implementation details:
1. Add a `has_tool` handlebars helper.
2. Pass the `model` to all locations where the prompt is built.
3. Use `{{#if has_tool "grep"}}` in the system prompt to gate
`grep`-specific instructions.
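
For illustration, here is a minimal, self-contained sketch of how such a helper can be registered with the `handlebars` crate and used as an `#if` subexpression. The tool list and wiring are assumptions for the example, not Zed's actual code:

```rust
use handlebars::{handlebars_helper, Handlebars};

// Hypothetical stand-in for the set of tools enabled by the active profile.
const ENABLED_TOOLS: &[&str] = &["read_file", "find_path"];

// `has_tool` returns a boolean JSON value, so it can be used inside `#if`.
handlebars_helper!(has_tool: |name: str| ENABLED_TOOLS.contains(&name));

fn main() {
    let mut hbs = Handlebars::new();
    hbs.register_helper("has_tool", Box::new(has_tool));

    // The parenthesized form marks the subexpression that calls the helper.
    let prompt = hbs
        .render_template(
            "{{#if (has_tool \"grep\")}}Use grep to search.{{else}}Ask to enable a search tool.{{/if}}",
            &(),
        )
        .unwrap();
    assert_eq!(prompt, "Ask to enable a search tool.");
}
```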

Testing:
- Unit tests for the `has_tool` helper.
- Unit tests to verify that `grep`-related instructions are included /
omitted from the prompt as appropriate.
- Manual agent evaluation:
- Setup: Asked the Agent "List all impls of MyTrait in the project"
using a custom "No tools" profile (all tools disabled).
- Before the change: The Agent attempted to call `grep`, encountered an
error, then realized the tool was unavailable.
- After the change: The Agent immediately asked to enable a search tool.

Note: in principle, `grep`/`read_file` tool descriptions alone might be
enough, but to confirm this we need more evaluation. If it turns out to
be true, we'll be able to remove grep-specific instructions from the
system prompt and undo this change.

Release Notes:

- N/A
2025-04-28 19:47:40 +00:00
Peter Tripp
0e477e7db9 Less log spam (non git worktrees; saving with no LSP) (#29557)
- See: https://github.com/zed-industries/zed/discussions/29541
- `failed to get git blame data:` occurred whenever opening a file that
does not have git blame data while `git.inline_blame.enabled` = true
(the default). Notably this would be triggered whenever you opened your
settings or keymap (unless ~/.config/zed was git managed).
- `No language server found to format buffer` triggered whenever you
saved a buffer with `format_on_save` (the default for most languages)
but had no LSP configured for this file type (e.g. Plain Text).


Release Notes:

- N/A
2025-04-28 19:09:33 +00:00
Max Brunsfeld
0afb980f7b Move Show Code Actions lower in editor context menu (#29556)
The 'Go to Definition' action is more commonly used.

Release Notes:

- N/A
2025-04-28 18:42:07 +00:00
Anthony Eid
d360f77796 debugger: Fix bug where args from debug config weren't sent to adapters (#29445)
I added some tests to ensure that this regression doesn't happen again.
This also fixes the cargo test locators, which were debugging all tests in
a module instead of just the single test a user selects.

Release Notes:

- N/A
2025-04-28 18:20:03 +00:00
Ben Kunkle
92b9bc599d format: Minor logging improvements (#29554)
Closes #ISSUE

Release Notes:

- N/A *or* Added/Fixed/Improved ...
2025-04-28 18:17:21 +00:00
5brian
ed367e1636 vim: Add neovim 0.11 default mappings (#28602)
Update the keymap to include:
https://neovim.io/doc/user/news-0.11.html#_defaults

This does conflict with `gr` (replace with register), though; is `gR` a
good alternative?

Release Notes:

- vim: Update the keymap to include: https://neovim.io/doc/user/news-0.11.html#_defaults
- vim: Replace with register has been remapped from `gr` to `gR`.
2025-04-28 14:14:43 -04:00
Michael Sloan
b41ffae161 Method renaming intended to be included in #29551 (#29553)
`text_hover_view` --> `ContextPillHover::new_text`

Release Notes:

- N/A
2025-04-28 18:00:53 +00:00
Ben Kunkle
ef33666701 linux(x11): Add support for pasting images from clipboard (#29387)
Closes:
https://github.com/zed-industries/zed/pull/29177#issuecomment-2823359242

Removes dependency on
[quininer/x11-clipboard](https://github.com/quininer/x11-clipboard) as
it is in [maintenance
mode](https://github.com/quininer/x11-clipboard/issues/19).

X11 clipboard functionality is now built into GPUI, which was
accomplished by stripping the non-X11-related code/abstractions from
[1Password/arboard](https://github.com/1Password/arboard) and extending
it to support all image formats already supported by GPUI on Wayland and
macOS.

A benefit of switching over to the `arboard` implementation is that we
now make an attempt to have an X11 "clipboard manager" (if available -
something the user has to set up themselves) save the contents of the
clipboard (if the last copy operation was within Zed) so that the copied
contents can still be pasted once Zed has completely stopped.

Release Notes:

- Linux(X11): Add support for pasting images from clipboard
2025-04-28 13:55:26 -04:00
Marshall Bowers
cd86905ebe language_models: Pass up mode from the LanguageModelRequest (#29552)
This PR makes it so we pass up the `mode` from the
`LanguageModelRequest` when interacting with the Zed provider instead of
passing a hard-coded value.

Release Notes:

- N/A
2025-04-28 17:38:55 +00:00
Michael Sloan
abb48b7711 agent: Improve attached context display and show hovers for symbol / selection / rules / thread (#29551)
* Brings back hover popover of selection context.

* Adds hover popover for symbol, rules, and thread context.

* Makes context attached to messages display the names / content at
attachment time.

* Adds the file name as the displayed parent of symbol context.

* Brings back `impl Component for AddedContext`

Release Notes:

- N/A

---------

Co-authored-by: Bennet Bo Fenner <bennet@zed.dev>
2025-04-28 16:58:18 +00:00
AidanV
8afac388bb vim: Fix 't' motion to start of soft wrapped line (#29303)
Closes #28853

Release Notes:

- Fixed the `'t'` motion landing on top of the character at the beginning
of a soft-wrapped line instead of before it
2025-04-28 10:55:15 -06:00
alexfertel
53b36b328e Update docs around vim's substitute command (#29404)
With the introduction of
https://github.com/zed-industries/zed/pull/28138, the current vim docs
became stale.

This PR makes a small update to the docs to reflect this.
2025-04-28 16:53:01 +00:00
Marshall Bowers
ce93961fe0 agent: Add "max mode" toggle (#29549)
This PR adds a "max mode" toggle to the Agent panel, for models that
support it.

Only visible to folks in the `new-billing` feature flag.

Icon is just a placeholder.

Release Notes:

- N/A
2025-04-28 16:50:47 +00:00
Marshall Bowers
e3c987e2fb zed: Don't feature-gate zed: open account settings action (#29542)
This PR removes the feature-gating of the `zed: open account settings`
action, as everyone has access to the account page now.

Release Notes:

- N/A
2025-04-28 15:40:24 +00:00
Piotr Osiewicz
4dc0551105 debugger_ui: Show zoom buttons only when the pane is hovered (#29543)
Closes #ISSUE

Release Notes:

- N/A
2025-04-28 17:36:54 +02:00
chbk
bf9e5b4f76 Fix Python builtin type highlighting (#29475)
| Zed 0.183.11 | With this PR |
| --- | --- |
|
![0.183.11](https://github.com/user-attachments/assets/6bb16c9b-2d6a-4424-a2ea-9ff051eb3085)
|
![pr](https://github.com/user-attachments/assets/d39d12d1-e6c3-4c06-8829-c5ee508ae83a)
|

```python
class A:
  @classmethod
  def list(cls, list: list) -> list:
    return list

A.list(list())
A.list(list=list())
A.list(list=list(A.list(list())))

list: list = list()
A.list(list)
A.list(list=list)
A.list(list=A.list(list))
```

Release Notes:

  - Improved Python builtin type highlighting
2025-04-28 11:34:55 -04:00
Tuur Vanhoutte
cfb8cae29c docs: Fix linux crash logs location (#29444)
Release Notes:

- N/A
2025-04-28 15:33:19 +00:00
Mani Rash Ahmadi
68e0105627 Agent: Include partial output if terminal tool fails (#29115)
This PR addresses the behavior of the agent's terminal tool when the
executed command is interrupted or fails after producing some output.
Currently, if the command doesn't finish successfully, any partial
output captured before the interruption/failure is discarded, and only
an error message (or a generic cancellation message) is returned to the
LLM.

This change modifies the `run_command_limited` function in the terminal
tool to catch errors when awaiting the command's status (which includes
interruptions). In the case of such an error, it now includes any
partial stdout/stderr captured up to that point within the error message
returned to the `ToolUseState`. This ensures the LLM receives the
partial context even when the command doesn't complete cleanly, framed
appropriately as part of an error/interruption message.
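
As a rough illustration of the idea (simplified, with assumed names; not the actual `run_command_limited` code), the error path now carries the captured output along:

```rust
use std::process::ExitStatus;

// Simplified sketch: on success return the output as-is; on failure or
// interruption, fold the partial output into the error message instead of
// discarding it.
fn finish_command(
    status: Result<ExitStatus, std::io::Error>,
    partial_output: String,
) -> Result<String, String> {
    match status {
        Ok(status) if status.success() => Ok(partial_output),
        Ok(status) => Err(format!(
            "Command failed ({status}). Partial output before failure:\n{partial_output}"
        )),
        Err(err) => Err(format!(
            "Command was interrupted ({err}). Partial output before interruption:\n{partial_output}"
        )),
    }
}

fn main() {
    let interrupted = std::io::Error::new(std::io::ErrorKind::Interrupted, "signal: 2");
    let result = finish_command(Err(interrupted), "Compiling zed v0.1.0\n".to_string());
    // The LLM still sees what the command printed before it was cut short.
    println!("{}", result.unwrap_err());
}
```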

Closes #29101

Release Notes:

- N/A
2025-04-28 12:25:11 -03:00
Marshall Bowers
e98e6c7426 title_bar: Put plan behind new-billing feature flag (#29540)
This PR puts the displaying of the plan in the user menu behind the
`new-billing` feature flag instead of the old `zed-pro` feature flag.

Release Notes:

- N/A
2025-04-28 15:23:25 +00:00
Finn Evers
3a1bd38503 reqwest_client: Only register proxies with valid proxy URIs (#27773)
Closes #27641

This PR fixes proxy URIs being registered with the client even when they
are not valid proxy URIs.

Whilst investigating #27641, I noticed that currently any proxy URI
passed to `ReqwestClient::proxy_and_user_agent` will be assigned to the
created client, even if the URI is not a valid proxy URI. Given a test
as an example:

We create a URI here and pass it as a proxy to
`ReqwestClient::proxy_and_user_agent`:

https://github.com/zed-industries/zed/blob/main/crates/reqwest_client/src/reqwest_client.rs#L272-L273

In `ReqwestClient::proxy_and_user_agent` we take the proxy parameter here

9b40770e9f/crates/reqwest_client/src/reqwest_client.rs (L46)

and set it unconditionally here:

9b40770e9f/crates/reqwest_client/src/reqwest_client.rs (L62)

, not considering at all whether the proxy was successfully created
above. In conclusion, we currently do not actually check whether a proxy
was successfully created, but rather whether a URI is equal to itself,
which trivially holds. The existing test for a malformed proxy URI


9b40770e9f/crates/reqwest_client/src/reqwest_client.rs (L293-L297)

does not check whether invalid proxies cause an error, but rather checks
whether `http::Uri::from_static` panics on an invalid URI, [which it
does as
documented](https://docs.rs/http/latest/http/uri/struct.Uri.html#panics).
Thus, the tests currently do not really check anything proxy-related and
invalid proxies are assigned as valid proxies.

---

This PR fixes the behaviour by considering whether the proxy was
actually properly parsed and only assigning it if that is the case.
Furthermore, it improves logging in case of errors so issues like the
linked one are easier to debug (for the linked issue, the log will now
state that the proxy scheme is not supported).
Lastly, it also updates the test for a malformed proxy URI. The test now
actually checks that malformed proxy URIs are not registered for the
client rather than testing the `http` crate.
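
A minimal sketch of that idea (assumed function name and error handling; not the actual `ReqwestClient` code): the proxy is parsed first and only attached to the builder when parsing succeeds:

```rust
// Sketch only: attach the proxy to the client solely when it parses as a
// valid proxy URI; otherwise report the error and build the client without it.
fn build_client(proxy_uri: Option<&str>) -> reqwest::Result<reqwest::Client> {
    let mut builder = reqwest::Client::builder();
    if let Some(uri) = proxy_uri {
        match reqwest::Proxy::all(uri) {
            Ok(proxy) => builder = builder.proxy(proxy),
            // Previously the URI was assigned unconditionally; now an invalid
            // proxy (e.g. an unsupported scheme) is logged and skipped.
            Err(err) => eprintln!("ignoring invalid proxy URI {uri}: {err}"),
        }
    }
    builder.build()
}
```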

The update also initially caused the [test for a `socks4a`
proxy](9b40770e9f/crates/reqwest_client/src/reqwest_client.rs (L280C1-L282C50))
to fail. This happened because the reqwest library introduced support
for `socks4a` proxies in [version
0.12.13](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md#v01213).
Thus, this PR includes a bump of the reqwest library to add proper
support for socks4a proxies.

Release Notes:

- Added support for socks4a proxies.

---------

Co-authored-by: Peter Tripp <peter@zed.dev>
2025-04-28 11:12:16 -04:00
张小白
8a69d252f5 Fix a bug in blob-store.sh (#29537)
This will overwrite the `acl` parameter that we pass in

Release Notes:

- N/A
2025-04-28 11:10:58 -04:00
jneem
bf30beacc2 Honor default_mode in NormalBefore (#29518)
Addresses
[this](https://github.com/zed-industries/zed/pull/28103#issuecomment-2832038415)
comment.

Release Notes:

- Improved default helix keybindings
2025-04-28 08:51:06 -06:00
Danilo Leal
2a0be48875 agent: Simplify elements of the thread design (#29533)
Namely, this PR removes the layout shift when you click on a user
message to edit it and displays the feedback disclaimer only upon
hovering the thumbs up/down button container.

Release Notes:

- N/A
2025-04-28 11:36:06 -03:00
Hendrik Sollich
1c4ba07b20 theme_selector: Don't select last theme when fuzzy searching (#28278)
The theme selector should select the last match when opening, to keep
the current theme active, but it should select the first match when
searching, to pick the best match.

fixes #28081


https://github.com/user-attachments/assets/b46b9742-4715-4c7a-8f17-2c19a8668333

Release Notes:

- Fixed selecting the correct theme when searching

---------

Co-authored-by: Peter Tripp <peter@zed.dev>
2025-04-28 14:29:17 +00:00
tidely
8a717abe0d ollama: Fix build with default features (#29502)
The `ollama` crate has a `use schemars::JsonSchema` statement even when
building with default features, which don't include the `schemars`
crate.

Release Notes:

- N/A
2025-04-28 09:58:10 -04:00
Danilo Leal
f735c90c3f agent: Bring title editing back to text threads (#29425)
This also fixes a little UI bug where the text thread title would push
the buttons away from the UI when there was still space.

Release Notes:

- agent: Made text thread titles editable again.

---------

Co-authored-by: Michael Sloan <mgsloan@gmail.com>
2025-04-28 09:09:19 -03:00
Piotr Osiewicz
ddfeb202a3 editor: Show tooltips on breakpoints (#29523)
Closes #ISSUE

Release Notes:

- N/A
2025-04-28 12:28:13 +02:00
Oleksiy Syvokon
9bd0828303 agent tools: Make read_file.end_line inclusive (#29524)
One motivation is that the outlines returned by `read_file` for large
files list line numbers assuming an inclusive `end_line`. As a result,
when the agent uses these outlines for `read_file` calls, it would
otherwise miss the last line.
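
For illustration, a tiny sketch (not the tool's actual implementation) of what an inclusive `end_line` means when slicing a file into the requested range:

```rust
// Hypothetical line-slicing logic: with an inclusive, 1-based `end_line`, the
// requested range includes the last line of the span.
fn slice_lines(text: &str, start_line: usize, end_line: usize) -> String {
    text.lines()
        .skip(start_line.saturating_sub(1))
        .take(end_line.saturating_sub(start_line) + 1) // inclusive of `end_line`
        .collect::<Vec<_>>()
        .join("\n")
}

fn main() {
    let text = "fn a() {}\nfn b() {}\nfn c() {}";
    // Lines 2..=3 return both `fn b` and `fn c`.
    assert_eq!(slice_lines(text, 2, 3), "fn b() {}\nfn c() {}");
}
```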

Release Notes:

- N/A
2025-04-28 09:37:13 +00:00
Julia Ryan
4dff47ae20 Add searchable global tab switcher (#28047)
resolves #24655
resolves #23945

I haven't yet added a default binding for the new command. #27797 added `:ls` and
`:buffers` which in my opinion should use the global searchable version
given that that matches the vim semantics of those commands better than
just showing the tabs in the local pane.

There's also a question of what to do when you select a tab from another
pane: should the focus jump to that pane, or should that tab move to the
currently focused pane? For now I've implemented the former.

Release Notes:

- Added `tab_switcher::ToggleAll` to search open tabs from all panes and focus the selected one.

---------

Co-authored-by: Conrad Irwin <conrad.irwin@gmail.com>
2025-04-28 09:21:27 +00:00
Smit Barmase
52eef3c35d editor: Fix inconsistent relative indent when using tab with multi cursors (#29519)
Do not insert hard/soft tabs for cursors at the suggested indent level
if any other cursor lies before the suggested indent level. This PR
brings us one step closer to fixing
https://github.com/zed-industries/zed/issues/26157.

Before:


https://github.com/user-attachments/assets/8fd5cde4-99f4-4363-9292-5da8dadab658

After:


https://github.com/user-attachments/assets/17c9f8ca-5842-452b-8665-7c7138d50162

Release Notes:

- Fixed an issue where using tab with multiple cursors would result in
inconsistent relative indentation across lines.
2025-04-28 13:13:53 +05:30
tidely
f060918b57 zed: Remove unnecessary clones (#29513)
`App::http_client` and `Client::http_client` both return an owned `Arc`
that they clone internally. This means we can remove unnecessary clones
when calling these methods.

Release Notes:

- N/A
2025-04-27 19:23:37 -07:00
Michael Sloan
609c528ceb Refactor markdown formatting utilities to avoid building intermediate strings (#29511)
These were nearly always used when using `format!` / `write!` etc, so it
makes sense to not have an intermediate `String`.
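
A small before/after sketch of the pattern (illustrative names, not the actual utilities): the formatting helper takes a `std::fmt::Write` target instead of returning a `String`:

```rust
use std::fmt::Write;

// Before: builds and returns a temporary String.
fn inline_code_string(text: &str) -> String {
    format!("`{text}`")
}

// After: writes directly into the caller's buffer, no intermediate allocation.
fn write_inline_code(out: &mut impl Write, text: &str) -> std::fmt::Result {
    write!(out, "`{text}`")
}

fn main() -> std::fmt::Result {
    let mut doc = String::new();
    write!(doc, "Call {} to start.", inline_code_string("run()"))?;

    let mut doc2 = String::new();
    doc2.push_str("Call ");
    write_inline_code(&mut doc2, "run()")?;
    doc2.push_str(" to start.");

    // Same output either way; the second form skips the temporary String.
    assert_eq!(doc, doc2);
    Ok(())
}
```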

Release Notes:

- N/A
2025-04-27 19:04:51 +00:00
Peter Tripp
6db974dd32 docs: TOML LSP (Taplo) requires a manual restart to apply changes (#29504)
Add note about manual restart
Remove taplo LSP settings example (broken)

Release Notes:

- N/A
2025-04-27 11:53:55 -04:00
Michael Sloan
60ec55b179 Use u64 instead of usize in ElementId (#29493)
Truncation to a 32 bit `usize` could cause two distinct IDs to be
considered the same element.
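
A quick illustration of the failure mode on a 32-bit target (`u32` stands in for a 32-bit `usize` here):

```rust
fn main() {
    // Two distinct 64-bit IDs...
    let a: u64 = 0x1_0000_0001;
    let b: u64 = 0x2_0000_0001;
    assert_ne!(a, b);
    // ...collide once truncated to 32 bits, the width of `usize` on 32-bit
    // platforms, so they would be treated as the same element.
    assert_eq!(a as u32, b as u32);
}
```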

Release Notes:

- N/A
2025-04-27 02:31:25 +00:00
Oleksiy Syvokon
bb7a5b13df [WIP] markdown: Add a test to reproduce the parser's panic (#29479)
Backtrace of the panic in the Agent pane:
```
Thread "<unnamed>" panicked with "called `Option::unwrap()` on a `None` value" at crates/markdown/src/parser.rs:264:55
3fdbc3090d/src/crates/markdown/src/parser.rs (L264) (may not be uploaded, line may be incorrect if files modified)
   0: zed::reliability::init_panic_hook::{{closure}}
             at /home/silver/develop/zed/crates/zed/src/reliability.rs:56:29
   1: <alloc::boxed::Box<F,A> as core::ops::function::Fn<Args>>::call
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/alloc/src/boxed.rs:1990:9
      std::panicking::rust_panic_with_hook
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/std/src/panicking.rs:839:13
   2: std::panicking::begin_panic_handler::{{closure}}
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/std/src/panicking.rs:697:13
   3: std::sys::backtrace::__rust_end_short_backtrace
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/std/src/sys/backtrace.rs:168:18
   4: rust_begin_unwind
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/std/src/panicking.rs:695:5
   5: core::panicking::panic_fmt
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/core/src/panicking.rs:75:14
   6: core::panicking::panic
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/core/src/panicking.rs:145:5
   7: core::option::unwrap_failed
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/core/src/option.rs:2015:5
   8: core::option::Option<T>::unwrap
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/option.rs:978:21
      markdown::parser::parse_markdown
             at /home/silver/develop/zed/crates/markdown/src/parser.rs:264:37
   9: markdown::Markdown::parse::{{closure}}
             at /home/silver/develop/zed/crates/markdown/src/markdown.rs:282:51
  10: <core::pin::Pin<P> as core::future::future::Future>::poll
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/future/future.rs:124:9
  11: async_task::raw::RawTask<F,T,S,M>::run
             at /home/silver/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/async-task-4.7.1/src/raw.rs:557:17
  12: async_task::runnable::Runnable<M>::run
             at /home/silver/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/async-task-4.7.1/src/runnable.rs:781:18
  13: gpui::platform::linux::dispatcher::LinuxDispatcher::new::{{closure}}::{{closure}}
             at /home/silver/develop/zed/crates/gpui/src/platform/linux/dispatcher.rs:44:25
  14: std::sys::backtrace::__rust_begin_short_backtrace
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/std/src/sys/backtrace.rs:152:18
  15: std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/std/src/thread/mod.rs:559:17
  16: <core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/panic/unwind_safe.rs:272:9
  17: std::panicking::try::do_call
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/std/src/panicking.rs:587:40
  18: __rust_try
  19: std::panicking::try
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/std/src/panicking.rs:550:19
      std::panic::catch_unwind
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/std/src/panic.rs:358:14
      std::thread::Builder::spawn_unchecked_::{{closure}}
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/std/src/thread/mod.rs:557:30
  20: core::ops::function::FnOnce::call_once{{vtable.shim}}
             at /home/silver/.rustup/toolchains/1.86-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/ops/function.rs:250:5
  21: <alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/alloc/src/boxed.rs:1976:9
      <alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/alloc/src/boxed.rs:1976:9
      std::sys::pal::unix::thread::Thread::new::thread_start
             at /rustc/05f9846f893b09a1be1fc8560e33fc3c815cfecb/library/std/src/sys/pal/unix/thread.rs:106:17
  22: start_thread
             at ./nptl/pthread_create.c:447:8
  23: clone3
             at ./misc/../sysdeps/unix/sysv/linux/x86_64/clone3.S:78:0

Segmentation fault
```


Release Notes:

- N/A *or* Added/Fixed/Improved ...

---------

Co-authored-by: Conrad Irwin <conrad.irwin@gmail.com>
2025-04-26 15:31:06 +00:00
Piotr Osiewicz
1e47dfce79 debugger: Improve focus states (#29469)
Closes #ISSUE

Release Notes:

- N/A *or* Added/Fixed/Improved ...
2025-04-26 15:02:07 +02:00
Antonio Scandurra
3fdbc3090d Fix error when deserializing Gemini streams (#29470)
Sometimes Gemini would report `Content` without a `parts` field.

Release Notes:

- Fixed a bug that would sometimes cause Gemini models to fail streaming
their response.
2025-04-26 11:51:04 +00:00
Smit Barmase
f2b4004c00 editor: Improve code completions by prioritizing prefix matching (#29456)
- Use common prefix length as the primary matching criterion (see the sketch
below).
- Tests added for multiple cases.
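
A minimal sketch of the ranking idea (illustrative only; the real implementation lives in the completion-sorting code and uses the fuzzy matcher's scores):

```rust
// Rank candidates by the length of their common prefix with the query first,
// then fall back to the fuzzy score.
fn common_prefix_len(query: &str, candidate: &str) -> usize {
    query
        .chars()
        .zip(candidate.chars())
        .take_while(|(a, b)| a.eq_ignore_ascii_case(b))
        .count()
}

fn main() {
    let query = "clo";
    let mut candidates = vec![("last_close", 0.65), ("clone", 0.61), ("close", 0.60)];
    candidates.sort_by(|a, b| {
        common_prefix_len(query, b.0)
            .cmp(&common_prefix_len(query, a.0))
            .then(b.1.partial_cmp(&a.1).unwrap())
    });
    // Prefix matches now come first: ["clone", "close", "last_close"]
    assert_eq!(candidates[0].0, "clone");
    assert_eq!(candidates[2].0, "last_close");
}
```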

Before:
<img width="500" alt="image"
src="https://github.com/user-attachments/assets/8c653225-cac2-41bd-95f0-0fb8724284c9"
/>

After:
<img width="500" alt="image"
src="https://github.com/user-attachments/assets/a3d59399-cff2-435d-9b56-69a530f35da4"
/>

Release Notes:

- Fixed issues with code completions where completions with a matched prefix
would not be shown at the top.
2025-04-26 17:15:48 +05:30
Oleksiy Syvokon
ec5821f76d docs: Fix relative navigation links (#29468)
Release Notes:

- N/A
2025-04-26 10:29:05 +00:00
Piotr Osiewicz
e22cae6459 chore: Update .gitignore (#29466)
Closes #ISSUE

Release Notes:

- N/A
2025-04-26 10:00:36 +00:00
Oleksiy Syvokon
21bafd7856 docs: Fix relative navigation links (#29465)
Release Notes:

- N/A
2025-04-26 09:42:12 +00:00
Oleksiy Syvokon
ee74edbbb1 docs: Add ‹› navigation buttons (#29461)
While visually unintrusive, these navigation links enable proper
navigation in readers and extensions like Vimium that rely on
rel=next/prev

Release Notes:

- N/A
2025-04-26 06:52:02 +00:00
Marshall Bowers
d832b8e687 Sort Cargo.toml (#29459)
This PR sorts the workspace `Cargo.toml`.

Release Notes:

- N/A
2025-04-26 04:07:09 +00:00
Marshall Bowers
539f4f1576 collab: Update billing preference endpoints with new preferences (#29458)
This PR updates the billing preference endpoints with the new
overage-related billing preferences.

Release Notes:

- N/A
2025-04-26 03:11:44 +00:00
Marshall Bowers
9a325a23e5 collab: Set overage preferences in LLM token claims (#29457)
This PR sets the user's overage preferences in the LLM token claims.

Release Notes:

- N/A
2025-04-26 02:27:47 +00:00
Marshall Bowers
ce31312268 collab: Return feature flags with authenticated user (#29455)
This PR makes it so the `GET /user` endpoint returns the user's feature
flags with the authenticated user.

Release Notes:

- N/A
2025-04-26 01:49:25 +00:00
Piotr Osiewicz
d46890978a debugger: Fix new session modal not having worktree scenarios (#29453)
Closes #ISSUE

Release Notes:

- N/A
2025-04-26 00:16:21 +00:00
Piotr Osiewicz
67615b968b debugger/tasks: Remove TaskType enum (#29208)
Closes #ISSUE

Release Notes:

- N/A

---------

Co-authored-by: Cole Miller <m@cole-miller.net>
Co-authored-by: Anthony Eid <hello@anthonyeid.me>
Co-authored-by: Conrad Irwin <conrad.irwin@gmail.com>
Co-authored-by: Anthony <anthony@zed.dev>
Co-authored-by: Conrad <conrad@zed.dev>
2025-04-26 01:44:56 +02:00
Conrad Irwin
053fafa90e Fix markdown escaping
Closes #29255

Release Notes:

- Improved handling of markdown escape sequences
2025-04-25 16:53:44 -06:00
Ben Kunkle
d23024609f askpass: Shell escape Zed path in askpass script (#29447)
Closes #29439

Add shell escaping, as well as an additional sanity check, for the Zed path
used in askpass. This caused issues on Preview and Nightly, as the standard
paths for those releases contain spaces that were not escaped appropriately,
leading to erroneous "Permission denied" errors from SSH when the askpass
script failed.
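
A minimal sketch of the escaping step (using the `shlex` crate; the script body here is a placeholder, not Zed's actual askpass script):

```rust
// Quote the binary path as a single shell word before interpolating it into
// the generated askpass script, so paths like "Zed Preview.app" survive.
fn askpass_script(zed_cli_path: &str) -> String {
    let quoted = shlex::try_quote(zed_cli_path).expect("path contains a NUL byte");
    format!("#!/bin/sh\nexec {quoted} \"$@\"\n")
}

fn main() {
    let script = askpass_script("/Applications/Zed Preview.app/Contents/MacOS/cli");
    // The space in "Zed Preview.app" is now safely quoted.
    println!("{script}");
}
```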

Release Notes:

- Fixed a missing shell-escape in askpass resulting in erroneous
"Permission denied" errors when trying to connect to a remote server
over ssh (affecting preview release v0.184.1 and nightly only)
2025-04-25 21:16:43 +00:00
Smit Barmase
3961d87ae0 editor: Improve fuzzy match bucket logic for code completions (#29442)
Add a new test and improve the fuzzy match bucket logic, which results in a
far better balance between LSP and fuzzy search.

Before:
<img width="500" alt="before"
src="https://github.com/user-attachments/assets/3e8900a6-c0ff-4f37-b88e-b0e3783b7e9a"
/>

After:
<img width="500" alt="after"
src="https://github.com/user-attachments/assets/738c074c-d446-4697-aac6-9814362e88db"
/>

Release Notes:

- N/A
2025-04-26 02:14:18 +05:30
Marshall Bowers
8b910e1cd9 collab: Add LLM request overage columns to billing_preferences (#29446)
This PR adds two new columns to the `billing_preferences` table to allow
users to opt in to overages on LLM requests.

Release Notes:

- N/A
2025-04-25 20:43:43 +00:00
Michael Sloan
12c645e154 Fix inclusion of message when counting tokens from message editor (#29443)
Accidentally omitted this in #29233

Release Notes:

- N/A
2025-04-25 20:21:20 +00:00
Michael Sloan
cfb7a30724 Fix agent rules files for remote project by loading via buffer (#29440)
When using the agent with a project shared by a collaborator, rules file
loading didn't work as it was trying to read from the client's
filesystem

Release Notes:

- Fixed rules file loading when using the agent with a project shared by
a collaborator.
2025-04-25 20:06:40 +00:00
Cole Miller
7623fce4b4 Start improving support for keyboard-driven debugging (#29380)
Closes #ISSUE

Release Notes:

- N/A

---------

Co-authored-by: Piotr Osiewicz <peterosiewicz@gmail.com>
Co-authored-by: Anthony Eid <hello@anthonyeid.me>
2025-04-25 19:14:47 +00:00
Cole Miller
7f5c874a38 git: Use the CLI for loading commit SHAs and details (#29351)
Since #28065 merged we've seen deadlocks inside iconv when opening Zed
in a repository containing many submodules. These calls to iconv happen
inside libgit2, in our implementations of the methods `head_sha`,
`merge_head_shas`, and `show` on `RealGitRepository`. This PR moves
those methods to use the git CLI instead, sidestepping the issue. For
the sake of efficiency, a new `revparse_batch` method is added that uses
`git cat-file` to resolve several ref names in one invocation. I
originally intended to make `show` operate in batch mode as well (or
instead), but I can't see a good way to do that with the git CLI; `git
show` always bails on the first ref that it can't resolve, and
`for-each-ref` doesn't support symbolic refs like `HEAD`.
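
For a rough sense of the shape of batch resolution through the CLI (simplified; the actual implementation uses `git cat-file`, and per-ref error handling is omitted here):

```rust
use std::path::Path;
use std::process::Command;

// Resolve several ref names to SHAs in a single git invocation; output is one
// SHA per line, in the same order as the input refs.
fn revparse_batch(repo: &Path, refs: &[&str]) -> std::io::Result<Vec<String>> {
    let output = Command::new("git")
        .arg("rev-parse")
        .args(refs)
        .current_dir(repo)
        .output()?;
    Ok(String::from_utf8_lossy(&output.stdout)
        .lines()
        .map(|line| line.trim().to_string())
        .collect())
}

fn main() -> std::io::Result<()> {
    let shas = revparse_batch(Path::new("."), &["HEAD"])?;
    println!("{shas:?}");
    Ok(())
}
```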

Separately, I removed the calls to `show` in `MergeDetails::load`, going
back to only loading the SHAs of the various merge heads. Loading full
commit details was intended to support the inlays feature that ended up
being cut from #28065, and we can add it back in when we need it.

Release Notes:

- N/A
2025-04-25 14:46:02 -04:00
Marshall Bowers
8cc2ade21c collab: Add subscription_usage_meters table (#29433)
This PR adds a new `subscription_usage_meters` table to the LLM
database.

We'll use this to track usage of individual models over the number of
requests built-in to the plan.

Release Notes:

- N/A
2025-04-25 14:32:00 -04:00
Anthony Eid
c3177e6f5b debugger: Fix deadlock in on_app_quit with debugger running (#29372)
This fixes a deadlock that would occur when `DapStore` had its on quit
handler called. The deadlock was caused by `DapStore` spawning on the
main thread while `App::shutdown` blocks the main thread.

We added a debug_panic in GPUI that panics if a foreground task is
spawned while the App context is shutting down. This will help tests
catch hangs in `cx.on_app_quit` calls.

Release Notes:

- N/A

---------

Co-authored-by: Conrad Irwin <conrad.irwin@gmail.com>
2025-04-25 14:15:10 -04:00
Danilo Leal
c3570fbcf3 agent: Render path search results with ToolCard (#28894)
Implementing the `ToolCard` for the path_search tool. It also adds the
"jump to file" functionality if you expand the results.

Release Notes:

- N/A

---------

Co-authored-by: Richard Feldman <oss@rtfeldman.com>
Co-authored-by: Agus Zubiaga <hi@aguz.me>
2025-04-25 14:42:51 -03:00
Danilo Leal
3aa313010f agent: Make markdown code blocks uncollapsed by default (#29424)
Seeing the markdown code stream in is actually more helpful than hiding
parts of it that you may be interested in.

Release Notes:

- N/A
2025-04-25 14:39:51 -03:00
Marshall Bowers
5f9c91d05a danger: Update PR prefix pattern (#29432)
This PR updates the Danger PR prefix pattern to allow underscores (`_`)
and spaces (` `) in the prefix.

Release Notes:

- N/A
2025-04-25 17:19:56 +00:00
Conrad Irwin
6692bd9f2b Maybe fix panic (#29352)
Since around the time we shipped block diagnostics, we've been seeing an
out of range panic in the editor.

The code is heavily inlined, so the stacktrace is missing, but this
seems like a likely place where the indexing may have gone wrong.

Release Notes:

- Fixed a rare panic in the editor
2025-04-25 11:12:16 -06:00
Smit Barmase
cc57bc7c96 editor: Add setting for snippet sorting behavior for code completion (#29429)
Added `snippet_sort_order`, which determines how snippets are sorted
relative to other completion items. It can have the values `top`,
`bottom`, or `inline`, with `inline` being the default.

This mimics VS Code’s setting:
https://code.visualstudio.com/docs/editing/intellisense#_snippets-in-suggestions
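
Conceptually (hypothetical sketch, not the actual sorting code), the setting maps to a coarse bucket applied before the usual fuzzy score:

```rust
#[derive(Clone, Copy)]
enum SnippetSortOrder {
    Top,
    Inline,
    Bottom,
}

// Lower bucket values sort earlier in the completion list.
fn sort_bucket(is_snippet: bool, order: SnippetSortOrder) -> u8 {
    match (order, is_snippet) {
        (SnippetSortOrder::Top, true) => 0,    // snippets above other items
        (SnippetSortOrder::Bottom, true) => 2, // snippets below other items
        _ => 1,                                // `inline`: ranked together
    }
}

fn main() {
    assert_eq!(sort_bucket(true, SnippetSortOrder::Top), 0);
    assert_eq!(sort_bucket(true, SnippetSortOrder::Inline), 1);
    assert_eq!(sort_bucket(false, SnippetSortOrder::Top), 1);
    assert_eq!(sort_bucket(true, SnippetSortOrder::Bottom), 2);
}
```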

Release Notes:

- Added support for `snippet_sort_order` to control snippet sorting
behavior in code completion menus.
2025-04-25 22:35:12 +05:30
Agus Zubiaga
c157b1c455 rules: How to run clippy (#29247)
Tell model how to run clippy in `.rules`

Release Notes:

- N/A
2025-04-25 13:25:39 -03:00
Ben Kunkle
136e83e0b1 zlog: Fix incorrect assumption with filters (#29428)
- **do not assume logs over LEVEL_ENABLED_MAX_STATIC (the static global
log level) are enabled**
- **make it so filters that are just module names get overridden by
submodule path filters**

Closes #ISSUE

Release Notes:

- N/A
2025-04-25 16:11:58 +00:00
Marshall Bowers
b28756ae3f eval: Use workspace dependencies (#29430)
This PR updates the `eval` crate to use workspace dependencies.

Also did a bit of cleanup of the `Cargo.toml`.

Release Notes:

- N/A
2025-04-25 16:11:26 +00:00
Cole Miller
65401d6d7b debugger: Make debug panes zoomable (#29365)
- [x] Buttons
- [x] Make it keyboard-driven

Co-authored-by: Anthony <anthony@zed.dev>

Release Notes:

- N/A

---------

Co-authored-by: Anthony Eid <hello@anthonyeid.me>
2025-04-25 11:56:16 -04:00
Marshall Bowers
a5405fcbd7 eval: Add support for reading from a .env file (#29426)
This PR adds support for the eval to read environment variables from a
`.env` file located in the `crates/eval` directory.

For instance, you can use it to set your Anthropic API key:

```
ANTHROPIC_API_KEY=<secret>
```

Release Notes:

- N/A
2025-04-25 15:53:02 +00:00
Kirill Bulatov
4f9cadabf7 Bump Danger's packages (#29422)
https://github.com/zed-industries/zed/security/dependabot reports a
number of vulnerabilities; attempt to fix them with this dependency bump.

Release Notes:

- N/A
2025-04-25 15:21:58 +00:00
Marshall Bowers
7443f89a2e xtask: Ignore workspace-hack when checking for non-workspace dependencies (#29419)
This PR makes it so `workspace-hack` is ignored by `cargo xtask
package-conformity` when looking for non-workspace dependencies.

Also added `zed_extension_api` to the exclude list.

Release Notes:

- N/A
2025-04-25 15:01:53 +00:00
Marshall Bowers
9bee765d7f ci: Fix typo (#29421)
This PR fixes a small typo in a comment added in #29420.

Release Notes:

- N/A
2025-04-25 15:00:12 +00:00
Marshall Bowers
8c553ee9f0 ci: Add no-op job for "Run Agent Eval" workflow (#29420)
This PR adds a no-op job for the "Run Agent Eval" workflow.

This aims to avoid marking the check as failed on a PR that does not
include the `run-eval` label.

Release Notes:

- N/A
2025-04-25 10:54:12 -04:00
Oleksiy Syvokon
3389327df5 eval: Add HTML overview for evaluation runs (#29413)
This update generates a single self-contained .html file that shows an
overview of evaluation threads in the browser. It's useful for:

- Quickly reviewing results
- Sharing evaluation runs
- Debugging
- Comparing models (TBD)

Features:

- Export thread JSON from the UI
- Keyboard navigation (j/k or Ctrl + ←/→)
- Toggle between compact and full views

Generating the overview:

- `cargo run -p eval` will write this file in the run dir's root.
- Or you can call `cargo run -p eval --bin explorer` to generate it
without running evals.


Screenshot:

![image](https://github.com/user-attachments/assets/4ead71f6-da08-48ea-8fcb-2148d2e4b4db)


Release Notes:

- N/A
2025-04-25 17:49:05 +03:00
Kirill Bulatov
f106dfca42 Avoid unnecessary DB writes (#29417)
Part of https://github.com/zed-industries/zed/issues/16472

* Adds debug logging everywhere near INSERT/UPDATEs in the DB

So something like
`env RUST_LOG=debug,wasmtime_cranelift=off,cranelift_codegen=off,vte=off
cargo run` can be used to view these and debug any further writes (the
current zlog seems to process the exclusions oddly, so this may not be the
optimal RUST_LOG line).

* Removes excessive window stack serialization

Previously, it serialized unconditionally every 100ms.
Now it serializes only if the stack has changed, which is checked every 500ms.

* Removes excessive terminal serialization

Previously, it serialized its `cwd` on every `ItemEvent::UpdateTab`,
which was triggered by e.g. any character of output.
Now it serializes only if the `cwd` has changed at the next event-processing
time.
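
Illustrative sketch of the only-write-when-changed pattern described above (not the actual serialization code):

```rust
// Persist serialized state only when it differs from what was last written,
// so a 500ms poll that sees an unchanged window stack or terminal cwd
// performs no DB write.
struct Debounced {
    last_saved: Option<String>,
}

impl Debounced {
    fn maybe_save(&mut self, current: &str, save: impl FnOnce(&str)) {
        if self.last_saved.as_deref() != Some(current) {
            save(current);
            self.last_saved = Some(current.to_owned());
        }
    }
}

fn main() {
    let mut window_stack = Debounced { last_saved: None };
    window_stack.maybe_save("[window-1, window-2]", |s| println!("persisted {s}"));
    // Unchanged on the next poll: no write happens.
    window_stack.maybe_save("[window-1, window-2]", |_| panic!("should not write"));
}
```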

Release Notes:

- Fixed more excessive DB writes
2025-04-25 17:41:49 +03:00
Bennet Bo Fenner
37fa437990 agent: Allow to explicitly disable tools when using enable_all_context_servers (#29414)
Previously, all MCP tools would be included regardless of whether they were
enabled or disabled for the profile. This meant that the "Write" profile
was always using all MCP tools, even if you disabled them in the
settings.

Now, when `enable_all_context_servers` is set to `true`, we will enable
all tools from all MCP servers by default but disable the ones that are
explicitly disabled for the profile.

Also fixes an issue where the tools would not show up as enabled when
using `enable_all_context_servers: true`

Release Notes:

- agent: Fix an issue where MCP tools could not be enabled/disabled
2025-04-25 14:19:04 +00:00
Marshall Bowers
9be7bf72a4 language_models: Remove language-models feature flag (#29416)
This PR removes the `language-models` feature flag.

This feature is already generally available, so we no longer need the
feature flag.

Release Notes:

- N/A
2025-04-25 14:18:48 +00:00
Smit Barmase
357e38b471 workspace: Add right border to pinned tabs only on scroll (#29405)
Before (scrolled state, bug):
<img width="392" alt="before - with scroll"
src="https://github.com/user-attachments/assets/5f62f050-41e0-4740-8f90-9822348eaa4b"
/>

After (scrolled state, bug fixed):
<img width="344" alt="after - with scroll"
src="https://github.com/user-attachments/assets/33003358-0009-4748-8a6e-642158114b82"
/>

Before (without scrolled state, as it is):
<img width="541" alt="before - without scroll"
src="https://github.com/user-attachments/assets/42487d61-7f7c-49a7-a087-da7faf5a0a89"
/>

After (without scrolled state, as it is):
<img width="462" alt="after - without scroll"
src="https://github.com/user-attachments/assets/738a4d24-3a89-466b-8976-2bf47cfeb0f5"
/>

cc @danilo-leal 

Release Notes:

- N/A
2025-04-25 18:46:13 +05:30
Bennet Bo Fenner
ae37f3ca2e agent: Improve MCP tools compatibility with Gemini models (#29411)
Release Notes:

- agent: Improve MCP tools compatibility with Gemini models
2025-04-25 15:14:16 +02:00
Kirill Bulatov
49003d8038 When hovering paths in terminal, search worktree entries for relative ones only (#29406)
Follow-up of https://github.com/zed-industries/zed/pull/29274

Release Notes:

- N/A
2025-04-25 15:34:09 +03:00
Bennet Bo Fenner
93862838bd assistant: Fix issue when using inline assistant with Gemini models (#29407)
Closes #29020

Release Notes:

- assistant: Fix issue when using inline assistant with Gemini models
2025-04-25 12:24:21 +00:00
Julia Ryan
c39adc5242 Select collab channel filter query upon focusing (#29383)
In the process of implementing this I learned that you can also hit
escape to clear the query which is a decent workaround, but I think this
behavior more closely matches expectations. For example when you run the
"focus search" actions, those select the query.

Release Notes:

- N/A
2025-04-25 04:00:25 -07:00
Julia Ryan
ebb39d9231 Add "upstream" as a hardcoded remote name (#29382)
The ideal solution here would be the ability to pick a default remote
the first time you click on a PR or commit link from a blame, and then
store that state in the repo or project and allow you to change it
somehow.

Because that's complicated, and because the vast majority of users
follow the convention of using `upstream` and `origin`, this change just
adds `upstream` as a possible remote that takes precedence for
generating links. I've sometimes seen `origin` and `fork` used for the
same purposes, which will still work fine with this change.

Here are some sources recommending the `upstream`/`origin` convention:
-
https://www.atlassian.com/git/tutorials/comparing-workflows/forking-workflow
-
https://github.blog/open-source/git/git-2-5-including-multiple-worktrees-and-triangular-workflows/
- https://cli.github.com/manual/gh_repo_fork

The fact that the github cli renames them to those when you `gh repo
fork` is pretty strong evidence that it's worth supporting them even if
users can set arbitrary remote names or could actually want to open a PR
link on their fork.

Resolves #13511

Release Notes:

- Git blame links now prefer the `upstream` remote over `origin` if it
exists.
2025-04-25 03:59:38 -07:00
310 changed files with 13964 additions and 8321 deletions


@@ -25,6 +25,15 @@ env:
ZED_EVAL_TELEMETRY: 1
jobs:
# This is a no-op job that we run to prevent GitHub from marking the workflow
# as failed for PRs that don't have the `run-eval` label.
noop:
name: No-op
runs-on: ubuntu-latest
steps:
- name: No-op
run: echo "Nothing to do"
run_eval:
timeout-minutes: 60
name: Run Agent Eval

.gitignore (vendored): 2 changes

@@ -23,6 +23,7 @@
/crates/theme/schemas/theme.json
/crates/zed/resources/flatpak/flatpak-cargo-sources.json
/dev.zed.Zed*.json
/node_modules/
/plugins/bin
/script/node_modules
/snap
@@ -32,4 +33,5 @@ Packages
xcuserdata/
# Don't commit any secrets to the repo.
.env
.env.secret.toml

.rules: 5 changes

@@ -119,3 +119,8 @@ GPUI has had some changes to its APIs. Always write code using the new APIs:
* Use `App` references. This replaces `AppContext` which no longer exists and should NEVER be used.
* Use `Context<T>` references. This replaces `ModelContext<T>` which no longer exists and should NEVER be used.
* `Window` is now passed around explicitly. The new interface adds a `Window` reference parameter to some methods, and adds some new "*_in" methods for plumbing `Window`. The old types `WindowContext` and `ViewContext<T>` should NEVER be used.
## General guidelines
- Use `./script/clippy` instead of `cargo clippy`

Cargo.lock (generated): 142 changes

@@ -94,6 +94,7 @@ dependencies = [
"parking_lot",
"paths",
"picker",
"postage",
"project",
"prompt_store",
"proto",
@@ -446,6 +447,7 @@ dependencies = [
"anyhow",
"futures 0.3.31",
"gpui",
"shlex",
"smol",
"tempfile",
"util",
@@ -710,31 +712,24 @@ dependencies = [
"collections",
"component",
"editor",
"fs",
"futures 0.3.31",
"gpui",
"gpui_tokio",
"handlebars 4.5.0",
"html_to_markdown",
"http_client",
"indoc",
"itertools 0.14.0",
"language",
"language_model",
"language_models",
"linkme",
"open",
"pretty_assertions",
"project",
"rand 0.8.5",
"regex",
"reqwest_client",
"rust-embed",
"schemars",
"serde",
"serde_json",
"settings",
"smallvec",
"tree-sitter-rust",
"ui",
"unindent",
@@ -742,7 +737,6 @@ dependencies = [
"web_search",
"workspace",
"workspace-hack",
"worktree",
"zed_llm_client",
]
@@ -3165,6 +3159,7 @@ dependencies = [
"go_to_line",
"gpui",
"language",
"log",
"menu",
"picker",
"postage",
@@ -3208,18 +3203,23 @@ dependencies = [
name = "component_preview"
version = "0.1.0"
dependencies = [
"agent",
"anyhow",
"assistant_tool",
"client",
"collections",
"component",
"db",
"gpui",
"languages",
"log",
"notifications",
"project",
"prompt_store",
"serde",
"ui",
"ui_input",
"util",
"workspace",
"workspace-hack",
]
@@ -4553,6 +4553,12 @@ dependencies = [
"syn 2.0.100",
]
[[package]]
name = "dotenv"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
[[package]]
name = "dotenvy"
version = "0.15.7"
@@ -4974,7 +4980,8 @@ dependencies = [
"client",
"collections",
"context_server",
"dirs 5.0.1",
"dirs 4.0.0",
"dotenv",
"env_logger 0.11.8",
"extension",
"fs",
@@ -4988,8 +4995,8 @@ dependencies = [
"language_models",
"languages",
"node_runtime",
"pathdiff",
"paths",
"pretty_assertions",
"project",
"prompt_store",
"regex",
@@ -6354,7 +6361,6 @@ dependencies = [
"log",
"pest",
"pest_derive",
"rust-embed",
"serde",
"serde_json",
"thiserror 1.0.69",
@@ -7109,6 +7115,7 @@ dependencies = [
"editor",
"file_icons",
"gpui",
"log",
"project",
"schemars",
"serde",
@@ -8243,7 +8250,7 @@ dependencies = [
"prost 0.9.0",
"prost-build 0.9.0",
"prost-types 0.9.0",
"reqwest 0.12.8",
"reqwest 0.12.15",
"serde",
"workspace-hack",
]
@@ -10101,7 +10108,7 @@ name = "perplexity"
version = "0.1.0"
dependencies = [
"serde",
"zed_extension_api 0.4.0",
"zed_extension_api 0.5.0",
]
[[package]]
@@ -11112,6 +11119,7 @@ dependencies = [
"paths",
"rope",
"serde",
"serde_json",
"text",
"util",
"uuid",
@@ -12016,8 +12024,8 @@ dependencies = [
[[package]]
name = "reqwest"
version = "0.12.8"
source = "git+https://github.com/zed-industries/reqwest.git?rev=fd110f6998da16bbca97b6dddda9be7827c50e29#fd110f6998da16bbca97b6dddda9be7827c50e29"
version = "0.12.15"
source = "git+https://github.com/zed-industries/reqwest.git?rev=951c770a32f1998d6e999cef3e59e0013e6c4415#951c770a32f1998d6e999cef3e59e0013e6c4415"
dependencies = [
"base64 0.22.1",
"bytes 1.10.1",
@@ -12052,13 +12060,14 @@ dependencies = [
"tokio-rustls 0.26.2",
"tokio-socks",
"tokio-util",
"tower 0.5.2",
"tower-service",
"url",
"wasm-bindgen",
"wasm-bindgen-futures",
"wasm-streams",
"web-sys",
"windows-registry 0.2.0",
"windows-registry 0.4.0",
]
[[package]]
@@ -12073,7 +12082,7 @@ dependencies = [
"http_client_tls",
"log",
"regex",
"reqwest 0.12.8",
"reqwest 0.12.15",
"serde",
"smol",
"tokio",
@@ -14310,6 +14319,7 @@ dependencies = [
"ctor",
"editor",
"env_logger 0.11.8",
"fuzzy",
"gpui",
"language",
"menu",
@@ -14319,6 +14329,7 @@ dependencies = [
"serde",
"serde_json",
"settings",
"smol",
"theme",
"ui",
"util",
@@ -15130,6 +15141,11 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
dependencies = [
"futures-core",
"futures-util",
"pin-project-lite",
"sync_wrapper 1.0.2",
"tokio",
"tower-layer",
"tower-service",
]
@@ -17216,13 +17232,13 @@ dependencies = [
[[package]]
name = "windows-registry"
version = "0.2.0"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0"
checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3"
dependencies = [
"windows-result 0.2.0",
"windows-strings 0.1.0",
"windows-targets 0.52.6",
"windows-result 0.3.2",
"windows-strings 0.3.1",
"windows-targets 0.53.0",
]
[[package]]
@@ -17273,6 +17289,15 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-strings"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.4.0"
@@ -17357,13 +17382,29 @@ dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
"windows_i686_gnullvm",
"windows_i686_gnullvm 0.52.6",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b"
dependencies = [
"windows_aarch64_gnullvm 0.53.0",
"windows_aarch64_msvc 0.53.0",
"windows_i686_gnu 0.53.0",
"windows_i686_gnullvm 0.53.0",
"windows_i686_msvc 0.53.0",
"windows_x86_64_gnu 0.53.0",
"windows_x86_64_gnullvm 0.53.0",
"windows_x86_64_msvc 0.53.0",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
@@ -17382,6 +17423,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
@@ -17400,6 +17447,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_aarch64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
@@ -17418,12 +17471,24 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
@@ -17442,6 +17507,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_i686_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
@@ -17460,6 +17531,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
@@ -17478,6 +17555,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
@@ -17496,6 +17579,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "windows_x86_64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
[[package]]
name = "winnow"
version = "0.7.6"
@@ -18006,6 +18095,7 @@ dependencies = [
"subtle",
"syn 1.0.109",
"syn 2.0.100",
"sync_wrapper 1.0.2",
"thiserror 2.0.12",
"time",
"time-macros",
@@ -18016,6 +18106,7 @@ dependencies = [
"tokio-util",
"toml_datetime",
"toml_edit",
"tower 0.5.2",
"tracing",
"tracing-core",
"tungstenite 0.26.2",
@@ -18401,7 +18492,6 @@ dependencies = [
"collab_ui",
"collections",
"command_palette",
"command_palette_hooks",
"component_preview",
"copilot",
"dap",
@@ -18542,7 +18632,7 @@ dependencies = [
[[package]]
name = "zed_extension_api"
version = "0.4.0"
version = "0.5.0"
dependencies = [
"serde",
"serde_json",
@@ -18602,7 +18692,7 @@ dependencies = [
name = "zed_test_extension"
version = "0.1.0"
dependencies = [
"zed_extension_api 0.4.0",
"zed_extension_api 0.5.0",
]
[[package]]

View File

@@ -39,9 +39,9 @@ members = [
"crates/credentials_provider",
"crates/dap",
"crates/dap_adapters",
"crates/db",
"crates/debugger_tools",
"crates/debugger_ui",
"crates/db",
"crates/deepseek",
"crates/diagnostics",
"crates/docs_preprocessor",
@@ -109,7 +109,6 @@ members = [
"crates/project",
"crates/project_panel",
"crates/project_symbols",
"crates/rules_library",
"crates/prompt_store",
"crates/proto",
"crates/recent_projects",
@@ -123,6 +122,7 @@ members = [
"crates/rich_text",
"crates/rope",
"crates/rpc",
"crates/rules_library",
"crates/schema_generator",
"crates/search",
"crates/semantic_index",
@@ -229,6 +229,7 @@ auto_update_ui = { path = "crates/auto_update_ui" }
aws_http_client = { path = "crates/aws_http_client" }
bedrock = { path = "crates/bedrock" }
breadcrumbs = { path = "crates/breadcrumbs" }
buffer_diff = { path = "crates/buffer_diff" }
call = { path = "crates/call" }
channel = { path = "crates/channel" }
cli = { path = "crates/cli" }
@@ -248,11 +249,10 @@ credentials_provider = { path = "crates/credentials_provider" }
dap = { path = "crates/dap" }
dap_adapters = { path = "crates/dap_adapters" }
db = { path = "crates/db" }
debugger_ui = { path = "crates/debugger_ui" }
debugger_tools = { path = "crates/debugger_tools" }
debugger_ui = { path = "crates/debugger_ui" }
deepseek = { path = "crates/deepseek" }
diagnostics = { path = "crates/diagnostics" }
buffer_diff = { path = "crates/buffer_diff" }
editor = { path = "crates/editor" }
extension = { path = "crates/extension" }
extension_host = { path = "crates/extension_host" }
@@ -296,7 +296,6 @@ livekit_api = { path = "crates/livekit_api" }
livekit_client = { path = "crates/livekit_client" }
lmstudio = { path = "crates/lmstudio" }
lsp = { path = "crates/lsp" }
lsp-types = { git = "https://github.com/zed-industries/lsp-types", rev = "c9c189f1c5dd53c624a419ce35bc77ad6a908d18" }
markdown = { path = "crates/markdown" }
markdown_preview = { path = "crates/markdown_preview" }
media = { path = "crates/media" }
@@ -310,8 +309,8 @@ ollama = { path = "crates/ollama" }
open_ai = { path = "crates/open_ai" }
outline = { path = "crates/outline" }
outline_panel = { path = "crates/outline_panel" }
paths = { path = "crates/paths" }
panel = { path = "crates/panel" }
paths = { path = "crates/paths" }
picker = { path = "crates/picker" }
plugin = { path = "crates/plugin" }
plugin_macros = { path = "crates/plugin_macros" }
@@ -319,7 +318,6 @@ prettier = { path = "crates/prettier" }
project = { path = "crates/project" }
project_panel = { path = "crates/project_panel" }
project_symbols = { path = "crates/project_symbols" }
rules_library = { path = "crates/rules_library" }
prompt_store = { path = "crates/prompt_store" }
proto = { path = "crates/proto" }
recent_projects = { path = "crates/recent_projects" }
@@ -332,6 +330,7 @@ reqwest_client = { path = "crates/reqwest_client" }
rich_text = { path = "crates/rich_text" }
rope = { path = "crates/rope" }
rpc = { path = "crates/rpc" }
rules_library = { path = "crates/rules_library" }
search = { path = "crates/search" }
semantic_index = { path = "crates/semantic_index" }
semantic_version = { path = "crates/semantic_version" }
@@ -418,7 +417,6 @@ bitflags = "2.6.0"
blade-graphics = { git = "https://github.com/kvark/blade", rev = "b16f5c7bd873c7126f48c82c39e7ae64602ae74f" }
blade-macros = { git = "https://github.com/kvark/blade", rev = "b16f5c7bd873c7126f48c82c39e7ae64602ae74f" }
blade-util = { git = "https://github.com/kvark/blade", rev = "b16f5c7bd873c7126f48c82c39e7ae64602ae74f" }
naga = { version = "23.1.0", features = ["wgsl-in"] }
blake3 = "1.5.3"
bytes = "1.0"
cargo_metadata = "0.19"
@@ -428,15 +426,16 @@ circular-buffer = "1.0"
clap = { version = "4.4", features = ["derive"] }
cocoa = "0.26"
cocoa-foundation = "0.2.0"
core-video = { version = "0.4.3", features = ["metal"] }
convert_case = "0.8.0"
core-foundation = "0.10.0"
core-foundation-sys = "0.8.6"
core-video = { version = "0.4.3", features = ["metal"] }
ctor = "0.4.0"
dashmap = "6.0"
dap-types = { git = "https://github.com/zed-industries/dap-types", rev = "be69a016ba710191b9fdded28c8b042af4b617f7" }
dashmap = "6.0"
derive_more = "0.99.17"
dirs = "4.0"
dotenv = "0.15.0"
ec4rs = "1.1"
emojis = "0.6.1"
env_logger = "0.11"
@@ -453,8 +452,8 @@ heck = "0.5"
heed = { version = "0.21.0", features = ["read-txn-no-tls"] }
hex = "0.4.3"
html5ever = "0.27.0"
hyper = "0.14"
http = "1.1"
hyper = "0.14"
ignore = "0.4.22"
image = "0.25.1"
imara-diff = "0.1.8"
@@ -470,24 +469,27 @@ libsqlite3-sys = { version = "0.30.1", features = ["bundled"] }
linkify = "0.10.0"
linkme = "0.3.31"
log = { version = "0.4.16", features = ["kv_unstable_serde", "serde"] }
lsp-types = { git = "https://github.com/zed-industries/lsp-types", rev = "c9c189f1c5dd53c624a419ce35bc77ad6a908d18" }
markup5ever_rcdom = "0.3.0"
metal = "0.29"
mlua = { version = "0.10", features = ["lua54", "vendored", "async", "send"] }
naga = { version = "23.1.0", features = ["wgsl-in"] }
nanoid = "0.4"
nbformat = { git = "https://github.com/ConradIrwin/runtimed", rev = "7130c804216b6914355d15d0b91ea91f6babd734" }
nix = "0.29"
num-format = "0.4.4"
objc = "0.2"
open = "5.0.0"
num-format = "0.4.4"
ordered-float = "2.1.1"
palette = { version = "0.7.5", default-features = false, features = ["std"] }
parking_lot = "0.12.1"
partial-json-fixer = "0.5.3"
pathdiff = "0.2"
pet = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
pet-fs = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
pet-pixi = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
pet-conda = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
pet-core = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
pet-fs = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
pet-pixi = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
pet-poetry = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
pet-reporter = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
postage = { version = "0.5", features = ["futures-traits"] }
@@ -500,11 +502,11 @@ prost-types = "0.9"
pulldown-cmark = { version = "0.12.0", default-features = false }
quote = "1.0.9"
rand = "0.8.5"
ref-cast = "1.0.24"
rayon = "1.8"
ref-cast = "1.0.24"
regex = "1.5"
repair_json = "0.1.0"
reqwest = { git = "https://github.com/zed-industries/reqwest.git", rev = "fd110f6998da16bbca97b6dddda9be7827c50e29", default-features = false, features = [
reqwest = { git = "https://github.com/zed-industries/reqwest.git", rev = "951c770a32f1998d6e999cef3e59e0013e6c4415", default-features = false, features = [
"charset",
"http2",
"macos-system-configuration",
@@ -516,8 +518,8 @@ rsa = "0.9.6"
runtimelib = { git = "https://github.com/ConradIrwin/runtimed", rev = "7130c804216b6914355d15d0b91ea91f6babd734", default-features = false, features = [
"async-dispatcher-runtime",
] }
rustc-demangle = "0.1.23"
rust-embed = { version = "8.4", features = ["include-exclude"] }
rustc-demangle = "0.1.23"
rustc-hash = "2.1.0"
rustls = { version = "0.23.26" }
rustls-platform-verifier = "0.5.0"
@@ -559,15 +561,16 @@ time = { version = "0.3", features = [
"formatting",
] }
tiny_http = "0.8"
toml = "0.8"
tokio = { version = "1" }
tokio-tungstenite = { version = "0.26", features = ["__rustls-tls"] }
toml = "0.8"
tower-http = "0.4.4"
tree-sitter = { version = "0.25.3", features = ["wasm"] }
tree-sitter-bash = "0.23"
tree-sitter-c = "0.23"
tree-sitter-cpp = "0.23"
tree-sitter-css = "0.23"
tree-sitter-diff = "0.1.0"
tree-sitter-elixir = "0.3"
tree-sitter-embedded-template = "0.23.0"
tree-sitter-gitcommit = { git = "https://github.com/zed-industries/tree-sitter-git-commit", rev = "88309716a69dd13ab83443721ba6e0b491d37ee9" }
@@ -575,7 +578,6 @@ tree-sitter-go = "0.23"
tree-sitter-go-mod = { git = "https://github.com/camdencheek/tree-sitter-go-mod", rev = "6efb59652d30e0e9cd5f3b3a669afd6f1a926d3c", package = "tree-sitter-gomod" }
tree-sitter-gowork = { git = "https://github.com/zed-industries/tree-sitter-go-work", rev = "acb0617bf7f4fda02c6217676cc64acb89536dc7" }
tree-sitter-heex = { git = "https://github.com/zed-industries/tree-sitter-heex", rev = "1dd45142fbb05562e35b2040c6129c9bca346592" }
tree-sitter-diff = "0.1.0"
tree-sitter-html = "0.23"
tree-sitter-jsdoc = "0.23"
tree-sitter-json = "0.24"
@@ -587,15 +589,15 @@ tree-sitter-rust = "0.24"
tree-sitter-typescript = "0.23"
tree-sitter-yaml = { git = "https://github.com/zed-industries/tree-sitter-yaml", rev = "baff0b51c64ef6a1fb1f8390f3ad6015b83ec13a" }
unicase = "2.6"
unindent = "0.2.0"
unicode-segmentation = "1.10"
unicode-script = "0.5.7"
unicode-segmentation = "1.10"
unindent = "0.2.0"
url = "2.2"
urlencoding = "2.1.2"
uuid = { version = "1.1.2", features = ["v4", "v5", "v7", "serde"] }
walkdir = "2.3"
wasmparser = "0.221"
wasm-encoder = "0.221"
wasmparser = "0.221"
wasmtime = { version = "29", default-features = false, features = [
"async",
"demangle",
@@ -609,7 +611,6 @@ wit-component = "0.221"
workspace-hack = "0.1.0"
zed_llm_client = "0.7.1"
zstd = "0.11"
metal = "0.29"
[workspace.dependencies.async-stripe]
git = "https://github.com/zed-industries/async-stripe"

View File

@@ -50,6 +50,12 @@
"] -": "vim::NextLesserIndent",
"] +": "vim::NextGreaterIndent",
"] =": "vim::NextSameIndent",
"] b": "pane::ActivateNextItem",
"[ b": "pane::ActivatePreviousItem",
"] shift-b": "pane::ActivateLastItem",
"[ shift-b": ["pane::ActivateItem", 0],
"] space": "vim::InsertEmptyLineBelow",
"[ space": "vim::InsertEmptyLineAbove",
// Word motions
"w": "vim::NextWordStart",
"e": "vim::NextWordEnd",
@@ -108,7 +114,11 @@
"ctrl-e": "vim::LineDown",
"ctrl-y": "vim::LineUp",
// "g" commands
"g r": "vim::PushReplaceWithRegister",
"g shift-r": "vim::PushReplaceWithRegister",
"g r n": "editor::Rename",
"g r r": "editor::FindAllReferences",
"g r i": "editor::GoToImplementation",
"g r a": "editor::ToggleCodeActions",
"g g": "vim::StartOfDocument",
"g h": "editor::Hover",
"g t": "pane::ActivateNextItem",
@@ -127,6 +137,7 @@
"g <": ["editor::SelectPrevious", { "replace_newest": true }],
"g a": "editor::SelectAllMatches",
"g s": "outline::Toggle",
"g shift-o": "outline::Toggle",
"g shift-s": "project_symbols::Toggle",
"g .": "editor::ToggleCodeActions", // zed specific
"g shift-a": "editor::FindAllReferences", // zed specific
@@ -305,7 +316,7 @@
"!": "vim::ShellCommand",
"i": ["vim::PushObject", { "around": false }],
"a": ["vim::PushObject", { "around": true }],
"g r": ["vim::Paste", { "preserve_clipboard": true }],
"g shift-r": ["vim::Paste", { "preserve_clipboard": true }],
"g c": "vim::ToggleComments",
"g q": "vim::Rewrap",
"g ?": "vim::ConvertToRot13",
@@ -339,7 +350,8 @@
"ctrl-shift-q": ["vim::PushLiteral", {}],
"ctrl-r": "vim::PushRegister",
"insert": "vim::ToggleReplace",
"ctrl-o": "vim::TemporaryNormal"
"ctrl-o": "vim::TemporaryNormal",
"ctrl-s": "editor::ShowSignatureHelp"
}
},
{
@@ -630,9 +642,10 @@
}
},
{
"context": "vim_operator == gr",
"context": "vim_operator == gR",
"bindings": {
"r": "vim::CurrentLine"
"r": "vim::CurrentLine",
"shift-r": "vim::CurrentLine"
}
},
{
@@ -693,7 +706,7 @@
}
},
{
"context": "GitPanel || ProjectPanel || CollabPanel || OutlinePanel || ChatPanel || VimControl || EmptyPane || SharedScreen || MarkdownPreview || KeyContextView",
"context": "GitPanel || ProjectPanel || CollabPanel || OutlinePanel || ChatPanel || VimControl || EmptyPane || SharedScreen || MarkdownPreview || KeyContextView || DebugPanel",
"bindings": {
// window related commands (ctrl-w X)
"ctrl-w": null,

View File

@@ -27,13 +27,28 @@ If appropriate, use tool calls to explore the current project, which contains th
- `{{root_name}}`
{{/each}}
- Bias towards not asking the user for help if you can find the answer yourself.
- When providing paths to tools, the path should always begin with a path that starts with a project root directory listed above.
- Before you read or edit a file, you must first find the full path. DO NOT ever guess a file path!
{{# if (has_tool 'grep') }}
- When looking for symbols in the project, prefer the `grep` tool.
- As you learn about the structure of the project, use that information to scope `grep` searches to targeted subtrees of the project.
- Bias towards not asking the user for help if you can find the answer yourself.
{{! TODO: Only mention tools if they are enabled }}
- The user might specify a partial file path. If you don't know the full path, use `find_path` (not `grep`) before you read the file.
- Before you read or edit a file, you must first find the full path. DO NOT ever guess a file path!
{{/if}}
## Code Block Formatting
Whenever you mention a code block, you MUST use ONLY the following format when the code in the block comes from a file
in the project:
```path/to/Something.blah#L123-456
(code goes here)
```
The `#L123-456` means the line number range 123 through 456, and the path/to/Something.blah
is a path in the project. (If this code block does not come from a file in the project, then you may instead use
the normal markdown style of three backticks followed by language name. However, you MUST use this format if
the code in the block comes from a file in the project.)
## Fixing Diagnostics
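
For reference, the citation tag introduced in this hunk is just a project path
followed by an inclusive line range. A tiny illustrative parser for it (a
hypothetical helper, not part of this changeset; Zed's actual handling lives
elsewhere):

```rust
/// Parse a `path/to/file.ext#L<start>-<end>` citation tag into its parts.
/// Hypothetical helper, for illustration only.
fn parse_citation(tag: &str) -> Option<(&str, u32, u32)> {
    let (path, range) = tag.split_once("#L")?;
    let (start, end) = range.split_once('-')?;
    Some((path, start.parse::<u32>().ok()?, end.parse::<u32>().ok()?))
}

fn main() {
    assert_eq!(
        parse_citation("path/to/Something.blah#L123-456"),
        Some(("path/to/Something.blah", 123, 456))
    );
}
```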

View File

@@ -167,7 +167,23 @@
// Default: not set, defaults to "bar"
"cursor_shape": null,
// Determines when the mouse cursor should be hidden in an editor or input box.
//
// 1. Never hide the mouse cursor:
// "never"
// 2. Hide only when typing:
// "on_typing"
// 3. Hide on both typing and cursor movement:
// "on_typing_and_movement"
"hide_mouse": "on_typing_and_movement",
// Determines how snippets are sorted relative to other completion items.
//
// 1. Place snippets at the top of the completion list:
// "top"
// 2. Place snippets normally without any preference:
// "inline"
// 3. Place snippets at the bottom of the completion list:
// "bottom"
"snippet_sort_order": "inline",
// How to highlight the current line in the editor.
//
// 1. Don't highlight the current line:
@@ -210,7 +226,7 @@
// Hide the values of in variables from visual display in private files
"redact_private_values": false,
// The default number of lines to expand excerpts in the multibuffer by.
"expand_excerpt_lines": 3,
"expand_excerpt_lines": 5,
// Globs to match against file paths to determine if a file is private.
"private_files": ["**/.env*", "**/*.pem", "**/*.key", "**/*.cert", "**/*.crt", "**/secrets.yml"],
// Whether to use additional LSP queries to format (and amend) the code after
@@ -585,6 +601,13 @@
//
// Default: main
"fallback_branch_name": "main",
// Whether to sort entries in the panel by path
// or by status (the default).
//
// Default: false
"sort_by_path": false,
"scrollbar": {
// When to show the scrollbar in the git panel.
//

View File

@@ -60,6 +60,7 @@ ordered-float.workspace = true
parking_lot.workspace = true
paths.workspace = true
picker.workspace = true
postage.workspace = true
project.workspace = true
rules_library.workspace = true
prompt_store.workspace = true

View File

@@ -1,4 +1,4 @@
use crate::context::{AgentContext, RULES_ICON};
use crate::context::{AgentContextHandle, RULES_ICON};
use crate::context_picker::MentionLink;
use crate::thread::{
LastRestoreCheckpoint, MessageId, MessageSegment, Thread, ThreadError, ThreadEvent,
@@ -25,12 +25,12 @@ use gpui::{
};
use language::{Buffer, LanguageRegistry};
use language_model::{
LanguageModelRegistry, LanguageModelRequestMessage, LanguageModelToolUseId, MessageContent,
RequestUsage, Role, StopReason,
LanguageModelRequestMessage, LanguageModelToolUseId, MessageContent, RequestUsage, Role,
StopReason,
};
use markdown::parser::{CodeBlockKind, CodeBlockMetadata};
use markdown::{HeadingLevelStyles, Markdown, MarkdownElement, MarkdownStyle, ParsedMarkdown};
use project::ProjectItem as _;
use project::{ProjectEntryId, ProjectItem as _};
use rope::Point;
use settings::{Settings as _, update_settings_file};
use std::path::Path;
@@ -43,8 +43,8 @@ use ui::{
Disclosure, IconButton, KeyBinding, Scrollbar, ScrollbarState, TextSize, Tooltip, prelude::*,
};
use util::ResultExt as _;
use util::markdown::MarkdownString;
use workspace::{OpenOptions, Workspace};
use util::markdown::MarkdownCodeBlock;
use workspace::Workspace;
use zed_actions::assistant::OpenRulesLibrary;
pub struct ActiveThread {
@@ -483,7 +483,7 @@ fn render_markdown_code_block(
.expanded_code_blocks
.get(&(message_id, ix))
.copied()
.unwrap_or(false);
.unwrap_or(true);
let codeblock_header_bg = cx
.theme()
@@ -504,51 +504,47 @@ fn render_markdown_code_block(
.children(label)
.child(
h_flex()
.visible_on_hover("codeblock_container")
.gap_1()
.child(
div().visible_on_hover("codeblock_container").child(
IconButton::new(
("copy-markdown-code", ix),
if codeblock_was_copied {
IconName::Check
} else {
IconName::Copy
},
)
.icon_color(Color::Muted)
.shape(ui::IconButtonShape::Square)
.tooltip(Tooltip::text("Copy Code"))
.on_click({
let active_thread = active_thread.clone();
let parsed_markdown = parsed_markdown.clone();
let code_block_range = metadata.content_range.clone();
move |_event, _window, cx| {
active_thread.update(cx, |this, cx| {
this.copied_code_block_ids.insert((message_id, ix));
IconButton::new(
("copy-markdown-code", ix),
if codeblock_was_copied {
IconName::Check
} else {
IconName::Copy
},
)
.icon_color(Color::Muted)
.shape(ui::IconButtonShape::Square)
.tooltip(Tooltip::text("Copy Code"))
.on_click({
let active_thread = active_thread.clone();
let parsed_markdown = parsed_markdown.clone();
let code_block_range = metadata.content_range.clone();
move |_event, _window, cx| {
active_thread.update(cx, |this, cx| {
this.copied_code_block_ids.insert((message_id, ix));
let code = parsed_markdown.source()[code_block_range.clone()]
.to_string();
cx.write_to_clipboard(ClipboardItem::new_string(code));
let code =
parsed_markdown.source()[code_block_range.clone()].to_string();
cx.write_to_clipboard(ClipboardItem::new_string(code));
cx.spawn(async move |this, cx| {
cx.background_executor()
.timer(Duration::from_secs(2))
.await;
cx.spawn(async move |this, cx| {
cx.background_executor().timer(Duration::from_secs(2)).await;
cx.update(|cx| {
this.update(cx, |this, cx| {
this.copied_code_block_ids
.remove(&(message_id, ix));
cx.notify();
})
cx.update(|cx| {
this.update(cx, |this, cx| {
this.copied_code_block_ids.remove(&(message_id, ix));
cx.notify();
})
.ok();
})
.detach();
});
}
}),
),
.ok();
})
.detach();
});
}
}),
)
.when(
metadata.line_count > MAX_UNCOLLAPSED_LINES_IN_CODE_BLOCK,
@@ -576,7 +572,7 @@ fn render_markdown_code_block(
let is_expanded = this
.expanded_code_blocks
.entry((message_id, ix))
.or_insert(false);
.or_insert(true);
*is_expanded = !*is_expanded;
cx.notify();
});
@@ -886,7 +882,11 @@ impl ActiveThread {
});
rendered.input.update(cx, |this, cx| {
this.replace(
MarkdownString::code_block("json", tool_input).to_string(),
MarkdownCodeBlock {
tag: "json",
text: tool_input,
}
.to_string(),
cx,
);
});
@@ -903,6 +903,11 @@ impl ActiveThread {
cx: &mut Context<Self>,
) {
match event {
ThreadEvent::CancelEditing => {
if self.editing_message.is_some() {
self.cancel_editing_message(&menu::Cancel, window, cx);
}
}
ThreadEvent::ShowError(error) => {
self.last_error = Some(error.clone());
}
@@ -1247,7 +1252,7 @@ impl ActiveThread {
cx.emit(ActiveThreadEvent::EditingMessageTokenCountChanged);
state._update_token_count_task.take();
let Some(default_model) = LanguageModelRegistry::read_global(cx).default_model() else {
let Some(configured_model) = self.thread.read(cx).configured_model() else {
state.last_estimated_token_count.take();
return;
};
@@ -1293,13 +1298,14 @@ impl ActiveThread {
let request = language_model::LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![request_message],
tools: vec![],
stop: vec![],
temperature: None,
};
Some(default_model.model.count_tokens(request, cx))
Some(configured_model.model.count_tokens(request, cx))
})? {
task.await?
} else {
@@ -1332,7 +1338,7 @@ impl ActiveThread {
return;
};
let edited_text = state.editor.read(cx).text(cx);
self.thread.update(cx, |thread, cx| {
let thread_model = self.thread.update(cx, |thread, cx| {
thread.edit_message(
message_id,
Role::User,
@@ -1342,9 +1348,10 @@ impl ActiveThread {
for message_id in self.messages_after(message_id) {
thread.delete_message(*message_id, cx);
}
thread.get_or_init_configured_model(cx)
});
let Some(model) = LanguageModelRegistry::read_global(cx).default_model() else {
let Some(model) = thread_model else {
return;
};
@@ -1490,19 +1497,13 @@ impl ActiveThread {
let workspace = self.workspace.clone();
let thread = self.thread.read(cx);
let prompt_store = self.thread_store.read(cx).prompt_store().as_ref();
// Get all the data we need from thread before we start using it in closures
let checkpoint = thread.checkpoint_for_message(message_id);
let added_context = if let Some(workspace) = workspace.upgrade() {
let project = workspace.read(cx).project().read(cx);
thread
.context_for_message(message_id)
.flat_map(|context| AddedContext::new(context.clone(), prompt_store, project, cx))
.collect::<Vec<_>>()
} else {
return Empty.into_any();
};
let added_context = thread
.context_for_message(message_id)
.map(|context| AddedContext::new_attached(context, cx))
.collect::<Vec<_>>();
let tool_uses = thread.tool_uses_for_message(message_id, cx);
let has_tool_uses = !tool_uses.is_empty();
@@ -1513,8 +1514,6 @@ impl ActiveThread {
let show_feedback = thread.is_turn_end(ix);
let needs_confirmation = tool_uses.iter().any(|tool_use| tool_use.needs_confirmation);
let generating_label = (is_generating && is_last_message)
.then(|| AnimatedLabel::new("Generating").size(LabelSize::Small));
@@ -1540,23 +1539,25 @@ impl ActiveThread {
const RESPONSE_PADDING_X: Pixels = px(18.);
let feedback_container = h_flex()
.group("feedback_container")
.py_2()
.px(RESPONSE_PADDING_X)
.gap_1()
.flex_wrap()
.justify_between();
.justify_end();
let feedback_items = match self.thread.read(cx).message_feedback(message_id) {
Some(feedback) => feedback_container
.child(
Label::new(match feedback {
ThreadFeedback::Positive => "Thanks for your feedback!",
ThreadFeedback::Negative => {
"We appreciate your feedback and will use it to improve."
}
})
div().mr_1().visible_on_hover("feedback_container").child(
Label::new(match feedback {
ThreadFeedback::Positive => "Thanks for your feedback!",
ThreadFeedback::Negative => {
"We appreciate your feedback and will use it to improve."
}
})
.color(Color::Muted)
.size(LabelSize::XSmall)
.truncate(),
.truncate())
)
.child(
h_flex()
@@ -1603,12 +1604,13 @@ impl ActiveThread {
.into_any_element(),
None => feedback_container
.child(
Label::new(
"Rating the thread sends all of your current conversation to the Zed team.",
)
.color(Color::Muted)
div().mr_1().visible_on_hover("feedback_container").child(
Label::new(
"Rating the thread sends all of your current conversation to the Zed team.",
)
.color(Color::Muted)
.size(LabelSize::XSmall)
.truncate(),
.truncate())
)
.child(
h_flex()
@@ -1654,6 +1656,7 @@ impl ActiveThread {
let message_content = has_content.then(|| {
v_flex()
.w_full()
.gap_1()
.when(!message_is_empty, |parent| {
parent.child(
@@ -1677,6 +1680,8 @@ impl ActiveThread {
.on_action(cx.listener(Self::cancel_editing_message))
.on_action(cx.listener(Self::confirm_editing_message))
.min_h_6()
.flex_grow()
.w_full()
.child(EditorElement::new(
&edit_message_editor,
EditorStyle {
@@ -1706,7 +1711,7 @@ impl ActiveThread {
.when(!added_context.is_empty(), |parent| {
parent.child(h_flex().flex_wrap().gap_1().children(
added_context.into_iter().map(|added_context| {
let context = added_context.context.clone();
let context = added_context.handle.clone();
ContextPill::added(added_context, false, false, None).on_click(Rc::new(
cx.listener({
let workspace = workspace.clone();
@@ -1726,13 +1731,7 @@ impl ActiveThread {
let styled_message = match message.role {
Role::User => v_flex()
.id(("message-container", ix))
.map(|this| {
if is_first_message {
this.pt_2()
} else {
this.pt_4()
}
})
.pt_2()
.pl_2()
.pr_2p5()
.pb_4()
@@ -1746,48 +1745,62 @@ impl ActiveThread {
.border_color(colors.border)
.hover(|hover| hover.border_color(colors.text_accent.opacity(0.5)))
.cursor_pointer()
.child(div().p_2().pt_2p5().children(message_content))
.when_some(edit_message_editor.clone(), |this, edit_editor| {
let focus_handle = edit_editor.focus_handle(cx);
this.child(
h_flex()
.p_1()
.border_t_1()
.border_color(colors.border_variant)
.gap_1()
.justify_end()
.child(
Button::new("cancel-edit-message", "Cancel")
.label_size(LabelSize::Small)
.key_binding(
KeyBinding::for_action_in(
&menu::Cancel,
&focus_handle,
window,
cx,
.child(
h_flex()
.p_2()
.pt_3()
.gap_1()
.children(message_content)
.when_some(edit_message_editor.clone(), |this, edit_editor| {
let edit_editor_clone = edit_editor.clone();
this.w_full().justify_between().child(
h_flex()
.gap_0p5()
.child(
IconButton::new(
"cancel-edit-message",
IconName::Close,
)
.map(|kb| kb.size(rems_from_px(12.))),
.shape(ui::IconButtonShape::Square)
.icon_color(Color::Error)
.tooltip(move |window, cx| {
let focus_handle =
edit_editor_clone.focus_handle(cx);
Tooltip::for_action_in(
"Cancel Edit",
&menu::Cancel,
&focus_handle,
window,
cx,
)
})
.on_click(cx.listener(Self::handle_cancel_click)),
)
.on_click(cx.listener(Self::handle_cancel_click)),
.child(
IconButton::new(
"confirm-edit-message",
IconName::Check,
)
.disabled(edit_editor.read(cx).is_empty(cx))
.shape(ui::IconButtonShape::Square)
.icon_color(Color::Success)
.tooltip(move |window, cx| {
let focus_handle = edit_editor.focus_handle(cx);
Tooltip::for_action_in(
"Regenerate",
&menu::Confirm,
&focus_handle,
window,
cx,
)
})
.on_click(
cx.listener(Self::handle_regenerate_click),
),
),
)
.child(
Button::new("confirm-edit-message", "Regenerate")
.disabled(edit_editor.read(cx).is_empty(cx))
.label_size(LabelSize::Small)
.key_binding(
KeyBinding::for_action_in(
&menu::Confirm,
&focus_handle,
window,
cx,
)
.map(|kb| kb.size(rems_from_px(12.))),
)
.on_click(cx.listener(Self::handle_regenerate_click)),
),
)
})
}),
)
.when(edit_message_editor.is_none(), |this| {
this.tooltip(Tooltip::text("Click To Edit"))
})
@@ -1905,7 +1918,7 @@ impl ActiveThread {
parent.child(self.render_rules_item(cx))
})
.child(styled_message)
.when(!needs_confirmation && generating_label.is_some(), |this| {
.when(generating_label.is_some(), |this| {
this.child(
h_flex()
.h_8()
@@ -2113,13 +2126,15 @@ impl ActiveThread {
}),
transform: Some(Arc::new({
let active_thread = cx.entity();
let editor_bg = cx.theme().colors().editor_background;
move |el, range, metadata, _, cx| {
let is_expanded = active_thread
.read(cx)
.expanded_code_blocks
.get(&(message_id, range.start))
.copied()
.unwrap_or(false);
.unwrap_or(true);
if is_expanded
|| metadata.line_count
@@ -2135,19 +2150,11 @@ impl ActiveThread {
.w_full()
.h_1_4()
.rounded_b_lg()
.bg(gpui::linear_gradient(
.bg(linear_gradient(
0.,
gpui::linear_color_stop(
cx.theme()
.colors()
.editor_background,
0.,
),
gpui::linear_color_stop(
cx.theme()
.colors()
.editor_background
.opacity(0.),
linear_color_stop(editor_bg, 0.),
linear_color_stop(
editor_bg.opacity(0.),
1.,
),
)),
@@ -3049,21 +3056,30 @@ impl ActiveThread {
return;
};
let abs_paths = project_context
let project_entry_ids = project_context
.worktrees
.iter()
.flat_map(|worktree| worktree.rules_file.as_ref())
.map(|rules_file| rules_file.abs_path.to_path_buf())
.map(|rules_file| ProjectEntryId::from_usize(rules_file.project_entry_id))
.collect::<Vec<_>>();
if let Ok(task) = self.workspace.update(cx, move |workspace, cx| {
// TODO: Open a multibuffer instead? In some cases this doesn't make the set of rules
// files clear. For example, if rules file 1 is already open but rules file 2 is not,
// this would open and focus rules file 2 in a tab that is not next to rules file 1.
workspace.open_paths(abs_paths, OpenOptions::default(), None, window, cx)
}) {
task.detach();
}
self.workspace
.update(cx, move |workspace, cx| {
// TODO: Open a multibuffer instead? In some cases this doesn't make the set of rules
// files clear. For example, if rules file 1 is already open but rules file 2 is not,
// this would open and focus rules file 2 in a tab that is not next to rules file 1.
let project = workspace.project().read(cx);
let project_paths = project_entry_ids
.into_iter()
.flat_map(|entry_id| project.path_for_entry(entry_id, cx))
.collect::<Vec<_>>();
for project_path in project_paths {
workspace
.open_path(project_path, None, true, window, cx)
.detach_and_log_err(cx);
}
})
.ok();
}
fn dismiss_notifications(&mut self, cx: &mut Context<ActiveThread>) {
@@ -3170,13 +3186,13 @@ impl Render for ActiveThread {
}
pub(crate) fn open_context(
context: &AgentContext,
context: &AgentContextHandle,
workspace: Entity<Workspace>,
window: &mut Window,
cx: &mut App,
) {
match context {
AgentContext::File(file_context) => {
AgentContextHandle::File(file_context) => {
if let Some(project_path) = file_context.project_path(cx) {
workspace.update(cx, |workspace, cx| {
workspace
@@ -3186,7 +3202,7 @@ pub(crate) fn open_context(
}
}
AgentContext::Directory(directory_context) => {
AgentContextHandle::Directory(directory_context) => {
let entry_id = directory_context.entry_id;
workspace.update(cx, |workspace, cx| {
workspace.project().update(cx, |_project, cx| {
@@ -3195,7 +3211,7 @@ pub(crate) fn open_context(
})
}
AgentContext::Symbol(symbol_context) => {
AgentContextHandle::Symbol(symbol_context) => {
let buffer = symbol_context.buffer.read(cx);
if let Some(project_path) = buffer.project_path(cx) {
let snapshot = buffer.snapshot();
@@ -3205,7 +3221,7 @@ pub(crate) fn open_context(
}
}
AgentContext::Selection(selection_context) => {
AgentContextHandle::Selection(selection_context) => {
let buffer = selection_context.buffer.read(cx);
if let Some(project_path) = buffer.project_path(cx) {
let snapshot = buffer.snapshot();
@@ -3216,11 +3232,11 @@ pub(crate) fn open_context(
}
}
AgentContext::FetchedUrl(fetched_url_context) => {
AgentContextHandle::FetchedUrl(fetched_url_context) => {
cx.open_url(&fetched_url_context.url);
}
AgentContext::Thread(thread_context) => workspace.update(cx, |workspace, cx| {
AgentContextHandle::Thread(thread_context) => workspace.update(cx, |workspace, cx| {
if let Some(panel) = workspace.panel::<AssistantPanel>(cx) {
panel.update(cx, |panel, cx| {
let thread_id = thread_context.thread.read(cx).id().clone();
@@ -3231,14 +3247,14 @@ pub(crate) fn open_context(
}
}),
AgentContext::Rules(rules_context) => window.dispatch_action(
AgentContextHandle::Rules(rules_context) => window.dispatch_action(
Box::new(OpenRulesLibrary {
prompt_to_select: Some(rules_context.prompt_id.0),
}),
cx,
),
AgentContext::Image(_) => {}
AgentContextHandle::Image(_) => {}
}
}

View File

@@ -597,6 +597,10 @@ impl Item for AgentDiff {
editor.added_to_workspace(workspace, window, cx)
});
}
fn tab_content_text(&self, _detail: usize, _cx: &App) -> SharedString {
"Agent Diff".into()
}
}
impl Render for AgentDiff {
@@ -947,6 +951,7 @@ mod tests {
ThemeSettings::register(cx);
ContextServerSettings::register(cx);
EditorSettings::register(cx);
language_model::init_settings(cx);
});
let fs = FakeFs::new(cx.executor());
@@ -968,8 +973,8 @@ mod tests {
ThreadStore::load(
project.clone(),
cx.new(|_| ToolWorkingSet::default()),
prompt_store,
Arc::new(PromptBuilder::new(None).unwrap()),
prompt_store,
cx,
)
})

View File

@@ -34,7 +34,7 @@ use prompt_store::PromptBuilder;
use schemars::JsonSchema;
use serde::Deserialize;
use settings::Settings as _;
use thread::ThreadId;
pub use thread::{MessageSegment, ThreadId};
pub use crate::active_thread::ActiveThread;
use crate::assistant_configuration::{AddContextServerModal, ManageProfilesModal};
@@ -42,7 +42,7 @@ pub use crate::assistant_panel::{AssistantPanel, ConcreteAssistantPanelDelegate}
pub use crate::context::{ContextLoadResult, LoadedContext};
pub use crate::inline_assistant::InlineAssistant;
pub use crate::thread::{Message, Thread, ThreadEvent};
pub use crate::thread_store::ThreadStore;
pub use crate::thread_store::{SharedProjectContext, ThreadStore};
pub use agent_diff::{AgentDiff, AgentDiffToolbar};
actions!(

View File

@@ -272,7 +272,7 @@ impl PickerDelegate for ToolPickerDelegate {
.get(id.as_ref())
.and_then(|preset| preset.tools.get(&tool.name))
.copied()
.unwrap_or(false),
.unwrap_or(self.profile.enable_all_context_servers),
};
Some(

View File

@@ -2,6 +2,8 @@ use assistant_settings::AssistantSettings;
use fs::Fs;
use gpui::{Entity, FocusHandle, SharedString};
use crate::Thread;
use language_model::{ConfiguredModel, LanguageModelRegistry};
use language_model_selector::{
LanguageModelSelector, LanguageModelSelectorPopoverMenu, ToggleModelSelector,
};
@@ -9,7 +11,11 @@ use settings::update_settings_file;
use std::sync::Arc;
use ui::{ButtonLike, PopoverMenuHandle, Tooltip, prelude::*};
pub use language_model_selector::ModelType;
#[derive(Clone)]
pub enum ModelType {
Default(Entity<Thread>),
InlineAssistant,
}
pub struct AssistantModelSelector {
selector: Entity<LanguageModelSelector>,
@@ -24,18 +30,39 @@ impl AssistantModelSelector {
focus_handle: FocusHandle,
model_type: ModelType,
window: &mut Window,
cx: &mut App,
cx: &mut Context<Self>,
) -> Self {
Self {
selector: cx.new(|cx| {
selector: cx.new(move |cx| {
let fs = fs.clone();
LanguageModelSelector::new(
{
let model_type = model_type.clone();
move |cx| match &model_type {
ModelType::Default(thread) => thread.read(cx).configured_model(),
ModelType::InlineAssistant => {
LanguageModelRegistry::read_global(cx).inline_assistant_model()
}
}
},
move |model, cx| {
let provider = model.provider_id().0.to_string();
let model_id = model.id().0.to_string();
match model_type {
ModelType::Default => {
match &model_type {
ModelType::Default(thread) => {
thread.update(cx, |thread, cx| {
let registry = LanguageModelRegistry::read_global(cx);
if let Some(provider) = registry.provider(&model.provider_id())
{
thread.set_configured_model(
Some(ConfiguredModel {
provider,
model: model.clone(),
}),
cx,
);
}
});
update_settings_file::<AssistantSettings>(
fs.clone(),
cx,
@@ -58,7 +85,6 @@ impl AssistantModelSelector {
}
}
},
model_type,
window,
cx,
)

View File

@@ -5,8 +5,9 @@ use std::time::Duration;
use anyhow::{Result, anyhow};
use assistant_context_editor::{
AssistantPanelDelegate, ConfigurationError, ContextEditor, SlashCommandCompletionProvider,
humanize_token_count, make_lsp_adapter_delegate, render_remaining_tokens,
AssistantContext, AssistantPanelDelegate, ConfigurationError, ContextEditor, ContextEvent,
SlashCommandCompletionProvider, humanize_token_count, make_lsp_adapter_delegate,
render_remaining_tokens,
};
use assistant_settings::{AssistantDockPosition, AssistantSettings};
use assistant_slash_command::SlashCommandWorkingSet;
@@ -116,6 +117,8 @@ enum ActiveView {
},
PromptEditor {
context_editor: Entity<ContextEditor>,
title_editor: Entity<Editor>,
_subscriptions: Vec<gpui::Subscription>,
},
History,
Configuration,
@@ -176,6 +179,83 @@ impl ActiveView {
_subscriptions: subscriptions,
}
}
pub fn prompt_editor(
context_editor: Entity<ContextEditor>,
window: &mut Window,
cx: &mut App,
) -> Self {
let title = context_editor.read(cx).title(cx).to_string();
let editor = cx.new(|cx| {
let mut editor = Editor::single_line(window, cx);
editor.set_text(title, window, cx);
editor
});
// This is a workaround for `editor.set_text` emitting a `BufferEdited` event, which would
// cause a custom summary to be set. The presence of this custom summary would cause
// summarization to not happen.
let mut suppress_first_edit = true;
let subscriptions = vec![
window.subscribe(&editor, cx, {
{
let context_editor = context_editor.clone();
move |editor, event, window, cx| match event {
EditorEvent::BufferEdited => {
if suppress_first_edit {
suppress_first_edit = false;
return;
}
let new_summary = editor.read(cx).text(cx);
context_editor.update(cx, |context_editor, cx| {
context_editor
.context()
.update(cx, |assistant_context, cx| {
assistant_context.set_custom_summary(new_summary, cx);
})
})
}
EditorEvent::Blurred => {
if editor.read(cx).text(cx).is_empty() {
let summary = context_editor
.read(cx)
.context()
.read(cx)
.summary_or_default();
editor.update(cx, |editor, cx| {
editor.set_text(summary, window, cx);
});
}
}
_ => {}
}
}
}),
window.subscribe(&context_editor.read(cx).context().clone(), cx, {
let editor = editor.clone();
move |assistant_context, event, window, cx| match event {
ContextEvent::SummaryGenerated => {
let summary = assistant_context.read(cx).summary_or_default();
editor.update(cx, |editor, cx| {
editor.set_text(summary, window, cx);
})
}
_ => {}
}
}),
];
Self::PromptEditor {
context_editor,
title_editor: editor,
_subscriptions: subscriptions,
}
}
}
pub struct AssistantPanel {
@@ -188,6 +268,7 @@ pub struct AssistantPanel {
thread: Entity<ActiveThread>,
message_editor: Entity<MessageEditor>,
_active_thread_subscriptions: Vec<Subscription>,
_default_model_subscription: Subscription,
context_store: Entity<assistant_context_editor::ContextStore>,
prompt_store: Option<Entity<PromptStore>>,
configuration: Option<Entity<AssistantConfiguration>>,
@@ -221,8 +302,8 @@ impl AssistantPanel {
ThreadStore::load(
project,
tools.clone(),
prompt_store.clone(),
prompt_builder.clone(),
prompt_store.clone(),
cx,
)
})?
@@ -328,6 +409,20 @@ impl AssistantPanel {
}
});
let _default_model_subscription = cx.subscribe(
&LanguageModelRegistry::global(cx),
|this, _, event: &language_model::Event, cx| match event {
language_model::Event::DefaultModelChanged => {
this.thread
.read(cx)
.thread()
.clone()
.update(cx, |thread, cx| thread.get_or_init_configured_model(cx));
}
_ => {}
},
);
Self {
active_view,
workspace,
@@ -343,6 +438,7 @@ impl AssistantPanel {
active_thread_subscription,
message_editor_subscription,
],
_default_model_subscription,
context_store,
prompt_store,
configuration: None,
@@ -502,9 +598,7 @@ impl AssistantPanel {
});
self.set_active_view(
ActiveView::PromptEditor {
context_editor: context_editor.clone(),
},
ActiveView::prompt_editor(context_editor.clone(), window, cx),
window,
cx,
);
@@ -578,10 +672,9 @@ impl AssistantPanel {
cx,
)
});
this.set_active_view(
ActiveView::PromptEditor {
context_editor: editor,
},
ActiveView::prompt_editor(editor.clone(), window, cx),
window,
cx,
);
@@ -821,7 +914,7 @@ impl AssistantPanel {
pub(crate) fn active_context_editor(&self) -> Option<Entity<ContextEditor>> {
match &self.active_view {
ActiveView::PromptEditor { context_editor } => Some(context_editor.clone()),
ActiveView::PromptEditor { context_editor, .. } => Some(context_editor.clone()),
_ => None,
}
}
@@ -864,7 +957,7 @@ impl Focusable for AssistantPanel {
match &self.active_view {
ActiveView::Thread { .. } => self.message_editor.focus_handle(cx),
ActiveView::History => self.history.focus_handle(cx),
ActiveView::PromptEditor { context_editor } => context_editor.focus_handle(cx),
ActiveView::PromptEditor { context_editor, .. } => context_editor.focus_handle(cx),
ActiveView::Configuration => {
if let Some(configuration) = self.configuration.as_ref() {
configuration.focus_handle(cx)
@@ -988,9 +1081,34 @@ impl AssistantPanel {
.into_any_element()
}
}
ActiveView::PromptEditor { context_editor } => {
let title = SharedString::from(context_editor.read(cx).title(cx).to_string());
Label::new(title).ml_2().truncate().into_any_element()
ActiveView::PromptEditor {
title_editor,
context_editor,
..
} => {
let context_editor = context_editor.read(cx);
let summary = context_editor.context().read(cx).summary();
match summary {
None => Label::new(AssistantContext::DEFAULT_SUMMARY.clone())
.truncate()
.ml_2()
.into_any_element(),
Some(summary) => {
if summary.done {
div()
.ml_2()
.w_full()
.child(title_editor.clone())
.into_any_element()
} else {
Label::new(LOADING_SUMMARY_PLACEHOLDER)
.ml_2()
.truncate()
.into_any_element()
}
}
}
}
ActiveView::History => Label::new("History").truncate().into_any_element(),
ActiveView::Configuration => Label::new("Settings").truncate().into_any_element(),
@@ -1172,12 +1290,13 @@ impl AssistantPanel {
let is_generating = thread.is_generating();
let message_editor = self.message_editor.read(cx);
let conversation_token_usage = thread.total_token_usage(cx);
let conversation_token_usage = thread.total_token_usage()?;
let (total_token_usage, is_estimating) = if let Some((editing_message_id, unsent_tokens)) =
self.thread.read(cx).editing_message_id()
{
let combined = thread
.token_usage_up_to_message(editing_message_id, cx)
.token_usage_up_to_message(editing_message_id)
.add(unsent_tokens);
(combined, unsent_tokens > 0)
@@ -1263,7 +1382,7 @@ impl AssistantPanel {
Some(token_count)
}
ActiveView::PromptEditor { context_editor } => {
ActiveView::PromptEditor { context_editor, .. } => {
let element = render_remaining_tokens(context_editor, cx)?;
Some(element.into_any_element())
@@ -1871,7 +1990,9 @@ impl Render for AssistantPanel {
.child(h_flex().child(self.message_editor.clone()))
.children(self.render_last_error(cx)),
ActiveView::History => parent.child(self.history.clone()),
ActiveView::PromptEditor { context_editor } => parent.child(context_editor.clone()),
ActiveView::PromptEditor { context_editor, .. } => {
parent.child(context_editor.clone())
}
ActiveView::Configuration => parent.children(self.configuration.clone()),
})
}

View File

@@ -460,6 +460,7 @@ impl CodegenAlternative {
LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
tools: Vec::new(),
stop: Vec::new(),
temperature: None,
@@ -503,7 +504,7 @@ impl CodegenAlternative {
}
}
let http_client = cx.http_client().clone();
let http_client = cx.http_client();
let telemetry = self.telemetry.clone();
let language_name = {
let multibuffer = self.buffer.read(cx);

File diff suppressed because it is too large

View File

@@ -388,7 +388,7 @@ impl ContextPicker {
ContextMenuItem::custom_entry(
move |_window, cx| {
render_file_context_entry(
ElementId::NamedInteger("ctx-recent".into(), ix),
ElementId::named_usize("ctx-recent", ix),
worktree_id,
&path,
&path_prefix,

View File

@@ -708,7 +708,7 @@ impl CompletionProvider for ContextPickerCompletionProvider {
let thread_store = self.thread_store.clone();
let editor = self.editor.clone();
let http_client = workspace.read(cx).client().http_client().clone();
let http_client = workspace.read(cx).client().http_client();
let MentionCompletion { mode, argument, .. } = state;
let query = argument.unwrap_or_else(|| "".to_string());
@@ -1045,6 +1045,10 @@ mod tests {
fn include_in_nav_history() -> bool {
false
}
fn tab_content_text(&self, _detail: usize, _cx: &App) -> SharedString {
"Test".into()
}
}
impl EventEmitter<()> for AtMentionEditor {}

View File

@@ -193,7 +193,7 @@ impl PickerDelegate for FetchContextPickerDelegate {
return;
};
let http_client = workspace.read(cx).client().http_client().clone();
let http_client = workspace.read(cx).client().http_client();
let url = self.url.clone();
cx.spawn_in(window, async move |this, cx| {
let text = cx

View File

@@ -169,7 +169,7 @@ impl PickerDelegate for FileContextPickerDelegate {
.inset(true)
.toggle_state(selected)
.child(render_file_context_entry(
ElementId::NamedInteger("file-ctx-picker".into(), ix),
ElementId::named_usize("file-ctx-picker", ix),
WorktreeId::from_usize(mat.worktree_id),
&mat.path,
&mat.path_prefix,

View File

@@ -171,10 +171,7 @@ impl PickerDelegate for SymbolContextPickerDelegate {
let mat = &self.matches[ix];
Some(ListItem::new(ix).inset(true).toggle_state(selected).child(
render_symbol_context_entry(
ElementId::NamedInteger("symbol-ctx-picker".into(), ix),
mat,
),
render_symbol_context_entry(ElementId::named_usize("symbol-ctx-picker", ix), mat),
))
}
}

View File

@@ -4,21 +4,21 @@ use std::sync::Arc;
use anyhow::{Result, anyhow};
use collections::{HashSet, IndexSet};
use futures::future::join_all;
use futures::{self, FutureExt};
use gpui::{App, Context, Entity, Image, SharedString, Task, WeakEntity};
use language::Buffer;
use language_model::LanguageModelImage;
use project::image_store::is_image_file;
use project::{Project, ProjectItem, ProjectPath, Symbol};
use prompt_store::UserPromptId;
use ref_cast::RefCast as _;
use text::{Anchor, OffsetRangeExt};
use util::ResultExt as _;
use crate::ThreadStore;
use crate::context::{
AgentContext, AgentContextKey, ContextId, DirectoryContext, FetchedUrlContext, FileContext,
ImageContext, RulesContext, SelectionContext, SymbolContext, ThreadContext,
AgentContextHandle, AgentContextKey, ContextId, DirectoryContextHandle, FetchedUrlContext,
FileContextHandle, ImageContext, RulesContextHandle, SelectionContextHandle,
SymbolContextHandle, ThreadContextHandle,
};
use crate::context_strip::SuggestedContext;
use crate::thread::{Thread, ThreadId};
@@ -26,7 +26,6 @@ use crate::thread::{Thread, ThreadId};
pub struct ContextStore {
project: WeakEntity<Project>,
thread_store: Option<WeakEntity<ThreadStore>>,
thread_summary_tasks: Vec<Task<()>>,
next_context_id: ContextId,
context_set: IndexSet<AgentContextKey>,
context_thread_ids: HashSet<ThreadId>,
@@ -40,14 +39,13 @@ impl ContextStore {
Self {
project,
thread_store,
thread_summary_tasks: Vec::new(),
next_context_id: ContextId::zero(),
context_set: IndexSet::default(),
context_thread_ids: HashSet::default(),
}
}
pub fn context(&self) -> impl Iterator<Item = &AgentContext> {
pub fn context(&self) -> impl Iterator<Item = &AgentContextHandle> {
self.context_set.iter().map(|entry| entry.as_ref())
}
@@ -56,11 +54,16 @@ impl ContextStore {
self.context_thread_ids.clear();
}
pub fn new_context_for_thread(&self, thread: &Thread) -> Vec<AgentContext> {
pub fn new_context_for_thread(&self, thread: &Thread) -> Vec<AgentContextHandle> {
let existing_context = thread
.messages()
.flat_map(|message| &message.loaded_context.contexts)
.map(AgentContextKey::ref_cast)
.flat_map(|message| {
message
.loaded_context
.contexts
.iter()
.map(|context| AgentContextKey(context.handle()))
})
.collect::<HashSet<_>>();
self.context_set
.iter()
@@ -79,15 +82,19 @@ impl ContextStore {
return Task::ready(Err(anyhow!("failed to read project")));
};
cx.spawn(async move |this, cx| {
let open_buffer_task = project.update(cx, |project, cx| {
project.open_buffer(project_path.clone(), cx)
})?;
let buffer = open_buffer_task.await?;
this.update(cx, |this, cx| {
this.add_file_from_buffer(&project_path, buffer, remove_if_exists, cx)
if is_image_file(&project, &project_path, cx) {
self.add_image_from_path(project_path, remove_if_exists, cx)
} else {
cx.spawn(async move |this, cx| {
let open_buffer_task = project.update(cx, |project, cx| {
project.open_buffer(project_path.clone(), cx)
})?;
let buffer = open_buffer_task.await?;
this.update(cx, |this, cx| {
this.add_file_from_buffer(&project_path, buffer, remove_if_exists, cx)
})
})
})
}
}
pub fn add_file_from_buffer(
@@ -98,7 +105,7 @@ impl ContextStore {
cx: &mut Context<Self>,
) {
let context_id = self.next_context_id.post_inc();
let context = AgentContext::File(FileContext { buffer, context_id });
let context = AgentContextHandle::File(FileContextHandle { buffer, context_id });
let already_included = if self.has_context(&context) {
if remove_if_exists {
@@ -133,7 +140,7 @@ impl ContextStore {
};
let context_id = self.next_context_id.post_inc();
let context = AgentContext::Directory(DirectoryContext {
let context = AgentContextHandle::Directory(DirectoryContextHandle {
entry_id,
context_id,
});
@@ -159,7 +166,7 @@ impl ContextStore {
cx: &mut Context<Self>,
) -> bool {
let context_id = self.next_context_id.post_inc();
let context = AgentContext::Symbol(SymbolContext {
let context = AgentContextHandle::Symbol(SymbolContextHandle {
buffer,
symbol,
range,
@@ -184,7 +191,7 @@ impl ContextStore {
cx: &mut Context<Self>,
) {
let context_id = self.next_context_id.post_inc();
let context = AgentContext::Thread(ThreadContext { thread, context_id });
let context = AgentContextHandle::Thread(ThreadContextHandle { thread, context_id });
if self.has_context(&context) {
if remove_if_exists {
@@ -195,41 +202,6 @@ impl ContextStore {
}
}
fn start_summarizing_thread_if_needed(
&mut self,
thread: &Entity<Thread>,
cx: &mut Context<Self>,
) {
if let Some(summary_task) =
thread.update(cx, |thread, cx| thread.generate_detailed_summary(cx))
{
let thread = thread.clone();
let thread_store = self.thread_store.clone();
self.thread_summary_tasks.push(cx.spawn(async move |_, cx| {
summary_task.await;
if let Some(thread_store) = thread_store {
// Save thread so its summary can be reused later
let save_task = thread_store
.update(cx, |thread_store, cx| thread_store.save_thread(&thread, cx));
if let Some(save_task) = save_task.ok() {
save_task.await.log_err();
}
}
}));
}
}
pub fn wait_for_summaries(&mut self, cx: &App) -> Task<()> {
let tasks = std::mem::take(&mut self.thread_summary_tasks);
cx.spawn(async move |_cx| {
join_all(tasks).await;
})
}
pub fn add_rules(
&mut self,
prompt_id: UserPromptId,
@@ -237,7 +209,7 @@ impl ContextStore {
cx: &mut Context<ContextStore>,
) {
let context_id = self.next_context_id.post_inc();
let context = AgentContext::Rules(RulesContext {
let context = AgentContextHandle::Rules(RulesContextHandle {
prompt_id,
context_id,
});
@@ -257,7 +229,7 @@ impl ContextStore {
text: impl Into<SharedString>,
cx: &mut Context<ContextStore>,
) {
let context = AgentContext::FetchedUrl(FetchedUrlContext {
let context = AgentContextHandle::FetchedUrl(FetchedUrlContext {
url: url.into(),
text: text.into(),
context_id: self.next_context_id.post_inc(),
@@ -266,13 +238,55 @@ impl ContextStore {
self.insert_context(context, cx);
}
pub fn add_image(&mut self, image: Arc<Image>, cx: &mut Context<ContextStore>) {
pub fn add_image_from_path(
&mut self,
project_path: ProjectPath,
remove_if_exists: bool,
cx: &mut Context<ContextStore>,
) -> Task<Result<()>> {
let project = self.project.clone();
cx.spawn(async move |this, cx| {
let open_image_task = project.update(cx, |project, cx| {
project.open_image(project_path.clone(), cx)
})?;
let image_item = open_image_task.await?;
let image = image_item.read_with(cx, |image_item, _| image_item.image.clone())?;
this.update(cx, |this, cx| {
this.insert_image(
Some(image_item.read(cx).project_path(cx)),
image,
remove_if_exists,
cx,
);
})
})
}
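/// Adds an image context that is not associated with any project file (no project path).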
pub fn add_image_instance(&mut self, image: Arc<Image>, cx: &mut Context<ContextStore>) {
self.insert_image(None, image, false, cx);
}
fn insert_image(
&mut self,
project_path: Option<ProjectPath>,
image: Arc<Image>,
remove_if_exists: bool,
cx: &mut Context<ContextStore>,
) {
let image_task = LanguageModelImage::from_image(image.clone(), cx).shared();
let context = AgentContext::Image(ImageContext {
let context = AgentContextHandle::Image(ImageContext {
project_path,
original_image: image,
image_task,
context_id: self.next_context_id.post_inc(),
});
if self.has_context(&context) {
if remove_if_exists {
self.remove_context(&context, cx);
return;
}
}
self.insert_context(context, cx);
}
@@ -283,7 +297,7 @@ impl ContextStore {
cx: &mut Context<ContextStore>,
) {
let context_id = self.next_context_id.post_inc();
let context = AgentContext::Selection(SelectionContext {
let context = AgentContextHandle::Selection(SelectionContextHandle {
buffer,
range,
context_id,
@@ -304,14 +318,17 @@ impl ContextStore {
} => {
if let Some(buffer) = buffer.upgrade() {
let context_id = self.next_context_id.post_inc();
self.insert_context(AgentContext::File(FileContext { buffer, context_id }), cx);
self.insert_context(
AgentContextHandle::File(FileContextHandle { buffer, context_id }),
cx,
);
};
}
SuggestedContext::Thread { thread, name: _ } => {
if let Some(thread) = thread.upgrade() {
let context_id = self.next_context_id.post_inc();
self.insert_context(
AgentContext::Thread(ThreadContext { thread, context_id }),
AgentContextHandle::Thread(ThreadContextHandle { thread, context_id }),
cx,
);
}
@@ -319,12 +336,18 @@ impl ContextStore {
}
}
fn insert_context(&mut self, context: AgentContext, cx: &mut Context<Self>) -> bool {
fn insert_context(&mut self, context: AgentContextHandle, cx: &mut Context<Self>) -> bool {
match &context {
AgentContext::Thread(thread_context) => {
self.context_thread_ids
.insert(thread_context.thread.read(cx).id().clone());
self.start_summarizing_thread_if_needed(&thread_context.thread, cx);
AgentContextHandle::Thread(thread_context) => {
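// Thread context triggers detailed summarization, which needs a thread store to persist the result; without one the context is not added.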
if let Some(thread_store) = self.thread_store.clone() {
thread_context.thread.update(cx, |thread, cx| {
thread.start_generating_detailed_summary_if_needed(thread_store, cx);
});
self.context_thread_ids
.insert(thread_context.thread.read(cx).id().clone());
} else {
return false;
}
}
_ => {}
}
@@ -335,13 +358,13 @@ impl ContextStore {
inserted
}
pub fn remove_context(&mut self, context: &AgentContext, cx: &mut Context<Self>) {
pub fn remove_context(&mut self, context: &AgentContextHandle, cx: &mut Context<Self>) {
if self
.context_set
.shift_remove(AgentContextKey::ref_cast(context))
{
match context {
AgentContext::Thread(thread_context) => {
AgentContextHandle::Thread(thread_context) => {
self.context_thread_ids
.remove(thread_context.thread.read(cx).id());
}
@@ -351,7 +374,7 @@ impl ContextStore {
}
}
pub fn has_context(&mut self, context: &AgentContext) -> bool {
pub fn has_context(&mut self, context: &AgentContextHandle) -> bool {
self.context_set
.contains(AgentContextKey::ref_cast(context))
}
@@ -361,8 +384,13 @@ impl ContextStore {
pub fn file_path_included(&self, path: &ProjectPath, cx: &App) -> Option<FileInclusion> {
let project = self.project.upgrade()?.read(cx);
self.context().find_map(|context| match context {
AgentContext::File(file_context) => FileInclusion::check_file(file_context, path, cx),
AgentContext::Directory(directory_context) => {
AgentContextHandle::File(file_context) => {
FileInclusion::check_file(file_context, path, cx)
}
AgentContextHandle::Image(image_context) => {
FileInclusion::check_image(image_context, path)
}
AgentContextHandle::Directory(directory_context) => {
FileInclusion::check_directory(directory_context, path, project, cx)
}
_ => None,
@@ -376,7 +404,7 @@ impl ContextStore {
) -> Option<FileInclusion> {
let project = self.project.upgrade()?.read(cx);
self.context().find_map(|context| match context {
AgentContext::Directory(directory_context) => {
AgentContextHandle::Directory(directory_context) => {
FileInclusion::check_directory(directory_context, path, project, cx)
}
_ => None,
@@ -385,7 +413,7 @@ impl ContextStore {
pub fn includes_symbol(&self, symbol: &Symbol, cx: &App) -> bool {
self.context().any(|context| match context {
AgentContext::Symbol(context) => {
AgentContextHandle::Symbol(context) => {
if context.symbol != symbol.name {
return false;
}
@@ -410,7 +438,7 @@ impl ContextStore {
pub fn includes_user_rules(&self, prompt_id: UserPromptId) -> bool {
self.context_set
.contains(&RulesContext::lookup_key(prompt_id))
.contains(&RulesContextHandle::lookup_key(prompt_id))
}
pub fn includes_url(&self, url: impl Into<SharedString>) -> bool {
@@ -421,17 +449,17 @@ impl ContextStore {
pub fn file_paths(&self, cx: &App) -> HashSet<ProjectPath> {
self.context()
.filter_map(|context| match context {
AgentContext::File(file) => {
AgentContextHandle::File(file) => {
let buffer = file.buffer.read(cx);
buffer.project_path(cx)
}
AgentContext::Directory(_)
| AgentContext::Symbol(_)
| AgentContext::Selection(_)
| AgentContext::FetchedUrl(_)
| AgentContext::Thread(_)
| AgentContext::Rules(_)
| AgentContext::Image(_) => None,
AgentContextHandle::Directory(_)
| AgentContextHandle::Symbol(_)
| AgentContextHandle::Selection(_)
| AgentContextHandle::FetchedUrl(_)
| AgentContextHandle::Thread(_)
| AgentContextHandle::Rules(_)
| AgentContextHandle::Image(_) => None,
})
.collect()
}
@@ -447,7 +475,7 @@ pub enum FileInclusion {
}
impl FileInclusion {
fn check_file(file_context: &FileContext, path: &ProjectPath, cx: &App) -> Option<Self> {
fn check_file(file_context: &FileContextHandle, path: &ProjectPath, cx: &App) -> Option<Self> {
let file_path = file_context.buffer.read(cx).project_path(cx)?;
if path == &file_path {
Some(FileInclusion::Direct)
@@ -456,8 +484,17 @@ impl FileInclusion {
}
}
fn check_image(image_context: &ImageContext, path: &ProjectPath) -> Option<Self> {
let image_path = image_context.project_path.as_ref()?;
if path == image_path {
Some(FileInclusion::Direct)
} else {
None
}
}
fn check_directory(
directory_context: &DirectoryContext,
directory_context: &DirectoryContextHandle,
path: &ProjectPath,
project: &Project,
cx: &App,


@@ -14,7 +14,7 @@ use project::ProjectItem;
use ui::{KeyBinding, PopoverMenu, PopoverMenuHandle, Tooltip, prelude::*};
use workspace::Workspace;
use crate::context::{AgentContext, ContextKind};
use crate::context::{AgentContextHandle, ContextKind};
use crate::context_picker::ContextPicker;
use crate::context_store::ContextStore;
use crate::thread::Thread;
@@ -92,7 +92,9 @@ impl ContextStrip {
self.context_store
.read(cx)
.context()
.flat_map(|context| AddedContext::new(context.clone(), prompt_store, project, cx))
.flat_map(|context| {
AddedContext::new_pending(context.clone(), prompt_store, project, cx)
})
.collect::<Vec<_>>()
} else {
Vec::new()
@@ -288,7 +290,7 @@ impl ContextStrip {
best.map(|(index, _, _)| index)
}
fn open_context(&mut self, context: &AgentContext, window: &mut Window, cx: &mut App) {
fn open_context(&mut self, context: &AgentContextHandle, window: &mut Window, cx: &mut App) {
let Some(workspace) = self.workspace.upgrade() else {
return;
};
@@ -309,7 +311,7 @@ impl ContextStrip {
};
self.context_store.update(cx, |this, cx| {
this.remove_context(&context.context, cx);
this.remove_context(&context.handle, cx);
});
let is_now_empty = added_contexts.len() == 1;
@@ -462,7 +464,7 @@ impl Render for ContextStrip {
.enumerate()
.map(|(i, added_context)| {
let name = added_context.name.clone();
let context = added_context.context.clone();
let context = added_context.handle.clone();
ContextPill::added(
added_context,
dupe_names.contains(&name),


@@ -1,4 +1,4 @@
use crate::assistant_model_selector::AssistantModelSelector;
use crate::assistant_model_selector::{AssistantModelSelector, ModelType};
use crate::buffer_codegen::BufferCodegen;
use crate::context_picker::ContextPicker;
use crate::context_store::ContextStore;
@@ -20,7 +20,7 @@ use gpui::{
Focusable, FontWeight, Subscription, TextStyle, WeakEntity, Window, anchored, deferred, point,
};
use language_model::{LanguageModel, LanguageModelRegistry};
use language_model_selector::{ModelType, ToggleModelSelector};
use language_model_selector::ToggleModelSelector;
use parking_lot::Mutex;
use settings::Settings;
use std::cmp;


@@ -1,9 +1,10 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use crate::assistant_model_selector::ModelType;
use crate::assistant_model_selector::{AssistantModelSelector, ModelType};
use crate::context::{ContextLoadResult, load_context};
use crate::tool_compatibility::{IncompatibleToolsState, IncompatibleToolsTooltip};
use crate::ui::AnimatedLabel;
use buffer_diff::BufferDiff;
use collections::HashSet;
use editor::actions::{MoveUp, Paste};
@@ -11,6 +12,7 @@ use editor::{
ContextMenuOptions, ContextMenuPlacement, Editor, EditorElement, EditorEvent, EditorMode,
EditorStyle, MultiBuffer,
};
use feature_flags::{FeatureFlagAppExt, NewBillingFeatureFlag};
use file_icons::FileIcons;
use fs::Fs;
use futures::future::Shared;
@@ -20,7 +22,7 @@ use gpui::{
Task, TextStyle, WeakEntity, linear_color_stop, linear_gradient, point, pulsating_between,
};
use language::{Buffer, Language};
use language_model::{ConfiguredModel, LanguageModelRegistry, LanguageModelRequestMessage};
use language_model::{ConfiguredModel, LanguageModelRequestMessage, MessageContent};
use language_model_selector::ToggleModelSelector;
use multi_buffer;
use project::Project;
@@ -31,8 +33,8 @@ use theme::ThemeSettings;
use ui::{Disclosure, KeyBinding, PopoverMenuHandle, Tooltip, prelude::*};
use util::ResultExt as _;
use workspace::Workspace;
use zed_llm_client::CompletionMode;
use crate::assistant_model_selector::AssistantModelSelector;
use crate::context_picker::{ContextPicker, ContextPickerCompletionProvider};
use crate::context_store::ContextStore;
use crate::context_strip::{ContextStrip, ContextStripEvent, SuggestContextKind};
@@ -48,7 +50,6 @@ pub struct MessageEditor {
thread: Entity<Thread>,
incompatible_tools_state: Entity<IncompatibleToolsState>,
editor: Entity<Editor>,
#[allow(dead_code)]
workspace: WeakEntity<Workspace>,
project: Entity<Project>,
context_store: Entity<ContextStore>,
@@ -57,11 +58,10 @@ pub struct MessageEditor {
context_picker_menu_handle: PopoverMenuHandle<ContextPicker>,
model_selector: Entity<AssistantModelSelector>,
last_loaded_context: Option<ContextLoadResult>,
context_load_task: Option<Shared<Task<()>>>,
load_context_task: Option<Shared<Task<()>>>,
profile_selector: Entity<ProfileSelector>,
edits_expanded: bool,
editor_is_expanded: bool,
waiting_for_summaries_to_send: bool,
last_estimated_token_count: Option<usize>,
update_token_count_task: Option<Task<anyhow::Result<()>>>,
_subscriptions: Vec<Subscription>,
@@ -146,10 +146,22 @@ impl MessageEditor {
_ => {}
}),
cx.observe(&context_store, |this, _, cx| {
let _ = this.start_context_load(cx);
// When context changes, reload it for token counting.
let _ = this.reload_context(cx);
}),
];
let model_selector = cx.new(|cx| {
AssistantModelSelector::new(
fs.clone(),
model_selector_menu_handle,
editor.focus_handle(cx),
ModelType::Default(thread.clone()),
window,
cx,
)
});
Self {
editor: editor.clone(),
project: thread.read(cx).project().clone(),
@@ -160,21 +172,11 @@ impl MessageEditor {
prompt_store,
context_strip,
context_picker_menu_handle,
context_load_task: None,
load_context_task: None,
last_loaded_context: None,
model_selector: cx.new(|cx| {
AssistantModelSelector::new(
fs.clone(),
model_selector_menu_handle,
editor.focus_handle(cx),
ModelType::Default,
window,
cx,
)
}),
model_selector,
edits_expanded: false,
editor_is_expanded: false,
waiting_for_summaries_to_send: false,
profile_selector: cx
.new(|cx| ProfileSelector::new(fs, thread_store, editor.focus_handle(cx), cx)),
last_estimated_token_count: None,
@@ -242,6 +244,10 @@ impl MessageEditor {
return;
}
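// Cancel any in-progress message editing before sending.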
self.thread.update(cx, |thread, cx| {
thread.cancel_editing(cx);
});
if self.thread.read(cx).is_generating() {
self.stop_current_and_send_new_message(window, cx);
return;
@@ -257,15 +263,11 @@ impl MessageEditor {
self.editor.read(cx).text(cx).trim().is_empty()
}
fn is_model_selected(&self, cx: &App) -> bool {
LanguageModelRegistry::read_global(cx)
.default_model()
.is_some()
}
fn send_to_model(&mut self, window: &mut Window, cx: &mut Context<Self>) {
let model_registry = LanguageModelRegistry::read_global(cx);
let Some(ConfiguredModel { model, provider }) = model_registry.default_model() else {
let Some(ConfiguredModel { model, provider }) = self
.thread
.update(cx, |thread, cx| thread.get_or_init_configured_model(cx))
else {
return;
};
@@ -286,7 +288,7 @@ impl MessageEditor {
let thread = self.thread.clone();
let git_store = self.project.read(cx).git_store().clone();
let checkpoint = git_store.update(cx, |git_store, cx| git_store.checkpoint(cx));
let context_task = self.load_context(cx);
let context_task = self.reload_context(cx);
let window_handle = window.window_handle();
cx.spawn(async move |_this, cx| {
@@ -309,31 +311,11 @@ impl MessageEditor {
.detach();
}
fn wait_for_summaries(&mut self, cx: &mut Context<Self>) -> Task<()> {
let context_store = self.context_store.clone();
cx.spawn(async move |this, cx| {
if let Some(wait_for_summaries) = context_store
.update(cx, |context_store, cx| context_store.wait_for_summaries(cx))
.ok()
{
this.update(cx, |this, cx| {
this.waiting_for_summaries_to_send = true;
cx.notify();
})
.ok();
wait_for_summaries.await;
this.update(cx, |this, cx| {
this.waiting_for_summaries_to_send = false;
cx.notify();
})
.ok();
}
})
}
fn stop_current_and_send_new_message(&mut self, window: &mut Window, cx: &mut Context<Self>) {
self.thread.update(cx, |thread, cx| {
thread.cancel_editing(cx);
});
let cancelled = self.thread.update(cx, |thread, cx| {
thread.cancel_last_completion(Some(window.window_handle()), cx)
});
@@ -393,7 +375,7 @@ impl MessageEditor {
self.context_store.update(cx, |store, cx| {
for image in images {
store.add_image(Arc::new(image), cx);
store.add_image_instance(Arc::new(image), cx);
}
});
}
@@ -417,6 +399,36 @@ impl MessageEditor {
}
}
fn render_max_mode_toggle(&self, cx: &mut Context<Self>) -> Option<AnyElement> {
if !cx.has_flag::<NewBillingFeatureFlag>() {
return None;
}
let thread = self.thread.read(cx);
let model = thread.configured_model();
if !model?.model.supports_max_mode() {
return None;
}
let active_completion_mode = thread.completion_mode();
Some(
IconButton::new("max-mode", IconName::SquarePlus)
.icon_size(IconSize::Small)
.toggle_state(active_completion_mode == Some(CompletionMode::Max))
.on_click(cx.listener(move |this, _event, _window, cx| {
this.thread.update(cx, |thread, _cx| {
thread.set_completion_mode(match active_completion_mode {
Some(CompletionMode::Max) => Some(CompletionMode::Normal),
Some(CompletionMode::Normal) | None => Some(CompletionMode::Max),
});
});
}))
.tooltip(Tooltip::text("Max Mode"))
.into_any_element(),
)
}
fn render_editor(
&self,
font_size: Rems,
@@ -425,24 +437,21 @@ impl MessageEditor {
cx: &mut Context<Self>,
) -> Div {
let thread = self.thread.read(cx);
let model = thread.configured_model();
let editor_bg_color = cx.theme().colors().editor_background;
let is_generating = thread.is_generating();
let focus_handle = self.editor.focus_handle(cx);
let is_model_selected = self.is_model_selected(cx);
let is_model_selected = model.is_some();
let is_editor_empty = self.is_editor_empty(cx);
let model = LanguageModelRegistry::read_global(cx)
.default_model()
.map(|default| default.model.clone());
let incompatible_tools = model
.as_ref()
.map(|model| {
self.incompatible_tools_state.update(cx, |state, cx| {
state
.incompatible_tools(model, cx)
.incompatible_tools(&model.model, cx)
.iter()
.cloned()
.collect::<Vec<_>>()
@@ -486,32 +495,34 @@ impl MessageEditor {
.items_start()
.justify_between()
.child(self.context_strip.clone())
.child(
IconButton::new("toggle-height", expand_icon)
.icon_size(IconSize::XSmall)
.icon_color(Color::Muted)
.tooltip({
let focus_handle = focus_handle.clone();
move |window, cx| {
let expand_label = if is_editor_expanded {
"Minimize Message Editor".to_string()
} else {
"Expand Message Editor".to_string()
};
.when(focus_handle.is_focused(window), |this| {
this.child(
IconButton::new("toggle-height", expand_icon)
.icon_size(IconSize::XSmall)
.icon_color(Color::Muted)
.tooltip({
let focus_handle = focus_handle.clone();
move |window, cx| {
let expand_label = if is_editor_expanded {
"Minimize Message Editor".to_string()
} else {
"Expand Message Editor".to_string()
};
Tooltip::for_action_in(
expand_label,
&ExpandMessageEditor,
&focus_handle,
window,
cx,
)
}
})
.on_click(cx.listener(|_, _, window, cx| {
window.dispatch_action(Box::new(ExpandMessageEditor), cx);
})),
),
Tooltip::for_action_in(
expand_label,
&ExpandMessageEditor,
&focus_handle,
window,
cx,
)
}
})
.on_click(cx.listener(|_, _, window, cx| {
window.dispatch_action(Box::new(ExpandMessageEditor), cx);
})),
)
}),
)
.child(
v_flex()
@@ -577,6 +588,7 @@ impl MessageEditor {
}),
)
})
.children(self.render_max_mode_toggle(cx))
.child(self.model_selector.clone())
.map({
let focus_handle = focus_handle.clone();
@@ -629,31 +641,31 @@ impl MessageEditor {
})
.when(!is_editor_empty, |parent| {
parent.child(
IconButton::new("send-message", IconName::Send)
.icon_color(Color::Accent)
.style(ButtonStyle::Filled)
.disabled(
!is_model_selected
|| self
.waiting_for_summaries_to_send,
)
.on_click({
let focus_handle = focus_handle.clone();
move |_event, window, cx| {
focus_handle.dispatch_action(
&Chat, window, cx,
);
}
})
.tooltip(move |window, cx| {
Tooltip::for_action(
"Stop and Send New Message",
&Chat,
window,
cx,
IconButton::new(
"send-message",
IconName::Send,
)
}),
)
.icon_color(Color::Accent)
.style(ButtonStyle::Filled)
.disabled(!is_model_selected)
.on_click({
let focus_handle =
focus_handle.clone();
move |_event, window, cx| {
focus_handle.dispatch_action(
&Chat, window, cx,
);
}
})
.tooltip(move |window, cx| {
Tooltip::for_action(
"Stop and Send New Message",
&Chat,
window,
cx,
)
}),
)
})
} else {
parent.child(
@@ -661,10 +673,7 @@ impl MessageEditor {
.icon_color(Color::Accent)
.style(ButtonStyle::Filled)
.disabled(
is_editor_empty
|| !is_model_selected
|| self
.waiting_for_summaries_to_send,
is_editor_empty || !is_model_selected,
)
.on_click({
let focus_handle = focus_handle.clone();
@@ -715,9 +724,12 @@ impl MessageEditor {
let border_color = cx.theme().colors().border;
let active_color = cx.theme().colors().element_selected;
let bg_edit_files_disclosure = editor_bg_color.blend(active_color.opacity(0.3));
let is_edit_changes_expanded = self.edits_expanded;
let is_generating = self.thread.read(cx).is_generating();
v_flex()
.mt_1()
.mx_2()
.bg(bg_edit_files_disclosure)
.border_1()
@@ -752,25 +764,44 @@ impl MessageEditor {
cx.notify();
})),
)
.child(
Label::new("Edits")
.size(LabelSize::Small)
.color(Color::Muted),
)
.child(Label::new("•").size(LabelSize::XSmall).color(Color::Muted))
.child(
Label::new(format!(
"{} {}",
changed_buffers.len(),
if changed_buffers.len() == 1 {
"file"
} else {
"files"
}
))
.size(LabelSize::Small)
.color(Color::Muted),
),
.map(|this| {
if is_generating {
this.child(
AnimatedLabel::new(format!(
"Editing {} {}",
changed_buffers.len(),
if changed_buffers.len() == 1 {
"file"
} else {
"files"
}
))
.size(LabelSize::Small),
)
} else {
this.child(
Label::new("Edits")
.size(LabelSize::Small)
.color(Color::Muted),
)
.child(
Label::new("•").size(LabelSize::XSmall).color(Color::Muted),
)
.child(
Label::new(format!(
"{} {}",
changed_buffers.len(),
if changed_buffers.len() == 1 {
"file"
} else {
"files"
}
))
.size(LabelSize::Small)
.color(Color::Muted),
)
}
}),
)
.child(
Button::new("review", "Review Changes")
@@ -860,7 +891,7 @@ impl MessageEditor {
.justify_between()
.bg(cx.theme().colors().editor_background)
.hover(|style| style.bg(hover_color))
.when(index + 1 < changed_buffers.len(), |parent| {
.when(index < changed_buffers.len() - 1, |parent| {
parent.border_color(border_color).border_b_1()
})
.child(
@@ -876,9 +907,9 @@ impl MessageEditor {
.gap_0p5()
.children(name_label)
.children(parent_label),
) // TODO: show lines changed
.child(Label::new("+").color(Color::Created))
.child(Label::new("-").color(Color::Deleted)),
), // TODO: Implement line diff
// .child(Label::new("+").color(Color::Created))
// .child(Label::new("-").color(Color::Deleted)),
)
.child(
div().visible_on_hover("edited-code").child(
@@ -1006,16 +1037,8 @@ impl MessageEditor {
self.update_token_count_task.is_some()
}
fn handle_message_changed(&mut self, cx: &mut Context<Self>) {
self.message_or_context_changed(true, cx);
}
fn start_context_load(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
let summaries_task = self.wait_for_summaries(cx);
fn reload_context(&mut self, cx: &mut Context<Self>) -> Task<Option<ContextLoadResult>> {
let load_task = cx.spawn(async move |this, cx| {
// Waits for detailed summaries before `load_context`, as it directly reads these from
// the thread. TODO: Would be cleaner to have context loading await on summarization.
summaries_task.await;
let Ok(load_task) = this.update(cx, |this, cx| {
let new_context = this.context_store.read_with(cx, |context_store, cx| {
context_store.new_context_for_thread(this.thread.read(cx))
@@ -1027,32 +1050,31 @@ impl MessageEditor {
let result = load_task.await;
this.update(cx, |this, cx| {
this.last_loaded_context = Some(result);
this.context_load_task = None;
this.load_context_task = None;
this.message_or_context_changed(false, cx);
})
.ok();
});
// Replace existing load task, if any, causing it to be cancelled.
let load_task = load_task.shared();
self.context_load_task = Some(load_task.clone());
load_task
}
fn load_context(&mut self, cx: &mut Context<Self>) -> Task<Option<ContextLoadResult>> {
let context_load_task = self.start_context_load(cx);
self.load_context_task = Some(load_task.clone());
cx.spawn(async move |this, cx| {
context_load_task.await;
load_task.await;
this.read_with(cx, |this, _cx| this.last_loaded_context.clone())
.ok()
.flatten()
})
}
fn handle_message_changed(&mut self, cx: &mut Context<Self>) {
self.message_or_context_changed(true, cx);
}
fn message_or_context_changed(&mut self, debounce: bool, cx: &mut Context<Self>) {
cx.emit(MessageEditorEvent::Changed);
self.update_token_count_task.take();
let Some(default_model) = LanguageModelRegistry::read_global(cx).default_model() else {
let Some(model) = self.thread.read(cx).configured_model() else {
self.last_estimated_token_count.take();
return;
};
@@ -1089,16 +1111,23 @@ impl MessageEditor {
loaded_context.add_to_request_message(&mut request_message);
}
if !message_text.is_empty() {
request_message
.content
.push(MessageContent::Text(message_text));
}
let request = language_model::LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![request_message],
tools: vec![],
stop: vec![],
temperature: None,
};
Some(default_model.model.count_tokens(request, cx))
Some(model.model.count_tokens(request, cx))
})? {
task.await?
} else {
@@ -1130,8 +1159,11 @@ impl Focusable for MessageEditor {
impl Render for MessageEditor {
fn render(&mut self, window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
let thread = self.thread.read(cx);
let total_token_usage = thread.total_token_usage(cx);
let token_usage_ratio = total_token_usage.ratio();
let token_usage_ratio = thread
.total_token_usage()
.map_or(TokenUsageRatio::Normal, |total_token_usage| {
total_token_usage.ratio()
});
let action_log = self.thread.read(cx).action_log();
let changed_buffers = action_log.read(cx).changed_buffers(cx);
@@ -1141,41 +1173,6 @@ impl Render for MessageEditor {
v_flex()
.size_full()
.when(self.waiting_for_summaries_to_send, |parent| {
parent.child(
h_flex().py_3().w_full().justify_center().child(
h_flex()
.flex_none()
.px_2()
.py_2()
.bg(cx.theme().colors().editor_background)
.border_1()
.border_color(cx.theme().colors().border_variant)
.rounded_lg()
.shadow_md()
.gap_1()
.child(
Icon::new(IconName::ArrowCircle)
.size(IconSize::XSmall)
.color(Color::Muted)
.with_animation(
"arrow-circle",
Animation::new(Duration::from_secs(2)).repeat(),
|icon, delta| {
icon.transform(gpui::Transformation::rotate(
gpui::percentage(delta),
))
},
),
)
.child(
Label::new("Summarizing context…")
.size(LabelSize::XSmall)
.color(Color::Muted),
),
),
)
})
.when(changed_buffers.len() > 0, |parent| {
parent.child(self.render_changed_buffers(&changed_buffers, window, cx))
})


@@ -276,6 +276,7 @@ impl TerminalInlineAssistant {
LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![request_message],
tools: Vec::new(),
stop: Vec::new(),


@@ -14,19 +14,21 @@ use futures::future::Shared;
use futures::{FutureExt, StreamExt as _};
use git::repository::DiffType;
use gpui::{
AnyWindowHandle, App, AppContext, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
AnyWindowHandle, App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task,
WeakEntity,
};
use language_model::{
ConfiguredModel, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
LanguageModelId, LanguageModelKnownError, LanguageModelRegistry, LanguageModelRequest,
LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent,
ModelRequestLimitReachedError, PaymentRequiredError, RequestUsage, Role, StopReason,
TokenUsage,
ModelRequestLimitReachedError, PaymentRequiredError, RequestUsage, Role, SelectedModel,
StopReason, TokenUsage,
};
use postage::stream::Stream as _;
use project::Project;
use project::git_store::{GitStore, GitStoreCheckpoint, RepositoryState};
use prompt_store::PromptBuilder;
use prompt_store::{ModelContext, PromptBuilder};
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
@@ -34,11 +36,13 @@ use settings::Settings;
use thiserror::Error;
use util::{ResultExt as _, TryFutureExt as _, post_inc};
use uuid::Uuid;
use zed_llm_client::CompletionMode;
use crate::ThreadStore;
use crate::context::{AgentContext, ContextLoadResult, LoadedContext};
use crate::thread_store::{
SerializedMessage, SerializedMessageSegment, SerializedThread, SerializedToolResult,
SerializedToolUse, SharedProjectContext,
SerializedLanguageModel, SerializedMessage, SerializedMessageSegment, SerializedThread,
SerializedToolResult, SerializedToolUse, SharedProjectContext,
};
use crate::tool_use::{PendingToolUse, ToolUse, ToolUseMetadata, ToolUseState};
@@ -242,6 +246,16 @@ pub enum DetailedSummaryState {
},
}
impl DetailedSummaryState {
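/// Returns the generated summary text, if any.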
fn text(&self) -> Option<SharedString> {
if let Self::Generated { text, .. } = self {
Some(text.clone())
} else {
None
}
}
}
#[derive(Default)]
pub struct TotalTokenUsage {
pub total: usize,
@@ -258,7 +272,11 @@ impl TotalTokenUsage {
#[cfg(not(debug_assertions))]
let warning_threshold: f32 = 0.8;
if self.total >= self.max {
// When the maximum is unknown because there is no selected model,
// avoid showing the token limit warning.
if self.max == 0 {
TokenUsageRatio::Normal
} else if self.total >= self.max {
TokenUsageRatio::Exceeded
} else if self.total as f32 / self.max as f32 >= warning_threshold {
TokenUsageRatio::Warning
@@ -289,7 +307,10 @@ pub struct Thread {
updated_at: DateTime<Utc>,
summary: Option<SharedString>,
pending_summary: Task<Option<()>>,
detailed_summary_state: DetailedSummaryState,
detailed_summary_task: Task<Option<()>>,
detailed_summary_tx: postage::watch::Sender<DetailedSummaryState>,
detailed_summary_rx: postage::watch::Receiver<DetailedSummaryState>,
completion_mode: Option<CompletionMode>,
messages: Vec<Message>,
next_message_id: MessageId,
last_prompt_id: PromptId,
@@ -315,6 +336,7 @@ pub struct Thread {
Box<dyn FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>])>,
>,
remaining_turns: u32,
configured_model: Option<ConfiguredModel>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -333,12 +355,18 @@ impl Thread {
system_prompt: SharedProjectContext,
cx: &mut Context<Self>,
) -> Self {
let (detailed_summary_tx, detailed_summary_rx) = postage::watch::channel();
let configured_model = LanguageModelRegistry::read_global(cx).default_model();
Self {
id: ThreadId::new(),
updated_at: Utc::now(),
summary: None,
pending_summary: Task::ready(None),
detailed_summary_state: DetailedSummaryState::NotGenerated,
detailed_summary_task: Task::ready(None),
detailed_summary_tx,
detailed_summary_rx,
completion_mode: None,
messages: Vec::new(),
next_message_id: MessageId(0),
last_prompt_id: PromptId::new(),
@@ -367,6 +395,7 @@ impl Thread {
last_auto_capture_at: None,
request_callback: None,
remaining_turns: u32::MAX,
configured_model,
}
}
@@ -387,13 +416,31 @@ impl Thread {
.unwrap_or(0),
);
let tool_use = ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages);
let (detailed_summary_tx, detailed_summary_rx) =
postage::watch::channel_with(serialized.detailed_summary_state);
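// Restore the model that was serialized with the thread, falling back to the registry's default model if it is unavailable.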
let configured_model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
serialized
.model
.and_then(|model| {
let model = SelectedModel {
provider: model.provider.clone().into(),
model: model.model.clone().into(),
};
registry.select_model(&model, cx)
})
.or_else(|| registry.default_model())
});
Self {
id,
updated_at: serialized.updated_at,
summary: Some(serialized.summary),
pending_summary: Task::ready(None),
detailed_summary_state: serialized.detailed_summary_state,
detailed_summary_task: Task::ready(None),
detailed_summary_tx,
detailed_summary_rx,
completion_mode: None,
messages: serialized
.messages
.into_iter()
@@ -442,6 +489,7 @@ impl Thread {
last_auto_capture_at: None,
request_callback: None,
remaining_turns: u32::MAX,
configured_model,
}
}
@@ -481,6 +529,22 @@ impl Thread {
self.project_context.clone()
}
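/// Returns the configured model, initializing it from the registry's default model if none has been selected yet.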
pub fn get_or_init_configured_model(&mut self, cx: &App) -> Option<ConfiguredModel> {
if self.configured_model.is_none() {
self.configured_model = LanguageModelRegistry::read_global(cx).default_model();
}
self.configured_model.clone()
}
pub fn configured_model(&self) -> Option<ConfiguredModel> {
self.configured_model.clone()
}
pub fn set_configured_model(&mut self, model: Option<ConfiguredModel>, cx: &mut Context<Self>) {
self.configured_model = model;
cx.notify();
}
pub const DEFAULT_SUMMARY: SharedString = SharedString::new_static("New Thread");
pub fn summary_or_default(&self) -> SharedString {
@@ -505,17 +569,12 @@ impl Thread {
}
}
pub fn latest_detailed_summary_or_text(&self) -> SharedString {
self.latest_detailed_summary()
.unwrap_or_else(|| self.text().into())
pub fn completion_mode(&self) -> Option<CompletionMode> {
self.completion_mode
}
fn latest_detailed_summary(&self) -> Option<SharedString> {
if let DetailedSummaryState::Generated { text, .. } = &self.detailed_summary_state {
Some(text.clone())
} else {
None
}
pub fn set_completion_mode(&mut self, mode: Option<CompletionMode>) {
self.completion_mode = mode;
}
pub fn message(&self, id: MessageId) -> Option<&Message> {
@@ -728,6 +787,32 @@ impl Thread {
self.tool_use.tool_result_card(id).cloned()
}
/// Return tools that are both enabled and supported by the model
pub fn available_tools(
&self,
cx: &App,
model: Arc<dyn LanguageModel>,
) -> Vec<LanguageModelRequestTool> {
if model.supports_tools() {
self.tools()
.read(cx)
.enabled_tools(cx)
.into_iter()
.filter_map(|tool| {
// Skip tools that cannot be supported
let input_schema = tool.input_schema(model.tool_input_format()).ok()?;
Some(LanguageModelRequestTool {
name: tool.name(),
description: tool.description(),
input_schema,
})
})
.collect()
} else {
Vec::default()
}
}
pub fn insert_user_message(
&mut self,
text: impl Into<String>,
@@ -903,8 +988,15 @@ impl Thread {
initial_project_snapshot,
cumulative_token_usage: this.cumulative_token_usage,
request_token_usage: this.request_token_usage.clone(),
detailed_summary_state: this.detailed_summary_state.clone(),
detailed_summary_state: this.detailed_summary_rx.borrow().clone(),
exceeded_window_error: this.exceeded_window_error.clone(),
model: this
.configured_model
.as_ref()
.map(|model| SerializedLanguageModel {
provider: model.provider.id().0.to_string(),
model: model.model.id().0.to_string(),
}),
})
})
}
@@ -929,24 +1021,7 @@ impl Thread {
self.remaining_turns -= 1;
let mut request = self.to_completion_request(cx);
if model.supports_tools() {
request.tools = self
.tools()
.read(cx)
.enabled_tools(cx)
.into_iter()
.filter_map(|tool| {
// Skip tools that cannot be supported
let input_schema = tool.input_schema(model.tool_input_format()).ok()?;
Some(LanguageModelRequestTool {
name: tool.name(),
description: tool.description(),
input_schema,
})
})
.collect();
}
let request = self.to_completion_request(model.clone(), cx);
self.stream_completion(request, model, window, cx);
}
@@ -963,20 +1038,35 @@ impl Thread {
false
}
pub fn to_completion_request(&self, cx: &mut Context<Self>) -> LanguageModelRequest {
pub fn to_completion_request(
&self,
model: Arc<dyn LanguageModel>,
cx: &mut Context<Self>,
) -> LanguageModelRequest {
let mut request = LanguageModelRequest {
thread_id: Some(self.id.to_string()),
prompt_id: Some(self.last_prompt_id.to_string()),
mode: None,
messages: vec![],
tools: Vec::new(),
stop: Vec::new(),
temperature: None,
};
let available_tools = self.available_tools(cx, model.clone());
let available_tool_names = available_tools
.iter()
.map(|tool| tool.name.clone())
.collect();
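// The system prompt is generated with knowledge of which tools are available to the model.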
let model_context = &ModelContext {
available_tools: available_tool_names,
};
if let Some(project_context) = self.project_context.borrow().as_ref() {
match self
.prompt_builder
.generate_assistant_system_prompt(project_context)
.generate_assistant_system_prompt(project_context, model_context)
{
Err(err) => {
let message = format!("{err:?}").into();
@@ -1056,6 +1146,13 @@ impl Thread {
self.attached_tracked_files_state(&mut request.messages, cx);
request.tools = available_tools;
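// Only forward a completion mode when the selected model supports max mode.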
request.mode = if model.supports_max_mode() {
self.completion_mode
} else {
None
};
request
}
@@ -1063,6 +1160,7 @@ impl Thread {
let mut request = LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: vec![],
tools: Vec::new(),
stop: Vec::new(),
@@ -1356,7 +1454,7 @@ impl Thread {
match result.as_ref() {
Ok(stop_reason) => match stop_reason {
StopReason::ToolUse => {
let tool_uses = thread.use_pending_tools(window, cx);
let tool_uses = thread.use_pending_tools(window, cx, model.clone());
cx.emit(ThreadEvent::UsePendingTools { tool_uses });
}
StopReason::EndTurn => {}
@@ -1496,25 +1594,34 @@ impl Thread {
});
}
pub fn generate_detailed_summary(&mut self, cx: &mut Context<Self>) -> Option<Task<()>> {
let last_message_id = self.messages.last().map(|message| message.id)?;
pub fn start_generating_detailed_summary_if_needed(
&mut self,
thread_store: WeakEntity<ThreadStore>,
cx: &mut Context<Self>,
) {
let Some(last_message_id) = self.messages.last().map(|message| message.id) else {
return;
};
match &self.detailed_summary_state {
match &*self.detailed_summary_rx.borrow() {
DetailedSummaryState::Generating { message_id, .. }
| DetailedSummaryState::Generated { message_id, .. }
if *message_id == last_message_id =>
{
// Already up-to-date
return None;
return;
}
_ => {}
}
let ConfiguredModel { model, provider } =
LanguageModelRegistry::read_global(cx).thread_summary_model()?;
let Some(ConfiguredModel { model, provider }) =
LanguageModelRegistry::read_global(cx).thread_summary_model()
else {
return;
};
if !provider.is_authenticated(cx) {
return None;
return;
}
let added_user_message = "Generate a detailed summary of this conversation. Include:\n\
@@ -1526,16 +1633,24 @@ impl Thread {
let request = self.to_summarize_request(added_user_message.into());
let task = cx.spawn(async move |thread, cx| {
*self.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generating {
message_id: last_message_id,
};
// Replace the detailed summarization task if there is one, cancelling it. It would probably
// be better to allow the old task to complete, but this would require logic for choosing
// which result to prefer (the old task could complete after the new one, resulting in a
// stale summary).
self.detailed_summary_task = cx.spawn(async move |thread, cx| {
let stream = model.stream_completion_text(request, &cx);
let Some(mut messages) = stream.await.log_err() else {
thread
.update(cx, |this, _cx| {
this.detailed_summary_state = DetailedSummaryState::NotGenerated;
.update(cx, |thread, _cx| {
*thread.detailed_summary_tx.borrow_mut() =
DetailedSummaryState::NotGenerated;
})
.log_err();
return;
.ok()?;
return None;
};
let mut new_detailed_summary = String::new();
@@ -1547,25 +1662,56 @@ impl Thread {
}
thread
.update(cx, |this, _cx| {
this.detailed_summary_state = DetailedSummaryState::Generated {
.update(cx, |thread, _cx| {
*thread.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generated {
text: new_detailed_summary.into(),
message_id: last_message_id,
};
})
.log_err();
.ok()?;
// Save thread so its summary can be reused later
if let Some(thread) = thread.upgrade() {
if let Ok(Ok(save_task)) = cx.update(|cx| {
thread_store
.update(cx, |thread_store, cx| thread_store.save_thread(&thread, cx))
}) {
save_task.await.log_err();
}
}
Some(())
});
}
self.detailed_summary_state = DetailedSummaryState::Generating {
message_id: last_message_id,
};
pub async fn wait_for_detailed_summary_or_text(
this: &Entity<Self>,
cx: &mut AsyncApp,
) -> Option<SharedString> {
let mut detailed_summary_rx = this
.read_with(cx, |this, _cx| this.detailed_summary_rx.clone())
.ok()?;
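// Follow the watch channel: keep waiting while a summary is generating, return it once generated, and fall back to the thread's plain text if no summary is available.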
loop {
match detailed_summary_rx.recv().await? {
DetailedSummaryState::Generating { .. } => {}
DetailedSummaryState::NotGenerated => {
return this.read_with(cx, |this, _cx| this.text().into()).ok();
}
DetailedSummaryState::Generated { text, .. } => return Some(text),
}
}
}
Some(task)
pub fn latest_detailed_summary_or_text(&self) -> SharedString {
self.detailed_summary_rx
.borrow()
.text()
.unwrap_or_else(|| self.text().into())
}
pub fn is_generating_detailed_summary(&self) -> bool {
matches!(
self.detailed_summary_state,
&*self.detailed_summary_rx.borrow(),
DetailedSummaryState::Generating { .. }
)
}
@@ -1574,9 +1720,10 @@ impl Thread {
&mut self,
window: Option<AnyWindowHandle>,
cx: &mut Context<Self>,
model: Arc<dyn LanguageModel>,
) -> Vec<PendingToolUse> {
self.auto_capture_telemetry(cx);
let request = self.to_completion_request(cx);
let request = self.to_completion_request(model, cx);
let messages = Arc::new(request.messages);
let pending_tool_uses = self
.tool_use
@@ -1631,7 +1778,7 @@ impl Thread {
tool_use_id.clone(),
tool_name,
Err(anyhow!("Error parsing input JSON: {error}")),
cx,
self.configured_model.as_ref(),
);
let ui_text = if let Some(pending_tool_use) = &pending_tool_use {
pending_tool_use.ui_text.clone()
@@ -1706,7 +1853,7 @@ impl Thread {
tool_use_id.clone(),
tool_name,
output,
cx,
thread.configured_model.as_ref(),
);
thread.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
})
@@ -1724,10 +1871,9 @@ impl Thread {
cx: &mut Context<Self>,
) {
if self.all_tools_finished() {
let model_registry = LanguageModelRegistry::read_global(cx);
if let Some(ConfiguredModel { model, .. }) = model_registry.default_model() {
if let Some(ConfiguredModel { model, .. }) = self.configured_model.as_ref() {
if !canceled {
self.send_to_model(model, window, cx);
self.send_to_model(model.clone(), window, cx);
}
self.auto_capture_telemetry(cx);
}
@@ -1764,6 +1910,14 @@ impl Thread {
canceled
}
/// Signals that any in-progress editing should be canceled.
///
/// This method is used to notify listeners (like ActiveThread) that
/// they should cancel any editing operations.
pub fn cancel_editing(&mut self, cx: &mut Context<Self>) {
cx.emit(ThreadEvent::CancelEditing);
}
pub fn feedback(&self) -> Option<ThreadFeedback> {
self.feedback
}
@@ -1964,7 +2118,7 @@ impl Thread {
};
let remote_url = backend.remote_url("origin");
let head_sha = backend.head_sha();
let head_sha = backend.head_sha().await;
let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
GitState {
@@ -2144,8 +2298,8 @@ impl Thread {
self.cumulative_token_usage
}
pub fn token_usage_up_to_message(&self, message_id: MessageId, cx: &App) -> TotalTokenUsage {
let Some(model) = LanguageModelRegistry::read_global(cx).default_model() else {
pub fn token_usage_up_to_message(&self, message_id: MessageId) -> TotalTokenUsage {
let Some(model) = self.configured_model.as_ref() else {
return TotalTokenUsage::default();
};
@@ -2173,20 +2327,17 @@ impl Thread {
}
}
pub fn total_token_usage(&self, cx: &App) -> TotalTokenUsage {
let model_registry = LanguageModelRegistry::read_global(cx);
let Some(model) = model_registry.default_model() else {
return TotalTokenUsage::default();
};
pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
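// Without a configured model the context window is unknown, so there is no usage to report.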
let model = self.configured_model.as_ref()?;
let max = model.model.max_token_count();
if let Some(exceeded_error) = &self.exceeded_window_error {
if model.model.id() == exceeded_error.model_id {
return TotalTokenUsage {
return Some(TotalTokenUsage {
total: exceeded_error.token_count,
max,
};
});
}
}
@@ -2195,7 +2346,7 @@ impl Thread {
.unwrap_or_default()
.total_tokens() as usize;
TotalTokenUsage { total, max }
Some(TotalTokenUsage { total, max })
}
fn token_usage_at_last_message(&self) -> Option<TokenUsage> {
@@ -2226,8 +2377,12 @@ impl Thread {
"Permission to run tool action denied by user"
));
self.tool_use
.insert_tool_output(tool_use_id.clone(), tool_name, err, cx);
self.tool_use.insert_tool_output(
tool_use_id.clone(),
tool_name,
err,
self.configured_model.as_ref(),
);
self.tool_finished(tool_use_id.clone(), None, true, window, cx);
}
}
@@ -2282,6 +2437,7 @@ pub enum ThreadEvent {
},
CheckpointChanged,
ToolConfirmationNeeded,
CancelEditing,
}
impl EventEmitter<ThreadEvent> for Thread {}
@@ -2296,9 +2452,11 @@ mod tests {
use super::*;
use crate::{ThreadStore, context::load_context, context_store::ContextStore, thread_store};
use assistant_settings::AssistantSettings;
use assistant_tool::ToolRegistry;
use context_server::ContextServerSettings;
use editor::EditorSettings;
use gpui::TestAppContext;
use language_model::fake_provider::FakeLanguageModel;
use project::{FakeFs, Project};
use prompt_store::PromptBuilder;
use serde_json::json;
@@ -2318,7 +2476,7 @@ mod tests {
)
.await;
let (_workspace, _thread_store, thread, context_store) =
let (_workspace, _thread_store, thread, context_store, model) =
setup_test_environment(cx, project.clone()).await;
add_file_to_context(&project, &context_store, "test/code.rs", cx)
@@ -2369,7 +2527,9 @@ fn main() {{
assert_eq!(message.loaded_context.text, expected_context);
// Check message in request
let request = thread.update(cx, |thread, cx| thread.to_completion_request(cx));
let request = thread.update(cx, |thread, cx| {
thread.to_completion_request(model.clone(), cx)
});
assert_eq!(request.messages.len(), 2);
let expected_full_message = format!("{}Please explain this code", expected_context);
@@ -2390,7 +2550,7 @@ fn main() {{
)
.await;
let (_, _thread_store, thread, context_store) =
let (_, _thread_store, thread, context_store, model) =
setup_test_environment(cx, project.clone()).await;
// First message with context 1
@@ -2461,7 +2621,9 @@ fn main() {{
assert!(message3.loaded_context.text.contains("file3.rs"));
// Check entire request to make sure all contexts are properly included
let request = thread.update(cx, |thread, cx| thread.to_completion_request(cx));
let request = thread.update(cx, |thread, cx| {
thread.to_completion_request(model.clone(), cx)
});
// The request should contain all 3 messages
assert_eq!(request.messages.len(), 4);
@@ -2490,7 +2652,7 @@ fn main() {{
)
.await;
let (_, _thread_store, thread, _context_store) =
let (_, _thread_store, thread, _context_store, model) =
setup_test_environment(cx, project.clone()).await;
// Insert user message without any context (empty context vector)
@@ -2516,7 +2678,9 @@ fn main() {{
assert_eq!(message.loaded_context.text, "");
// Check message in request
let request = thread.update(cx, |thread, cx| thread.to_completion_request(cx));
let request = thread.update(cx, |thread, cx| {
thread.to_completion_request(model.clone(), cx)
});
assert_eq!(request.messages.len(), 2);
assert_eq!(
@@ -2539,7 +2703,9 @@ fn main() {{
assert_eq!(message2.loaded_context.text, "");
// Check that both messages appear in the request
let request = thread.update(cx, |thread, cx| thread.to_completion_request(cx));
let request = thread.update(cx, |thread, cx| {
thread.to_completion_request(model.clone(), cx)
});
assert_eq!(request.messages.len(), 3);
assert_eq!(
@@ -2562,7 +2728,7 @@ fn main() {{
)
.await;
let (_workspace, _thread_store, thread, context_store) =
let (_workspace, _thread_store, thread, context_store, model) =
setup_test_environment(cx, project.clone()).await;
// Open buffer and add it to context
@@ -2581,7 +2747,9 @@ fn main() {{
});
// Create a request and check that it doesn't have a stale buffer warning yet
let initial_request = thread.update(cx, |thread, cx| thread.to_completion_request(cx));
let initial_request = thread.update(cx, |thread, cx| {
thread.to_completion_request(model.clone(), cx)
});
// Make sure we don't have a stale file warning yet
let has_stale_warning = initial_request.messages.iter().any(|msg| {
@@ -2614,7 +2782,9 @@ fn main() {{
});
// Create a new request and check for the stale buffer warning
let new_request = thread.update(cx, |thread, cx| thread.to_completion_request(cx));
let new_request = thread.update(cx, |thread, cx| {
thread.to_completion_request(model.clone(), cx)
});
// We should have a stale file warning as the last message
let last_message = new_request
@@ -2644,9 +2814,11 @@ fn main() {{
prompt_store::init(cx);
thread_store::init(cx);
workspace::init_settings(cx);
language_model::init_settings(cx);
ThemeSettings::register(cx);
ContextServerSettings::register(cx);
EditorSettings::register(cx);
ToolRegistry::default_global(cx);
});
}
@@ -2668,6 +2840,7 @@ fn main() {{
Entity<ThreadStore>,
Entity<Thread>,
Entity<ContextStore>,
Arc<dyn LanguageModel>,
) {
let (workspace, cx) =
cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
@@ -2677,8 +2850,8 @@ fn main() {{
ThreadStore::load(
project.clone(),
cx.new(|_| ToolWorkingSet::default()),
None,
Arc::new(PromptBuilder::new(None).unwrap()),
None,
cx,
)
})
@@ -2688,7 +2861,10 @@ fn main() {{
let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
let context_store = cx.new(|_cx| ContextStore::new(project.downgrade(), None));
(workspace, thread_store, thread, context_store)
let model = FakeLanguageModel::default();
let model: Arc<dyn LanguageModel> = Arc::new(model);
(workspace, thread_store, thread, context_store, model)
}
async fn add_file_to_context(


@@ -11,7 +11,6 @@ use chrono::{DateTime, Utc};
use collections::HashMap;
use context_server::manager::ContextServerManager;
use context_server::{ContextServerFactoryRegistry, ContextServerTool};
use fs::Fs;
use futures::channel::{mpsc, oneshot};
use futures::future::{self, BoxFuture, Shared};
use futures::{FutureExt as _, StreamExt as _};
@@ -22,7 +21,7 @@ use gpui::{
use heed::Database;
use heed::types::SerdeBincode;
use language_model::{LanguageModelToolUseId, Role, TokenUsage};
use project::{Project, Worktree};
use project::{Project, ProjectItem, ProjectPath, Worktree};
use prompt_store::{
ProjectContext, PromptBuilder, PromptId, PromptStore, PromptsUpdatedEvent, RulesFileContext,
UserRulesContext, WorktreeContext,
@@ -82,8 +81,8 @@ impl ThreadStore {
pub fn load(
project: Entity<Project>,
tools: Entity<ToolWorkingSet>,
prompt_store: Option<Entity<PromptStore>>,
prompt_builder: Arc<PromptBuilder>,
prompt_store: Option<Entity<PromptStore>>,
cx: &mut App,
) -> Task<Result<Entity<Self>>> {
cx.spawn(async move |cx| {
@@ -207,15 +206,15 @@ impl ThreadStore {
prompt_store: Option<Entity<PromptStore>>,
cx: &mut Context<Self>,
) -> Task<()> {
let project = self.project.read(cx);
let worktree_tasks = project
let worktrees = self
.project
.read(cx)
.visible_worktrees(cx)
.collect::<Vec<_>>();
let worktree_tasks = worktrees
.into_iter()
.map(|worktree| {
Self::load_worktree_info_for_system_prompt(
project.fs().clone(),
worktree.read(cx),
cx,
)
Self::load_worktree_info_for_system_prompt(worktree, self.project.clone(), cx)
})
.collect::<Vec<_>>();
let default_user_rules_task = match prompt_store {
@@ -276,13 +275,13 @@ impl ThreadStore {
}
fn load_worktree_info_for_system_prompt(
fs: Arc<dyn Fs>,
worktree: &Worktree,
cx: &App,
worktree: Entity<Worktree>,
project: Entity<Project>,
cx: &mut App,
) -> Task<(WorktreeContext, Option<RulesLoadingError>)> {
let root_name = worktree.root_name().into();
let root_name = worktree.read(cx).root_name().into();
let rules_task = Self::load_worktree_rules_file(fs, worktree, cx);
let rules_task = Self::load_worktree_rules_file(worktree, project, cx);
let Some(rules_task) = rules_task else {
return Task::ready((
WorktreeContext {
@@ -312,33 +311,44 @@ impl ThreadStore {
}
fn load_worktree_rules_file(
fs: Arc<dyn Fs>,
worktree: &Worktree,
cx: &App,
worktree: Entity<Worktree>,
project: Entity<Project>,
cx: &mut App,
) -> Option<Task<Result<RulesFileContext>>> {
let worktree_ref = worktree.read(cx);
let worktree_id = worktree_ref.id();
let selected_rules_file = RULES_FILE_NAMES
.into_iter()
.filter_map(|name| {
worktree
worktree_ref
.entry_for_path(name)
.filter(|entry| entry.is_file())
.map(|entry| (entry.path.clone(), worktree.absolutize(&entry.path)))
.map(|entry| entry.path.clone())
})
.next();
// Note that Cline supports `.clinerules` being a directory, but that is not currently
// supported. This doesn't seem to occur often in GitHub repositories.
selected_rules_file.map(|(path_in_worktree, abs_path)| {
let fs = fs.clone();
selected_rules_file.map(|path_in_worktree| {
let project_path = ProjectPath {
worktree_id,
path: path_in_worktree.clone(),
};
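// Open the rules file as a project buffer, then capture its entry id and text.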
let buffer_task =
project.update(cx, |project, cx| project.open_buffer(project_path, cx));
let rope_task = cx.spawn(async move |cx| {
buffer_task.await?.read_with(cx, |buffer, cx| {
let project_entry_id = buffer.entry_id(cx).context("buffer has no file")?;
anyhow::Ok((project_entry_id, buffer.as_rope().clone()))
})?
});
// Build a string from the rope on a background thread.
cx.background_spawn(async move {
let abs_path = abs_path?;
let text = fs.load(&abs_path).await.with_context(|| {
format!("Failed to load assistant rules file {:?}", abs_path)
})?;
let (project_entry_id, rope) = rope_task.await?;
anyhow::Ok(RulesFileContext {
path_in_worktree,
abs_path: abs_path.into(),
text: text.trim().to_string(),
text: rope.to_string().trim().to_string(),
project_entry_id: project_entry_id.to_usize(),
})
})
})
@@ -494,6 +504,22 @@ impl ThreadStore {
);
});
}
// Enable all the tools from all context servers, but disable the ones that are explicitly disabled
for (context_server_id, preset) in &profile.context_servers {
self.tools.update(cx, |tools, cx| {
tools.disable(
ToolSource::ContextServer {
id: context_server_id.clone().into(),
},
&preset
.tools
.iter()
.filter_map(|(tool, enabled)| (!enabled).then(|| tool.clone()))
.collect::<Vec<_>>(),
cx,
)
})
}
} else {
for (context_server_id, preset) in &profile.context_servers {
self.tools.update(cx, |tools, cx| {
@@ -614,6 +640,14 @@ pub struct SerializedThread {
pub detailed_summary_state: DetailedSummaryState,
#[serde(default)]
pub exceeded_window_error: Option<ExceededWindowError>,
#[serde(default)]
pub model: Option<SerializedLanguageModel>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct SerializedLanguageModel {
pub provider: String,
pub model: String,
}
impl SerializedThread {
@@ -748,6 +782,7 @@ impl LegacySerializedThread {
request_token_usage: Vec::new(),
detailed_summary_state: DetailedSummaryState::default(),
exceeded_window_error: None,
model: None,
}
}
}


@@ -7,7 +7,7 @@ use futures::FutureExt as _;
use futures::future::Shared;
use gpui::{App, Entity, SharedString, Task};
use language_model::{
LanguageModel, LanguageModelRegistry, LanguageModelRequestMessage, LanguageModelToolResult,
ConfiguredModel, LanguageModel, LanguageModelRequestMessage, LanguageModelToolResult,
LanguageModelToolUse, LanguageModelToolUseId, MessageContent, Role,
};
use ui::IconName;
@@ -353,7 +353,7 @@ impl ToolUseState {
tool_use_id: LanguageModelToolUseId,
tool_name: Arc<str>,
output: Result<String>,
cx: &App,
configured_model: Option<&ConfiguredModel>,
) -> Option<PendingToolUse> {
let metadata = self.tool_use_metadata_by_id.remove(&tool_use_id);
@@ -373,13 +373,10 @@ impl ToolUseState {
match output {
Ok(tool_result) => {
let model_registry = LanguageModelRegistry::read_global(cx);
const BYTES_PER_TOKEN_ESTIMATE: usize = 3;
// Protect from clearly large output
let tool_output_limit = model_registry
.default_model()
let tool_output_limit = configured_model
.map(|model| model.model.max_token_count() * BYTES_PER_TOKEN_ESTIMATE)
.unwrap_or(usize::MAX);


@@ -1,13 +1,23 @@
use std::{rc::Rc, time::Duration};
use std::{ops::Range, path::Path, rc::Rc, sync::Arc, time::Duration};
use file_icons::FileIcons;
use gpui::{Animation, AnimationExt as _, ClickEvent, Entity, MouseButton, pulsating_between};
use futures::FutureExt as _;
use gpui::{
Animation, AnimationExt as _, AnyView, ClickEvent, Entity, Image, MouseButton, Task,
pulsating_between,
};
use language_model::LanguageModelImage;
use project::Project;
use prompt_store::PromptStore;
use text::OffsetRangeExt;
use rope::Point;
use ui::{IconButtonShape, Tooltip, prelude::*, tooltip_container};
use crate::context::{AgentContext, ContextKind, ImageStatus};
use crate::context::{
AgentContext, AgentContextHandle, ContextId, ContextKind, DirectoryContext,
DirectoryContextHandle, FetchedUrlContext, FileContext, FileContextHandle, ImageContext,
ImageStatus, RulesContext, RulesContextHandle, SelectionContext, SelectionContextHandle,
SymbolContext, SymbolContextHandle, ThreadContext, ThreadContextHandle,
};
#[derive(IntoElement)]
pub enum ContextPill {
@@ -72,7 +82,7 @@ impl ContextPill {
pub fn id(&self) -> ElementId {
match self {
Self::Added { context, .. } => context.context.element_id("context-pill".into()),
Self::Added { context, .. } => context.handle.element_id("context-pill".into()),
Self::Suggested { .. } => "suggested-context-pill".into(),
}
}
@@ -165,16 +175,11 @@ impl RenderOnce for ContextPill {
.map(|element| match &context.status {
ContextStatus::Ready => element
.when_some(
context.render_preview.as_ref(),
|element, render_preview| {
element.hoverable_tooltip({
let render_preview = render_preview.clone();
move |_, cx| {
cx.new(|_| ContextPillPreview {
render_preview: render_preview.clone(),
})
.into()
}
context.render_hover.as_ref(),
|element, render_hover| {
let render_hover = render_hover.clone();
element.hoverable_tooltip(move |window, cx| {
render_hover(window, cx)
})
},
)
@@ -197,7 +202,7 @@ impl RenderOnce for ContextPill {
.when_some(on_remove.as_ref(), |element, on_remove| {
element.child(
IconButton::new(
context.context.element_id("remove".into()),
context.handle.element_id("remove".into()),
IconName::Close,
)
.shape(IconButtonShape::Square)
@@ -262,18 +267,16 @@ pub enum ContextStatus {
Error { message: SharedString },
}
// TODO: Component commented out due to new dependency on `Project`.
//
// #[derive(RegisterComponent)]
#[derive(RegisterComponent)]
pub struct AddedContext {
pub context: AgentContext,
pub handle: AgentContextHandle,
pub kind: ContextKind,
pub name: SharedString,
pub parent: Option<SharedString>,
pub tooltip: Option<SharedString>,
pub icon_path: Option<SharedString>,
pub status: ContextStatus,
pub render_preview: Option<Rc<dyn Fn(&mut Window, &mut App) -> AnyElement + 'static>>,
pub render_hover: Option<Rc<dyn Fn(&mut Window, &mut App) -> AnyView + 'static>>,
}
impl AddedContext {
@@ -281,221 +284,430 @@ impl AddedContext {
/// `None` if `DirectoryContext` or `RulesContext` no longer exist.
///
/// TODO: `None` cases are unremovable from `ContextStore` and so constitute a very minor memory leak.
pub fn new(
context: AgentContext,
pub fn new_pending(
handle: AgentContextHandle,
prompt_store: Option<&Entity<PromptStore>>,
project: &Project,
cx: &App,
) -> Option<AddedContext> {
match handle {
AgentContextHandle::File(handle) => Self::pending_file(handle, cx),
AgentContextHandle::Directory(handle) => Self::pending_directory(handle, project, cx),
AgentContextHandle::Symbol(handle) => Self::pending_symbol(handle, cx),
AgentContextHandle::Selection(handle) => Self::pending_selection(handle, cx),
AgentContextHandle::FetchedUrl(handle) => Some(Self::fetched_url(handle)),
AgentContextHandle::Thread(handle) => Some(Self::pending_thread(handle, cx)),
AgentContextHandle::Rules(handle) => Self::pending_rules(handle, prompt_store, cx),
AgentContextHandle::Image(handle) => Some(Self::image(handle)),
}
}
pub fn new_attached(context: &AgentContext, cx: &App) -> AddedContext {
match context {
AgentContext::File(ref file_context) => {
let full_path = file_context.buffer.read(cx).file()?.full_path(cx);
let full_path_string: SharedString =
full_path.to_string_lossy().into_owned().into();
let name = full_path
.file_name()
.map(|n| n.to_string_lossy().into_owned().into())
.unwrap_or_else(|| full_path_string.clone());
let parent = full_path
.parent()
.and_then(|p| p.file_name())
.map(|n| n.to_string_lossy().into_owned().into());
Some(AddedContext {
kind: ContextKind::File,
name,
parent,
tooltip: Some(full_path_string),
icon_path: FileIcons::get_icon(&full_path, cx),
status: ContextStatus::Ready,
render_preview: None,
context,
})
}
AgentContext::File(context) => Self::attached_file(context, cx),
AgentContext::Directory(context) => Self::attached_directory(context),
AgentContext::Symbol(context) => Self::attached_symbol(context, cx),
AgentContext::Selection(context) => Self::attached_selection(context, cx),
AgentContext::FetchedUrl(context) => Self::fetched_url(context.clone()),
AgentContext::Thread(context) => Self::attached_thread(context),
AgentContext::Rules(context) => Self::attached_rules(context),
AgentContext::Image(context) => Self::image(context.clone()),
}
}
AgentContext::Directory(ref directory_context) => {
let worktree = project
.worktree_for_entry(directory_context.entry_id, cx)?
.read(cx);
let entry = worktree.entry_for_id(directory_context.entry_id)?;
let full_path = worktree.full_path(&entry.path);
let full_path_string: SharedString =
full_path.to_string_lossy().into_owned().into();
let name = full_path
.file_name()
.map(|n| n.to_string_lossy().into_owned().into())
.unwrap_or_else(|| full_path_string.clone());
let parent = full_path
.parent()
.and_then(|p| p.file_name())
.map(|n| n.to_string_lossy().into_owned().into());
Some(AddedContext {
kind: ContextKind::Directory,
name,
parent,
tooltip: Some(full_path_string),
icon_path: None,
status: ContextStatus::Ready,
render_preview: None,
context,
})
}
fn pending_file(handle: FileContextHandle, cx: &App) -> Option<AddedContext> {
let full_path = handle.buffer.read(cx).file()?.full_path(cx);
Some(Self::file(handle, &full_path, cx))
}
AgentContext::Symbol(ref symbol_context) => Some(AddedContext {
kind: ContextKind::Symbol,
name: symbol_context.symbol.clone(),
parent: None,
tooltip: None,
icon_path: None,
status: ContextStatus::Ready,
render_preview: None,
context,
}),
fn attached_file(context: &FileContext, cx: &App) -> AddedContext {
Self::file(context.handle.clone(), &context.full_path, cx)
}
AgentContext::Selection(ref selection_context) => {
let buffer = selection_context.buffer.read(cx);
let full_path = buffer.file()?.full_path(cx);
let mut full_path_string = full_path.to_string_lossy().into_owned();
let mut name = full_path
.file_name()
.map(|n| n.to_string_lossy().into_owned())
.unwrap_or_else(|| full_path_string.clone());
fn file(handle: FileContextHandle, full_path: &Path, cx: &App) -> AddedContext {
let full_path_string: SharedString = full_path.to_string_lossy().into_owned().into();
let name = full_path
.file_name()
.map(|n| n.to_string_lossy().into_owned().into())
.unwrap_or_else(|| full_path_string.clone());
let parent = full_path
.parent()
.and_then(|p| p.file_name())
.map(|n| n.to_string_lossy().into_owned().into());
AddedContext {
kind: ContextKind::File,
name,
parent,
tooltip: Some(full_path_string),
icon_path: FileIcons::get_icon(&full_path, cx),
status: ContextStatus::Ready,
render_hover: None,
handle: AgentContextHandle::File(handle),
}
}
let line_range = selection_context.range.to_point(&buffer.snapshot());
fn pending_directory(
handle: DirectoryContextHandle,
project: &Project,
cx: &App,
) -> Option<AddedContext> {
let worktree = project.worktree_for_entry(handle.entry_id, cx)?.read(cx);
let entry = worktree.entry_for_id(handle.entry_id)?;
let full_path = worktree.full_path(&entry.path);
Some(Self::directory(handle, &full_path))
}
let line_range_text =
format!(" ({}-{})", line_range.start.row + 1, line_range.end.row + 1);
fn attached_directory(context: &DirectoryContext) -> AddedContext {
Self::directory(context.handle.clone(), &context.full_path)
}
full_path_string.push_str(&line_range_text);
name.push_str(&line_range_text);
fn directory(handle: DirectoryContextHandle, full_path: &Path) -> AddedContext {
let full_path_string: SharedString = full_path.to_string_lossy().into_owned().into();
let name = full_path
.file_name()
.map(|n| n.to_string_lossy().into_owned().into())
.unwrap_or_else(|| full_path_string.clone());
let parent = full_path
.parent()
.and_then(|p| p.file_name())
.map(|n| n.to_string_lossy().into_owned().into());
AddedContext {
kind: ContextKind::Directory,
name,
parent,
tooltip: Some(full_path_string),
icon_path: None,
status: ContextStatus::Ready,
render_hover: None,
handle: AgentContextHandle::Directory(handle),
}
}
let parent = full_path
.parent()
.and_then(|p| p.file_name())
.map(|n| n.to_string_lossy().into_owned().into());
fn pending_symbol(handle: SymbolContextHandle, cx: &App) -> Option<AddedContext> {
let excerpt =
ContextFileExcerpt::new(&handle.full_path(cx)?, handle.enclosing_line_range(cx), cx);
Some(AddedContext {
kind: ContextKind::Symbol,
name: handle.symbol.clone(),
parent: Some(excerpt.file_name_and_range.clone()),
tooltip: None,
icon_path: None,
status: ContextStatus::Ready,
render_hover: {
let handle = handle.clone();
Some(Rc::new(move |_, cx| {
excerpt.hover_view(handle.text(cx), cx).into()
}))
},
handle: AgentContextHandle::Symbol(handle),
})
}
Some(AddedContext {
kind: ContextKind::Selection,
name: name.into(),
parent,
tooltip: None,
icon_path: FileIcons::get_icon(&full_path, cx),
status: ContextStatus::Ready,
render_preview: None,
/*
render_preview: Some(Rc::new({
let content = selection_context.text.clone();
move |_, cx| {
div()
.id("context-pill-selection-preview")
.overflow_scroll()
.max_w_128()
.max_h_96()
.child(Label::new(content.clone()).buffer_font(cx))
.into_any_element()
}
})),
*/
context,
})
}
fn attached_symbol(context: &SymbolContext, cx: &App) -> AddedContext {
let excerpt = ContextFileExcerpt::new(&context.full_path, context.line_range.clone(), cx);
AddedContext {
kind: ContextKind::Symbol,
name: context.handle.symbol.clone(),
parent: Some(excerpt.file_name_and_range.clone()),
tooltip: None,
icon_path: None,
status: ContextStatus::Ready,
render_hover: {
let text = context.text.clone();
Some(Rc::new(move |_, cx| {
excerpt.hover_view(text.clone(), cx).into()
}))
},
handle: AgentContextHandle::Symbol(context.handle.clone()),
}
}
AgentContext::FetchedUrl(ref fetched_url_context) => Some(AddedContext {
kind: ContextKind::FetchedUrl,
name: fetched_url_context.url.clone(),
parent: None,
tooltip: None,
icon_path: None,
status: ContextStatus::Ready,
render_preview: None,
context,
}),
fn pending_selection(handle: SelectionContextHandle, cx: &App) -> Option<AddedContext> {
let excerpt = ContextFileExcerpt::new(&handle.full_path(cx)?, handle.line_range(cx), cx);
Some(AddedContext {
kind: ContextKind::Selection,
name: excerpt.file_name_and_range.clone(),
parent: excerpt.parent_name.clone(),
tooltip: None,
icon_path: excerpt.icon_path.clone(),
status: ContextStatus::Ready,
render_hover: {
let handle = handle.clone();
Some(Rc::new(move |_, cx| {
excerpt.hover_view(handle.text(cx), cx).into()
}))
},
handle: AgentContextHandle::Selection(handle),
})
}
AgentContext::Thread(ref thread_context) => Some(AddedContext {
kind: ContextKind::Thread,
name: thread_context.name(cx),
parent: None,
tooltip: None,
icon_path: None,
status: if thread_context
.thread
.read(cx)
.is_generating_detailed_summary()
{
ContextStatus::Loading {
message: "Summarizing…".into(),
}
} else {
ContextStatus::Ready
fn attached_selection(context: &SelectionContext, cx: &App) -> AddedContext {
let excerpt = ContextFileExcerpt::new(&context.full_path, context.line_range.clone(), cx);
AddedContext {
kind: ContextKind::Selection,
name: excerpt.file_name_and_range.clone(),
parent: excerpt.parent_name.clone(),
tooltip: None,
icon_path: excerpt.icon_path.clone(),
status: ContextStatus::Ready,
render_hover: {
let text = context.text.clone();
Some(Rc::new(move |_, cx| {
excerpt.hover_view(text.clone(), cx).into()
}))
},
handle: AgentContextHandle::Selection(context.handle.clone()),
}
}
fn fetched_url(context: FetchedUrlContext) -> AddedContext {
AddedContext {
kind: ContextKind::FetchedUrl,
name: context.url.clone(),
parent: None,
tooltip: None,
icon_path: None,
status: ContextStatus::Ready,
render_hover: None,
handle: AgentContextHandle::FetchedUrl(context),
}
}
fn pending_thread(handle: ThreadContextHandle, cx: &App) -> AddedContext {
AddedContext {
kind: ContextKind::Thread,
name: handle.title(cx),
parent: None,
tooltip: None,
icon_path: None,
status: if handle.thread.read(cx).is_generating_detailed_summary() {
ContextStatus::Loading {
message: "Summarizing…".into(),
}
} else {
ContextStatus::Ready
},
render_hover: {
let thread = handle.thread.clone();
Some(Rc::new(move |_, cx| {
let text = thread.read(cx).latest_detailed_summary_or_text();
ContextPillHover::new_text(text.clone(), cx).into()
}))
},
handle: AgentContextHandle::Thread(handle),
}
}
fn attached_thread(context: &ThreadContext) -> AddedContext {
AddedContext {
kind: ContextKind::Thread,
name: context.title.clone(),
parent: None,
tooltip: None,
icon_path: None,
status: ContextStatus::Ready,
render_hover: {
let text = context.text.clone();
Some(Rc::new(move |_, cx| {
ContextPillHover::new_text(text.clone(), cx).into()
}))
},
handle: AgentContextHandle::Thread(context.handle.clone()),
}
}
fn pending_rules(
handle: RulesContextHandle,
prompt_store: Option<&Entity<PromptStore>>,
cx: &App,
) -> Option<AddedContext> {
let title = prompt_store
.as_ref()?
.read(cx)
.metadata(handle.prompt_id.into())?
.title
.unwrap_or_else(|| "Unnamed Rule".into());
Some(AddedContext {
kind: ContextKind::Rules,
name: title.clone(),
parent: None,
tooltip: None,
icon_path: None,
status: ContextStatus::Ready,
render_hover: None,
handle: AgentContextHandle::Rules(handle),
})
}
fn attached_rules(context: &RulesContext) -> AddedContext {
let title = context
.title
.clone()
.unwrap_or_else(|| "Unnamed Rule".into());
AddedContext {
kind: ContextKind::Rules,
name: title,
parent: None,
tooltip: None,
icon_path: None,
status: ContextStatus::Ready,
render_hover: {
let text = context.text.clone();
Some(Rc::new(move |_, cx| {
ContextPillHover::new_text(text.clone(), cx).into()
}))
},
handle: AgentContextHandle::Rules(context.handle.clone()),
}
}
fn image(context: ImageContext) -> AddedContext {
AddedContext {
kind: ContextKind::Image,
name: "Image".into(),
parent: None,
tooltip: None,
icon_path: None,
status: match context.status() {
ImageStatus::Loading => ContextStatus::Loading {
message: "Loading…".into(),
},
render_preview: None,
context,
}),
AgentContext::Rules(ref user_rules_context) => {
let name = prompt_store
.as_ref()?
.read(cx)
.metadata(user_rules_context.prompt_id.into())?
.title?;
Some(AddedContext {
kind: ContextKind::Rules,
name: name.clone(),
parent: None,
tooltip: None,
icon_path: None,
status: ContextStatus::Ready,
render_preview: None,
context,
})
}
AgentContext::Image(ref image_context) => Some(AddedContext {
kind: ContextKind::Image,
name: "Image".into(),
parent: None,
tooltip: None,
icon_path: None,
status: match image_context.status() {
ImageStatus::Loading => ContextStatus::Loading {
message: "Loading…".into(),
},
ImageStatus::Error => ContextStatus::Error {
message: "Failed to load image".into(),
},
ImageStatus::Ready => ContextStatus::Ready,
ImageStatus::Error => ContextStatus::Error {
message: "Failed to load image".into(),
},
render_preview: Some(Rc::new({
let image = image_context.original_image.clone();
move |_, _| {
ImageStatus::Ready => ContextStatus::Ready,
},
render_hover: Some(Rc::new({
let image = context.original_image.clone();
move |_, cx| {
let image = image.clone();
ContextPillHover::new(cx, move |_, _| {
gpui::img(image.clone())
.max_w_96()
.max_h_96()
.into_any_element()
}
})),
context,
}),
})
.into()
}
})),
handle: AgentContextHandle::Image(context),
}
}
}
struct ContextPillPreview {
render_preview: Rc<dyn Fn(&mut Window, &mut App) -> AnyElement>,
#[derive(Debug, Clone)]
struct ContextFileExcerpt {
pub file_name_and_range: SharedString,
pub full_path_and_range: SharedString,
pub parent_name: Option<SharedString>,
pub icon_path: Option<SharedString>,
}
impl Render for ContextPillPreview {
impl ContextFileExcerpt {
pub fn new(full_path: &Path, line_range: Range<Point>, cx: &App) -> Self {
let full_path_string = full_path.to_string_lossy().into_owned();
let file_name = full_path
.file_name()
.map(|n| n.to_string_lossy().into_owned())
.unwrap_or_else(|| full_path_string.clone());
let line_range_text = format!(" ({}-{})", line_range.start.row + 1, line_range.end.row + 1);
let mut full_path_and_range = full_path_string;
full_path_and_range.push_str(&line_range_text);
let mut file_name_and_range = file_name;
file_name_and_range.push_str(&line_range_text);
let parent_name = full_path
.parent()
.and_then(|p| p.file_name())
.map(|n| n.to_string_lossy().into_owned().into());
let icon_path = FileIcons::get_icon(&full_path, cx);
ContextFileExcerpt {
file_name_and_range: file_name_and_range.into(),
full_path_and_range: full_path_and_range.into(),
parent_name,
icon_path,
}
}
fn hover_view(&self, text: SharedString, cx: &mut App) -> Entity<ContextPillHover> {
let icon_path = self.icon_path.clone();
let full_path_and_range = self.full_path_and_range.clone();
ContextPillHover::new(cx, move |_, cx| {
v_flex()
.child(
h_flex()
.gap_0p5()
.w_full()
.max_w_full()
.border_b_1()
.border_color(cx.theme().colors().border.opacity(0.6))
.children(
icon_path
.clone()
.map(Icon::from_path)
.map(|icon| icon.color(Color::Muted).size(IconSize::XSmall)),
)
.child(
// TODO: make this truncate on the left.
Label::new(full_path_and_range.clone())
.size(LabelSize::Small)
.ml_1(),
),
)
.child(
div()
.id("context-pill-hover-contents")
.overflow_scroll()
.max_w_128()
.max_h_96()
.child(Label::new(text.clone()).buffer_font(cx)),
)
.into_any_element()
})
}
}
struct ContextPillHover {
render_hover: Box<dyn Fn(&mut Window, &mut App) -> AnyElement>,
}
impl ContextPillHover {
fn new(
cx: &mut App,
render_hover: impl Fn(&mut Window, &mut App) -> AnyElement + 'static,
) -> Entity<Self> {
cx.new(|_| Self {
render_hover: Box::new(render_hover),
})
}
fn new_text(content: SharedString, cx: &mut App) -> Entity<Self> {
Self::new(cx, move |_, _| {
div()
.id("context-pill-hover-contents")
.overflow_scroll()
.max_w_128()
.max_h_96()
.child(content.clone())
.into_any_element()
})
}
}
impl Render for ContextPillHover {
fn render(&mut self, window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
tooltip_container(window, cx, move |this, window, cx| {
this.occlude()
.on_mouse_move(|_, _, cx| cx.stop_propagation())
.on_mouse_down(MouseButton::Left, |_, _, cx| cx.stop_propagation())
.child((self.render_preview)(window, cx))
.child((self.render_hover)(window, cx))
})
}
}
// TODO: Component commented out due to new dependency on `Project`.
/*
impl Component for AddedContext {
fn scope() -> ComponentScope {
ComponentScope::Agent
@@ -505,47 +717,41 @@ impl Component for AddedContext {
"AddedContext"
}
fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
let next_context_id = ContextId::zero();
fn preview(_window: &mut Window, cx: &mut App) -> Option<AnyElement> {
let mut next_context_id = ContextId::zero();
let image_ready = (
"Ready",
AddedContext::new(
AgentContext::Image(ImageContext {
context_id: next_context_id.post_inc(),
original_image: Arc::new(Image::empty()),
image_task: Task::ready(Some(LanguageModelImage::empty())).shared(),
}),
cx,
),
AddedContext::image(ImageContext {
context_id: next_context_id.post_inc(),
project_path: None,
original_image: Arc::new(Image::empty()),
image_task: Task::ready(Some(LanguageModelImage::empty())).shared(),
}),
);
let image_loading = (
"Loading",
AddedContext::new(
AgentContext::Image(ImageContext {
context_id: next_context_id.post_inc(),
original_image: Arc::new(Image::empty()),
image_task: cx
.background_spawn(async move {
smol::Timer::after(Duration::from_secs(60 * 5)).await;
Some(LanguageModelImage::empty())
})
.shared(),
}),
cx,
),
AddedContext::image(ImageContext {
context_id: next_context_id.post_inc(),
project_path: None,
original_image: Arc::new(Image::empty()),
image_task: cx
.background_spawn(async move {
smol::Timer::after(Duration::from_secs(60 * 5)).await;
Some(LanguageModelImage::empty())
})
.shared(),
}),
);
let image_error = (
"Error",
AddedContext::new(
AgentContext::Image(ImageContext {
context_id: next_context_id.post_inc(),
original_image: Arc::new(Image::empty()),
image_task: Task::ready(None).shared(),
}),
cx,
),
AddedContext::image(ImageContext {
context_id: next_context_id.post_inc(),
project_path: None,
original_image: Arc::new(Image::empty()),
image_task: Task::ready(None).shared(),
}),
);
Some(
@@ -563,8 +769,5 @@ impl Component for AddedContext {
)
.into_any(),
)
None
}
}
*/
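
The new `ContextFileExcerpt::new` above folds a file name and a 1-based line range into strings like `context_pill.rs (11-21)` for the pill label and its hover. A standalone sketch of just that formatting, assuming only `std` (the function name and example path are illustrative):

```
use std::ops::Range;
use std::path::Path;

// Illustrative formatter: "file.rs (start-end)" with 1-based line numbers,
// mirroring the `row + 1` adjustment in the diff above.
fn file_name_and_range(full_path: &Path, line_range: Range<u32>) -> String {
    let file_name = full_path
        .file_name()
        .map(|n| n.to_string_lossy().into_owned())
        .unwrap_or_else(|| full_path.to_string_lossy().into_owned());
    format!(
        "{} ({}-{})",
        file_name,
        line_range.start + 1,
        line_range.end + 1
    )
}

fn main() {
    let path = Path::new("crates/agent_ui/src/context_pill.rs");
    assert_eq!(
        file_name_and_range(path, 10..20),
        "context_pill.rs (11-21)"
    );
}
```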

View File

@@ -98,6 +98,10 @@ impl RenderOnce for UsageBanner {
}
impl Component for UsageBanner {
fn scope() -> ComponentScope {
ComponentScope::Agent
}
fn sort_name() -> &'static str {
"AgentUsageBanner"
}

View File

@@ -15,6 +15,7 @@ path = "src/askpass.rs"
anyhow.workspace = true
futures.workspace = true
gpui.workspace = true
shlex.workspace = true
smol.workspace = true
tempfile.workspace = true
util.workspace = true

View File

@@ -72,8 +72,7 @@ impl AskPassSession {
let (askpass_opened_tx, askpass_opened_rx) = oneshot::channel::<()>();
let listener =
UnixListener::bind(&askpass_socket).context("failed to create askpass socket")?;
let zed_path = std::env::current_exe()
.context("Failed to figure out current executable path for use in askpass")?;
let zed_path = get_shell_safe_zed_path()?;
let (askpass_kill_master_tx, askpass_kill_master_rx) = oneshot::channel::<()>();
let mut kill_tx = Some(askpass_kill_master_tx);
@@ -115,7 +114,7 @@ impl AskPassSession {
// Create an askpass script that communicates back to this process.
let askpass_script = format!(
"{shebang}\n{print_args} | {zed_exe} --askpass={askpass_socket} 2> /dev/null \n",
zed_exe = zed_path.display(),
zed_exe = zed_path,
askpass_socket = askpass_socket.display(),
print_args = "printf '%s\\0' \"$@\"",
shebang = "#!/bin/sh",
@@ -161,6 +160,32 @@ impl AskPassSession {
}
}
#[cfg(unix)]
fn get_shell_safe_zed_path() -> anyhow::Result<String> {
let zed_path = std::env::current_exe()
.context("Failed to figure out current executable path for use in askpass")?
.to_string_lossy()
.to_string();
// sanity check on unix systems that the path exists and is executable
// todo(windows): implement this check for windows (or just use `is-executable` crate)
use std::os::unix::fs::MetadataExt;
let metadata = std::fs::metadata(&zed_path)
.context("Failed to check metadata of Zed executable path for use in askpass")?;
let is_executable = metadata.is_file() && metadata.mode() & 0o111 != 0;
anyhow::ensure!(
is_executable,
"Failed to verify Zed executable path for use in askpass"
);
// As of writing, this can only fail if the path contains a null byte, which shouldn't be possible
// but shlex has annotated the error as #[non_exhaustive] so we can't make it a compile error if other
// errors are introduced in the future :(
let zed_path_escaped = shlex::try_quote(&zed_path)
.context("Failed to shell-escape Zed executable path for use in askpass")?;
return Ok(zed_path_escaped.to_string());
}
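
`get_shell_safe_zed_path` relies on `shlex::try_quote` to make the executable path safe to splice into the generated `#!/bin/sh` script. A minimal sketch of that quoting step, assuming the `shlex` crate is available (the example path is illustrative):

```
// Standalone sketch of the quoting used above; requires `shlex = "1"`.
fn main() {
    let path = "/Applications/Zed Preview.app/Contents/MacOS/zed";
    // `try_quote` only fails if the input contains a null byte.
    let quoted = shlex::try_quote(path).expect("path contained a null byte");
    // The quoted form can be embedded in the generated `#!/bin/sh` askpass
    // script without being split on the space in the path.
    println!("{quoted}");
}
```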
/// The main function for when Zed is running in netcat mode for use in askpass.
/// Called from both the remote server binary and the zed binary in their respective main functions.
#[cfg(unix)]

View File

@@ -193,7 +193,7 @@ impl Focusable for ConfigurationView {
impl Item for ConfigurationView {
type Event = ConfigurationViewEvent;
fn tab_content_text(&self, _window: &Window, _cx: &App) -> Option<SharedString> {
Some("Configuration".into())
fn tab_content_text(&self, _detail: usize, _cx: &App) -> SharedString {
"Configuration".into()
}
}

View File

@@ -476,7 +476,7 @@ impl AssistantPanel {
{
return;
}
context.custom_summary(new_summary, cx)
context.set_custom_summary(new_summary, cx)
});
});
}

View File

@@ -37,7 +37,7 @@ use language_model::{
ConfiguredModel, LanguageModel, LanguageModelRegistry, LanguageModelRequest,
LanguageModelRequestMessage, LanguageModelTextStream, Role, report_assistant_event,
};
use language_model_selector::{LanguageModelSelector, LanguageModelSelectorPopoverMenu, ModelType};
use language_model_selector::{LanguageModelSelector, LanguageModelSelectorPopoverMenu};
use multi_buffer::MultiBufferRow;
use parking_lot::Mutex;
use project::{CodeAction, LspAction, ProjectTransaction};
@@ -1759,6 +1759,7 @@ impl PromptEditor {
language_model_selector: cx.new(|cx| {
let fs = fs.clone();
LanguageModelSelector::new(
|cx| LanguageModelRegistry::read_global(cx).default_model(),
move |model, cx| {
update_settings_file::<AssistantSettings>(
fs.clone(),
@@ -1766,7 +1767,6 @@ impl PromptEditor {
move |settings, _| settings.set_model(model.clone()),
);
},
ModelType::Default,
window,
cx,
)
@@ -2981,6 +2981,7 @@ impl CodegenAlternative {
Ok(LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages,
tools: Vec::new(),
stop: Vec::new(),
@@ -3023,7 +3024,7 @@ impl CodegenAlternative {
}
}
let http_client = cx.http_client().clone();
let http_client = cx.http_client();
let telemetry = self.telemetry.clone();
let language_name = {
let multibuffer = self.buffer.read(cx);

View File

@@ -19,7 +19,7 @@ use language_model::{
ConfiguredModel, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
Role, report_assistant_event,
};
use language_model_selector::{LanguageModelSelector, LanguageModelSelectorPopoverMenu, ModelType};
use language_model_selector::{LanguageModelSelector, LanguageModelSelectorPopoverMenu};
use prompt_store::PromptBuilder;
use settings::{Settings, update_settings_file};
use std::{
@@ -294,6 +294,7 @@ impl TerminalInlineAssistant {
Ok(LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages,
tools: Vec::new(),
stop: Vec::new(),
@@ -748,6 +749,7 @@ impl PromptEditor {
language_model_selector: cx.new(|cx| {
let fs = fs.clone();
LanguageModelSelector::new(
|cx| LanguageModelRegistry::read_global(cx).default_model(),
move |model, cx| {
update_settings_file::<AssistantSettings>(
fs.clone(),
@@ -755,7 +757,6 @@ impl PromptEditor {
move |settings, _| settings.set_model(model.clone()),
);
},
ModelType::Default,
window,
cx,
)

View File

@@ -459,6 +459,7 @@ pub enum ContextEvent {
ShowMaxMonthlySpendReachedError,
MessagesEdited,
SummaryChanged,
SummaryGenerated,
StreamedCompletion,
StartedThoughtProcess(Range<language::Anchor>),
EndedThoughtProcess(language::Anchor),
@@ -482,7 +483,7 @@ pub enum ContextEvent {
#[derive(Clone, Default, Debug)]
pub struct ContextSummary {
pub text: String,
done: bool,
pub done: bool,
timestamp: clock::Lamport,
}
@@ -640,7 +641,7 @@ pub struct AssistantContext {
contents: Vec<Content>,
messages_metadata: HashMap<MessageId, MessageMetadata>,
summary: Option<ContextSummary>,
pending_summary: Task<Option<()>>,
summary_task: Task<Option<()>>,
completion_count: usize,
pending_completions: Vec<PendingCompletion>,
token_count: Option<usize>,
@@ -741,7 +742,7 @@ impl AssistantContext {
thought_process_output_sections: Vec::new(),
edits_since_last_parse: edits_since_last_slash_command_parse,
summary: None,
pending_summary: Task::ready(None),
summary_task: Task::ready(None),
completion_count: Default::default(),
pending_completions: Default::default(),
token_count: None,
@@ -951,7 +952,7 @@ impl AssistantContext {
fn flush_ops(&mut self, cx: &mut Context<AssistantContext>) {
let mut changed_messages = HashSet::default();
let mut summary_changed = false;
let mut summary_generated = false;
self.pending_ops.sort_unstable_by_key(|op| op.timestamp());
for op in mem::take(&mut self.pending_ops) {
@@ -993,7 +994,7 @@ impl AssistantContext {
.map_or(true, |summary| new_summary.timestamp > summary.timestamp)
{
self.summary = Some(new_summary);
summary_changed = true;
summary_generated = true;
}
}
ContextOperation::SlashCommandStarted {
@@ -1072,8 +1073,9 @@ impl AssistantContext {
cx.notify();
}
if summary_changed {
if summary_generated {
cx.emit(ContextEvent::SummaryChanged);
cx.emit(ContextEvent::SummaryGenerated);
cx.notify();
}
}
@@ -2557,6 +2559,7 @@ impl AssistantContext {
let mut completion_request = LanguageModelRequest {
thread_id: None,
prompt_id: None,
mode: None,
messages: Vec::new(),
tools: Vec::new(),
stop: Vec::new(),
@@ -2611,7 +2614,9 @@ impl AssistantContext {
.map(MessageContent::Text),
);
completion_request.messages.push(request_message);
if !request_message.contents_empty() {
completion_request.messages.push(request_message);
}
}
if let RequestType::SuggestEdits = request_type {
@@ -2945,7 +2950,7 @@ impl AssistantContext {
self.message_anchors.insert(insertion_ix, new_anchor);
}
pub fn summarize(&mut self, replace_old: bool, cx: &mut Context<Self>) {
pub fn summarize(&mut self, mut replace_old: bool, cx: &mut Context<Self>) {
let Some(model) = LanguageModelRegistry::read_global(cx).default_model() else {
return;
};
@@ -2965,7 +2970,18 @@ impl AssistantContext {
cache: false,
});
self.pending_summary = cx.spawn(async move |this, cx| {
// If there is no summary, it is set with `done: false` so that "Loading Summary…" can
// be displayed.
if self.summary.is_none() {
self.summary = Some(ContextSummary {
text: "".to_string(),
done: false,
timestamp: clock::Lamport::default(),
});
replace_old = true;
}
self.summary_task = cx.spawn(async move |this, cx| {
async move {
let stream = model.model.stream_completion_text(request, &cx);
let mut messages = stream.await?;
@@ -2990,6 +3006,7 @@ impl AssistantContext {
};
this.push_op(operation, cx);
cx.emit(ContextEvent::SummaryChanged);
cx.emit(ContextEvent::SummaryGenerated);
})?;
// Stop if the LLM generated multiple lines.
@@ -3010,6 +3027,7 @@ impl AssistantContext {
};
this.push_op(operation, cx);
cx.emit(ContextEvent::SummaryChanged);
cx.emit(ContextEvent::SummaryGenerated);
}
})?;
@@ -3182,7 +3200,7 @@ impl AssistantContext {
});
}
pub fn custom_summary(&mut self, custom_summary: String, cx: &mut Context<Self>) {
pub fn set_custom_summary(&mut self, custom_summary: String, cx: &mut Context<Self>) {
let timestamp = self.next_timestamp();
let summary = self.summary.get_or_insert(ContextSummary::default());
summary.timestamp = timestamp;
@@ -3190,6 +3208,15 @@ impl AssistantContext {
summary.text = custom_summary;
cx.emit(ContextEvent::SummaryChanged);
}
pub const DEFAULT_SUMMARY: SharedString = SharedString::new_static("New Text Thread");
pub fn summary_or_default(&self) -> SharedString {
self.summary
.as_ref()
.map(|summary| summary.text.clone().into())
.unwrap_or(Self::DEFAULT_SUMMARY)
}
}
fn trimmed_text_in_range(buffer: &BufferSnapshot, range: Range<text::Anchor>) -> String {

View File

@@ -39,7 +39,7 @@ use language_model::{
Role,
};
use language_model_selector::{
LanguageModelSelector, LanguageModelSelectorPopoverMenu, ModelType, ToggleModelSelector,
LanguageModelSelector, LanguageModelSelectorPopoverMenu, ToggleModelSelector,
};
use multi_buffer::MultiBufferRow;
use picker::Picker;
@@ -48,7 +48,7 @@ use project::{Project, Worktree};
use rope::Point;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore, update_settings_file};
use std::{any::TypeId, borrow::Cow, cmp, ops::Range, path::PathBuf, sync::Arc, time::Duration};
use std::{any::TypeId, cmp, ops::Range, path::PathBuf, sync::Arc, time::Duration};
use text::SelectionGoal;
use ui::{
ButtonLike, Disclosure, ElevationIndex, KeyBinding, PopoverMenuHandle, TintColor, Tooltip,
@@ -291,6 +291,7 @@ impl ContextEditor {
dragged_file_worktrees: Vec::new(),
language_model_selector: cx.new(|cx| {
LanguageModelSelector::new(
|cx| LanguageModelRegistry::read_global(cx).default_model(),
move |model, cx| {
update_settings_file::<AssistantSettings>(
fs.clone(),
@@ -298,7 +299,6 @@ impl ContextEditor {
move |settings, _| settings.set_model(model.clone()),
);
},
ModelType::Default,
window,
cx,
)
@@ -618,6 +618,7 @@ impl ContextEditor {
context.save(Some(Duration::from_millis(500)), self.fs.clone(), cx);
});
}
ContextEvent::SummaryGenerated => {}
ContextEvent::StartedThoughtProcess(range) => {
let creases = self.insert_thought_process_output_sections(
[(
@@ -2179,13 +2180,8 @@ impl ContextEditor {
});
}
pub fn title(&self, cx: &App) -> Cow<str> {
self.context
.read(cx)
.summary()
.map(|summary| summary.text.clone())
.map(Cow::Owned)
.unwrap_or_else(|| Cow::Borrowed(DEFAULT_TAB_TITLE))
pub fn title(&self, cx: &App) -> SharedString {
self.context.read(cx).summary_or_default()
}
fn render_patch_block(
@@ -3160,8 +3156,8 @@ impl Focusable for ContextEditor {
impl Item for ContextEditor {
type Event = editor::EditorEvent;
fn tab_content_text(&self, _window: &Window, cx: &App) -> Option<SharedString> {
Some(util::truncate_and_trailoff(&self.title(cx), MAX_TAB_TITLE_LEN).into())
fn tab_content_text(&self, _detail: usize, cx: &App) -> SharedString {
util::truncate_and_trailoff(&self.title(cx), MAX_TAB_TITLE_LEN).into()
}
fn to_item_events(event: &Self::Event, mut f: impl FnMut(item::ItemEvent)) {
@@ -3768,7 +3764,7 @@ pub fn make_lsp_adapter_delegate(
let Some(worktree) = project.worktrees(cx).next() else {
return Ok(None::<Arc<dyn LspAdapterDelegate>>);
};
let http_client = project.client().http_client().clone();
let http_client = project.client().http_client();
project.lsp_store().update(cx, |_, cx| {
Ok(Some(LocalLspAdapterDelegate::new(
project.languages().clone(),

View File

@@ -108,8 +108,8 @@ impl EventEmitter<()> for ContextHistory {}
impl Item for ContextHistory {
type Event = ();
fn tab_content_text(&self, _window: &Window, _cx: &App) -> Option<SharedString> {
Some("History".into())
fn tab_content_text(&self, _detail: usize, _cx: &App) -> SharedString {
"History".into()
}
}

View File

@@ -83,7 +83,7 @@ impl DocsSlashCommand {
.upgrade()
.ok_or_else(|| anyhow!("workspace was dropped"))?;
let project = workspace.read(cx).project().clone();
anyhow::Ok(project.read(cx).client().http_client().clone())
anyhow::Ok(project.read(cx).client().http_client())
});
if let Some(http_client) = http_client.log_err() {

View File

@@ -35,7 +35,15 @@ fn adapt_to_json_schema_subset(json: &mut Value) -> Result<()> {
}
}
obj.remove("format");
const KEYS_TO_REMOVE: [&str; 4] = [
"format",
"additionalProperties",
"exclusiveMinimum",
"exclusiveMaximum",
];
for key in KEYS_TO_REMOVE {
obj.remove(key);
}
if let Some(default) = obj.get("default") {
let is_null = default.is_null();
@@ -47,7 +55,7 @@ fn adapt_to_json_schema_subset(json: &mut Value) -> Result<()> {
}
// If a type is not specified for an input parameter, add a default type
if obj.contains_key("description")
if matches!(obj.get("description"), Some(Value::String(_)))
&& !obj.contains_key("type")
&& !(obj.contains_key("anyOf")
|| obj.contains_key("oneOf")
@@ -119,14 +127,37 @@ mod tests {
"type": "string"
})
);
// Ensure that we do not add a type if it is an object
let mut json = json!({
"description": {
"value": "abc",
"type": "string"
}
});
adapt_to_json_schema_subset(&mut json).unwrap();
assert_eq!(
json,
json!({
"description": {
"value": "abc",
"type": "string"
}
})
);
}
#[test]
fn test_transform_removes_format() {
fn test_transform_removes_unsupported_keys() {
let mut json = json!({
"description": "A test field",
"type": "integer",
"format": "uint32"
"format": "uint32",
"exclusiveMinimum": 0,
"exclusiveMaximum": 100,
"additionalProperties": false
});
adapt_to_json_schema_subset(&mut json).unwrap();
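
The hunk above generalizes the schema adaptation from removing only `format` to stripping every key in `KEYS_TO_REMOVE`, and only adds a default `type` when `description` is actually a string. A standalone sketch of the key-stripping part, using only `serde_json` (the recursion into nested values is an assumption for illustration, not a claim about `adapt_to_json_schema_subset`):

```
use serde_json::{Value, json};

// Remove schema keys the target provider does not accept, mirroring
// KEYS_TO_REMOVE from the diff above.
fn strip_unsupported_keys(value: &mut Value) {
    const KEYS_TO_REMOVE: [&str; 4] = [
        "format",
        "additionalProperties",
        "exclusiveMinimum",
        "exclusiveMaximum",
    ];
    if let Some(obj) = value.as_object_mut() {
        for key in KEYS_TO_REMOVE {
            obj.remove(key);
        }
        // Nested schemas (e.g. under `properties`) get the same treatment.
        for child in obj.values_mut() {
            strip_unsupported_keys(child);
        }
    }
}

fn main() {
    let mut schema = json!({
        "type": "integer",
        "format": "uint32",
        "exclusiveMinimum": 0,
        "exclusiveMaximum": 100,
        "additionalProperties": false
    });
    strip_unsupported_keys(&mut schema);
    assert_eq!(schema, json!({ "type": "integer" }));
}
```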

View File

@@ -21,7 +21,6 @@ component.workspace = true
editor.workspace = true
futures.workspace = true
gpui.workspace = true
handlebars = { workspace = true, features = ["rust-embed"] }
html_to_markdown.workspace = true
http_client.workspace = true
indoc.workspace = true
@@ -32,17 +31,14 @@ linkme.workspace = true
open.workspace = true
project.workspace = true
regex.workspace = true
rust-embed.workspace = true
schemars.workspace = true
serde.workspace = true
serde_json.workspace = true
smallvec.workspace = true
ui.workspace = true
util.workspace = true
web_search.workspace = true
workspace.workspace = true
workspace-hack.workspace = true
worktree.workspace = true
workspace.workspace = true
zed_llm_client.workspace = true
[dev-dependencies]
@@ -50,14 +46,10 @@ client = { workspace = true, features = ["test-support"] }
clock = { workspace = true, features = ["test-support"] }
collections = { workspace = true, features = ["test-support"] }
gpui = { workspace = true, features = ["test-support"] }
gpui_tokio.workspace = true
fs = { workspace = true, features = ["test-support"] }
language = { workspace = true, features = ["test-support"] }
language_models.workspace = true
project = { workspace = true, features = ["test-support"] }
rand.workspace = true
pretty_assertions.workspace = true
reqwest_client.workspace = true
settings = { workspace = true, features = ["test-support"] }
tree-sitter-rust.workspace = true
workspace = { workspace = true, features = ["test-support"] }

View File

@@ -7,7 +7,6 @@ mod create_directory_tool;
mod create_file_tool;
mod delete_path_tool;
mod diagnostics_tool;
mod edit_agent;
mod edit_file_tool;
mod fetch_tool;
mod find_path_tool;
@@ -18,9 +17,9 @@ mod now_tool;
mod open_tool;
mod read_file_tool;
mod rename_tool;
mod replace;
mod schema;
mod symbol_info_tool;
mod templates;
mod terminal_tool;
mod thinking_tool;
mod ui;
@@ -36,8 +35,6 @@ use language_model::LanguageModelRegistry;
use move_path_tool::MovePathTool;
use web_search_tool::WebSearchTool;
pub(crate) use templates::*;
use crate::batch_tool::BatchTool;
use crate::code_action_tool::CodeActionTool;
use crate::code_symbols_tool::CodeSymbolsTool;

View File

@@ -14,7 +14,7 @@ use regex::{Regex, RegexBuilder};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CodeSymbolsInput {
@@ -102,7 +102,7 @@ impl Tool for CodeSymbolsTool {
match &input.path {
Some(path) => {
let path = MarkdownString::inline_code(path);
let path = MarkdownInlineCode(path);
if page > 1 {
format!("List page {page} of code symbols for {path}")
} else {

View File

@@ -11,7 +11,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{fmt::Write, path::Path};
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
/// If the model requests to read a file whose size exceeds this, then
/// the tool will return the file's symbol outline instead of its contents,
@@ -82,7 +82,7 @@ impl Tool for ContentsTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<ContentsToolInput>(input.clone()) {
Ok(input) => {
let path = MarkdownString::inline_code(&input.path);
let path = MarkdownInlineCode(&input.path);
match (input.start, input.end) {
(Some(start), None) => format!("Read {path} (from line {start})"),

View File

@@ -10,7 +10,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CopyPathToolInput {
@@ -63,8 +63,8 @@ impl Tool for CopyPathTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<CopyPathToolInput>(input.clone()) {
Ok(input) => {
let src = MarkdownString::inline_code(&input.source_path);
let dest = MarkdownString::inline_code(&input.destination_path);
let src = MarkdownInlineCode(&input.source_path);
let dest = MarkdownInlineCode(&input.destination_path);
format!("Copy {src} to {dest}")
}
Err(_) => "Copy path".to_string(),

View File

@@ -10,7 +10,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CreateDirectoryToolInput {
@@ -53,10 +53,7 @@ impl Tool for CreateDirectoryTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<CreateDirectoryToolInput>(input.clone()) {
Ok(input) => {
format!(
"Create directory {}",
MarkdownString::inline_code(&input.path)
)
format!("Create directory {}", MarkdownInlineCode(&input.path))
}
Err(_) => "Create directory".to_string(),
}

View File

@@ -10,7 +10,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct CreateFileToolInput {
@@ -73,7 +73,7 @@ impl Tool for CreateFileTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<CreateFileToolInput>(input.clone()) {
Ok(input) => {
let path = MarkdownString::inline_code(&input.path);
let path = MarkdownInlineCode(&input.path);
format!("Create file {path}")
}
Err(_) => DEFAULT_UI_TEXT.to_string(),

View File

@@ -9,7 +9,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{fmt::Write, path::Path, sync::Arc};
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct DiagnosticsToolInput {
@@ -66,11 +66,11 @@ impl Tool for DiagnosticsTool {
if let Some(path) = serde_json::from_value::<DiagnosticsToolInput>(input.clone())
.ok()
.and_then(|input| match input.path {
Some(path) if !path.is_empty() => Some(MarkdownString::inline_code(&path)),
Some(path) if !path.is_empty() => Some(path),
_ => None,
})
{
format!("Check diagnostics for {path}")
format!("Check diagnostics for {}", MarkdownInlineCode(&path))
} else {
"Check project diagnostics".to_string()
}

View File

@@ -1,526 +0,0 @@
mod edit_parser;
use crate::{Template, Templates};
use anyhow::{Result, anyhow};
use assistant_tool::ActionLog;
use edit_parser::EditParser;
use futures::{Stream, StreamExt, stream};
use gpui::{AsyncApp, Entity};
use language::{Anchor, Bias, Buffer, BufferSnapshot};
use language_model::{
LanguageModel, LanguageModelRequest, LanguageModelRequestMessage, MessageContent, Role,
};
use serde::Serialize;
use smallvec::SmallVec;
use std::{ops::Range, path::PathBuf, sync::Arc};
#[derive(Serialize)]
pub struct EditAgentTemplate {
path: Option<PathBuf>,
file_content: String,
instructions: String,
}
impl Template for EditAgentTemplate {
const TEMPLATE_NAME: &'static str = "edit_agent.hbs";
}
pub struct EditAgent {
model: Arc<dyn LanguageModel>,
action_log: Entity<ActionLog>,
templates: Arc<Templates>,
}
impl EditAgent {
pub fn new(
model: Arc<dyn LanguageModel>,
action_log: Entity<ActionLog>,
templates: Arc<Templates>,
) -> Self {
EditAgent {
model,
action_log,
templates,
}
}
pub async fn edit(
&self,
buffer: Entity<Buffer>,
instructions: String,
cx: &mut AsyncApp,
) -> Result<()> {
let edits = self.stream_edits(buffer.clone(), instructions, cx).await?;
self.apply_edits(buffer, edits, cx).await?;
Ok(())
}
async fn apply_edits(
&self,
buffer: Entity<Buffer>,
edits: impl Stream<Item = Result<(Range<Anchor>, String)>>,
cx: &mut AsyncApp,
) -> Result<()> {
// todo!("group all edits into one transaction")
// todo!("add tests for this")
// Ensure the buffer is tracked by the action log.
self.action_log
.update(cx, |log, cx| log.track_buffer(buffer.clone(), cx))?;
futures::pin_mut!(edits);
while let Some(edit) = edits.next().await {
let (range, content) = edit?;
// Edit the buffer and report the edit as part of the same effect cycle, otherwise
// the edit will be reported as if the user made it.
cx.update(|cx| {
buffer.update(cx, |buffer, cx| buffer.edit([(range, content)], None, cx));
self.action_log
.update(cx, |log, cx| log.buffer_edited(buffer.clone(), cx))
})?;
}
Ok(())
}
async fn stream_edits(
&self,
buffer: Entity<Buffer>,
instructions: String,
cx: &mut AsyncApp,
) -> Result<impl use<> + Stream<Item = Result<(Range<Anchor>, String)>>> {
println!("{}\n\n", instructions);
let snapshot = buffer.read_with(cx, |buffer, _| buffer.snapshot())?;
let path = cx.update(|cx| snapshot.resolve_file_path(cx, true))?;
// todo!("move to background")
let prompt = EditAgentTemplate {
path,
file_content: snapshot.text(),
instructions,
}
.render(&self.templates)?;
let request = LanguageModelRequest {
messages: vec![LanguageModelRequestMessage {
role: Role::User,
content: vec![MessageContent::Text(prompt)],
cache: false,
}],
..Default::default()
};
let mut parser = EditParser::new();
let stream = self.model.stream_completion_text(request, cx).await?.stream;
Ok(stream.flat_map(move |chunk| {
let mut edits = SmallVec::new();
let mut error = None;
let snapshot = snapshot.clone();
match chunk {
Ok(chunk) => edits = parser.push(&chunk),
Err(err) => error = Some(Err(anyhow!(err))),
}
stream::iter(
edits
.into_iter()
.map(move |edit| {
dbg!(&edit);
let range = Self::resolve_location(&snapshot, &edit.old_text);
Ok((range, edit.new_text))
})
.chain(error),
)
}))
}
fn resolve_location(buffer: &BufferSnapshot, search_query: &str) -> Range<Anchor> {
const INSERTION_COST: u32 = 3;
const DELETION_COST: u32 = 10;
const WHITESPACE_INSERTION_COST: u32 = 1;
const WHITESPACE_DELETION_COST: u32 = 1;
let buffer_len = buffer.len();
let query_len = search_query.len();
let mut matrix = SearchMatrix::new(query_len + 1, buffer_len + 1);
let mut leading_deletion_cost = 0_u32;
for (row, query_byte) in search_query.bytes().enumerate() {
let deletion_cost = if query_byte.is_ascii_whitespace() {
WHITESPACE_DELETION_COST
} else {
DELETION_COST
};
leading_deletion_cost = leading_deletion_cost.saturating_add(deletion_cost);
matrix.set(
row + 1,
0,
SearchState::new(leading_deletion_cost, SearchDirection::Diagonal),
);
for (col, buffer_byte) in buffer.bytes_in_range(0..buffer.len()).flatten().enumerate() {
let insertion_cost = if buffer_byte.is_ascii_whitespace() {
WHITESPACE_INSERTION_COST
} else {
INSERTION_COST
};
let up = SearchState::new(
matrix.get(row, col + 1).cost.saturating_add(deletion_cost),
SearchDirection::Up,
);
let left = SearchState::new(
matrix.get(row + 1, col).cost.saturating_add(insertion_cost),
SearchDirection::Left,
);
let diagonal = SearchState::new(
if query_byte == *buffer_byte {
matrix.get(row, col).cost
} else {
matrix
.get(row, col)
.cost
.saturating_add(deletion_cost + insertion_cost)
},
SearchDirection::Diagonal,
);
matrix.set(row + 1, col + 1, up.min(left).min(diagonal));
}
}
// Traceback to find the best match
let mut best_buffer_end = buffer_len;
let mut best_cost = u32::MAX;
for col in 1..=buffer_len {
let cost = matrix.get(query_len, col).cost;
if cost < best_cost {
best_cost = cost;
best_buffer_end = col;
}
}
let mut query_ix = query_len;
let mut buffer_ix = best_buffer_end;
while query_ix > 0 && buffer_ix > 0 {
let current = matrix.get(query_ix, buffer_ix);
match current.direction {
SearchDirection::Diagonal => {
query_ix -= 1;
buffer_ix -= 1;
}
SearchDirection::Up => {
query_ix -= 1;
}
SearchDirection::Left => {
buffer_ix -= 1;
}
}
}
let mut start = buffer.offset_to_point(buffer.clip_offset(buffer_ix, Bias::Left));
start.column = 0;
let mut end = buffer.offset_to_point(buffer.clip_offset(best_buffer_end, Bias::Right));
if end.column > 0 {
end.column = buffer.line_len(end.row);
}
buffer.anchor_after(start)..buffer.anchor_before(end)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum SearchDirection {
Up,
Left,
Diagonal,
}
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct SearchState {
cost: u32,
direction: SearchDirection,
}
impl SearchState {
fn new(cost: u32, direction: SearchDirection) -> Self {
Self { cost, direction }
}
}
struct SearchMatrix {
cols: usize,
data: Vec<SearchState>,
}
impl SearchMatrix {
fn new(rows: usize, cols: usize) -> Self {
SearchMatrix {
cols,
data: vec![SearchState::new(0, SearchDirection::Diagonal); rows * cols],
}
}
fn get(&self, row: usize, col: usize) -> SearchState {
self.data[row * self.cols + col]
}
fn set(&mut self, row: usize, col: usize, cost: SearchState) {
self.data[row * self.cols + col] = cost;
}
}
#[cfg(test)]
mod tests {
use super::*;
use client::{Client, UserStore};
use collections::HashSet;
use fs::FakeFs;
use gpui::{AppContext, TestAppContext};
use indoc::indoc;
use language_model::LanguageModelRegistry;
use project::Project;
use rand::prelude::*;
use reqwest_client::ReqwestClient;
use serde_json::json;
use std::{fmt::Write as _, io::Write as _, path::Path, sync::mpsc};
use util::path;
#[test]
fn test_delete_run_git_blame() {
eval(
100,
0.9,
Eval {
input_path: "root/blame.rs".into(),
input_content: include_str!("fixtures/delete_run_git_blame/before.rs").into(),
instructions: indoc! {r#"
Let's delete the `run_git_blame` function while keeping all other code intact:
// ... existing code ...
const GIT_BLAME_NO_COMMIT_ERROR: &str = "fatal: no such ref: HEAD";
const GIT_BLAME_NO_PATH: &str = "fatal: no such path";
#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)]
pub struct BlameEntry {
// ... existing code ...
"#}
.into(),
expected_output: include_str!("fixtures/delete_run_git_blame/after.rs").into(),
},
);
}
#[test]
fn test_extract_handle_command_output() {
eval(
100,
0.9,
Eval {
input_path: "root/blame.rs".into(),
input_content: include_str!("fixtures/extract_handle_command_output/before.rs").into(),
instructions: indoc! {r#"
Extract `handle_command_output` method from `run_git_blame`.
// ... existing code ...
async fn run_git_blame(
git_binary: &Path,
working_directory: &Path,
path: &Path,
contents: &Rope,
) -> Result<String> {
let mut child = util::command::new_smol_command(git_binary)
.current_dir(working_directory)
.arg("blame")
.arg("--incremental")
.arg("--contents")
.arg("-")
.arg(path.as_os_str())
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.map_err(|e| anyhow!("Failed to start git blame process: {}", e))?;
let stdin = child
.stdin
.as_mut()
.context("failed to get pipe to stdin of git blame command")?;
for chunk in contents.chunks() {
stdin.write_all(chunk.as_bytes()).await?;
}
stdin.flush().await?;
let output = child
.output()
.await
.map_err(|e| anyhow!("Failed to read git blame output: {}", e))?;
handle_command_output(output)
}
fn handle_command_output(output: std::process::Output) -> Result<String> {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
let trimmed = stderr.trim();
if trimmed == GIT_BLAME_NO_COMMIT_ERROR || trimmed.contains(GIT_BLAME_NO_PATH) {
return Ok(String::new());
}
return Err(anyhow!("git blame process failed: {}", stderr));
}
Ok(String::from_utf8(output.stdout)?)
}
// ... existing code ...
"#}
.into(),
expected_output: include_str!("fixtures/extract_handle_command_output/after.rs").into()
},
);
}
#[derive(Clone)]
struct Eval {
input_path: PathBuf,
input_content: String,
instructions: String,
expected_output: String,
}
fn eval(iterations: usize, expected_pass_ratio: f32, eval: Eval) {
let executor = gpui::background_executor();
let (tx, rx) = mpsc::channel();
for _ in 0..iterations {
let eval = eval.clone();
let tx = tx.clone();
executor
.spawn(async move {
let dispatcher = gpui::TestDispatcher::new(StdRng::from_entropy());
let mut cx = TestAppContext::build(dispatcher, None);
let output = cx.executor().block_test(async {
let test = agent_test(&mut cx).await;
apply_edits(
eval.input_path,
eval.input_content,
eval.instructions,
&test,
&mut cx,
)
.await
});
tx.send(output).unwrap();
})
.detach();
}
drop(tx);
let mut evaluated_count = 0;
report_progress(evaluated_count, iterations);
let mut failed_count = 0;
let mut failed_message = String::new();
let mut failed_outputs = HashSet::default();
while let Ok(output) = rx.recv() {
if output != eval.expected_output {
failed_count += 1;
if failed_outputs.insert(output.clone()) {
writeln!(
failed_message,
"=======\n{}\n=======",
pretty_assertions::StrComparison::new(&output, &eval.expected_output)
)
.unwrap();
}
}
evaluated_count += 1;
report_progress(evaluated_count, iterations);
}
let actual_pass_ratio = (iterations - failed_count) as f32 / iterations as f32;
println!("Actual pass ratio: {}\n", actual_pass_ratio);
assert!(
actual_pass_ratio >= expected_pass_ratio,
"Expected pass ratio: {}\nActual pass ratio: {}\nFailures: {}",
expected_pass_ratio,
actual_pass_ratio,
failed_message
);
}
fn report_progress(evaluated_count: usize, iterations: usize) {
print!("\r\x1b[KEvaluated {}/{}", evaluated_count, iterations);
std::io::stdout().flush().unwrap();
}
async fn apply_edits(
path: impl AsRef<Path>,
content: impl Into<Arc<str>>,
instructions: impl Into<String>,
test: &EditAgentTest,
cx: &mut TestAppContext,
) -> String {
let path = test
.project
.read_with(cx, |project, cx| project.find_project_path(path, cx))
.unwrap();
let buffer = test
.project
.update(cx, |project, cx| project.open_buffer(path, cx))
.await
.unwrap();
buffer.update(cx, |buffer, cx| buffer.set_text(content, cx));
test.agent
.edit(buffer.clone(), instructions.into(), &mut cx.to_async())
.await
.unwrap();
buffer.update(cx, |buffer, _cx| buffer.text())
}
struct EditAgentTest {
agent: EditAgent,
project: Entity<Project>,
}
async fn agent_test(cx: &mut TestAppContext) -> EditAgentTest {
cx.executor().allow_parking();
cx.update(settings::init);
cx.update(Project::init_settings);
cx.update(language::init);
cx.update(gpui_tokio::init);
cx.update(client::init_settings);
let fs = FakeFs::new(cx.executor().clone());
fs.insert_tree("/root", json!({})).await;
let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await;
let model = cx
.update(|cx| {
let http_client = ReqwestClient::user_agent("agent tests").unwrap();
cx.set_http_client(Arc::new(http_client));
let client = Client::production(cx);
let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
language_model::init(client.clone(), cx);
language_models::init(user_store.clone(), client.clone(), fs.clone(), cx);
let models = LanguageModelRegistry::read_global(cx);
let model = models
.available_models(cx)
.find(|model| model.id().0 == "gemini-2.0-flash")
.unwrap();
let provider = models.provider(&model.provider_id()).unwrap();
let authenticated = provider.authenticate(cx);
cx.spawn(async move |_| {
authenticated.await.unwrap();
model
})
})
.await;
let action_log = cx.new(|_| ActionLog::new(project.clone()));
EditAgentTest {
agent: EditAgent::new(model, action_log, Templates::new()),
project,
}
}
}
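
The deleted `resolve_location` above locates the model's quoted `old_text` in the buffer with an edit-distance style dynamic program: the first row costs nothing, so a match may begin anywhere, and the cheapest cell in the final row marks where the best match ends. A compact standalone sketch of that idea with uniform costs (the whitespace-weighted costs, traceback, and anchor conversion in the original are omitted; names are illustrative):

```
// Approximate-substring search: returns the byte offset where the best
// fuzzy match of `query` ends in `buffer`, plus its edit cost.
fn best_match_end(buffer: &str, query: &str) -> (usize, u32) {
    let b = buffer.as_bytes();
    let q = query.as_bytes();
    // prev[col]: cost of matching the first `row` query bytes, ending at
    // buffer offset `col`; row 0 is all zeros, so matches start for free.
    let mut prev = vec![0u32; b.len() + 1];
    let mut curr = vec![0u32; b.len() + 1];
    for row in 1..=q.len() {
        curr[0] = row as u32;
        for col in 1..=b.len() {
            let substitute = prev[col - 1] + u32::from(q[row - 1] != b[col - 1]);
            let skip_query = prev[col] + 1;
            let skip_buffer = curr[col - 1] + 1;
            curr[col] = substitute.min(skip_query).min(skip_buffer);
        }
        std::mem::swap(&mut prev, &mut curr);
    }
    // The cheapest cell in the last row is where the best match ends.
    prev.iter()
        .copied()
        .enumerate()
        .min_by_key(|&(_, cost)| cost)
        .unwrap()
}

fn main() {
    let buffer = "fn main() {\n    println!(\"hello\");\n}\n";
    let (end, cost) = best_match_end(buffer, "println!(\"helo\")");
    println!("best match ends at byte offset {end} with cost {cost}");
}
```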

View File

@@ -1,246 +0,0 @@
use smallvec::SmallVec;
use std::mem;
#[derive(Debug, Eq, PartialEq)]
pub struct Edit {
pub old_text: String,
pub new_text: String,
}
#[derive(Debug)]
pub struct EditParser {
state: EditParserState,
buffer: String,
}
#[derive(Debug, PartialEq)]
enum EditParserState {
Pending,
WithinOldText,
AfterOldText { old_text: String },
WithinNewText { old_text: String },
}
impl EditParser {
pub fn new() -> Self {
EditParser {
state: EditParserState::Pending,
buffer: String::new(),
}
}
pub fn push(&mut self, chunk: &str) -> SmallVec<[Edit; 1]> {
self.buffer.push_str(chunk);
let mut edits = SmallVec::new();
loop {
match &mut self.state {
EditParserState::Pending => {
if let Some(start) = self.buffer.find("<old_text>") {
self.buffer.drain(..start + "<old_text>".len());
self.state = EditParserState::WithinOldText;
} else {
break;
}
}
EditParserState::WithinOldText => {
if let Some(end) = self.buffer.find("</old_text>") {
let mut start = 0;
if self.buffer.starts_with('\n') {
start = 1;
}
let mut old_text = self.buffer[start..end].to_string();
if old_text.ends_with('\n') {
old_text.pop();
}
self.buffer.drain(..end + "</old_text>".len());
self.state = EditParserState::AfterOldText { old_text };
} else {
break;
}
}
EditParserState::AfterOldText { old_text } => {
if let Some(start) = self.buffer.find("<new_text>") {
self.buffer.drain(..start + "<new_text>".len());
self.state = EditParserState::WithinNewText {
old_text: mem::take(old_text),
};
} else {
break;
}
}
EditParserState::WithinNewText { old_text } => {
if let Some(end) = self.buffer.find("</new_text>") {
let mut start = 0;
if self.buffer.starts_with('\n') {
start = 1;
}
let mut new_text = self.buffer[start..end].to_string();
if new_text.ends_with('\n') {
new_text.pop();
}
edits.push(Edit {
old_text: mem::take(old_text),
new_text,
});
self.buffer.drain(..end + "</new_text>".len());
self.state = EditParserState::Pending;
} else {
break;
}
}
}
}
edits
}
}
#[cfg(test)]
mod tests {
use super::*;
use indoc::indoc;
use rand::prelude::*;
use std::cmp;
#[gpui::test(iterations = 1000)]
fn test_single_edit(mut rng: StdRng) {
assert_eq!(
parse(
"<old_text>original</old_text><new_text>updated</new_text>",
&mut rng
),
vec![Edit {
old_text: "original".to_string(),
new_text: "updated".to_string(),
}]
)
}
#[gpui::test(iterations = 1000)]
fn test_multiple_edits(mut rng: StdRng) {
assert_eq!(
parse(
indoc! {"
<old_text>
first old
</old_text><new_text>first new</new_text>
<old_text>second old</old_text><new_text>
second new
</new_text>
"},
&mut rng
),
vec![
Edit {
old_text: "first old".to_string(),
new_text: "first new".to_string(),
},
Edit {
old_text: "second old".to_string(),
new_text: "second new".to_string(),
},
]
);
}
#[gpui::test(iterations = 1000)]
fn test_edits_with_extra_text(mut rng: StdRng) {
assert_eq!(
parse(
indoc! {"
ignore this <old_text>
content</old_text>extra stuff<new_text>updated content</new_text>trailing data
more text <old_text>second item
</old_text>middle text<new_text>modified second item</new_text>end
<old_text>third case</old_text><new_text>improved third case</new_text> with trailing text
"},
&mut rng
),
vec![
Edit {
old_text: "content".to_string(),
new_text: "updated content".to_string(),
},
Edit {
old_text: "second item".to_string(),
new_text: "modified second item".to_string(),
},
Edit {
old_text: "third case".to_string(),
new_text: "improved third case".to_string(),
},
]
);
}
#[gpui::test(iterations = 1000)]
fn test_nested_tags(mut rng: StdRng) {
assert_eq!(
parse(
"<old_text>code with <tag>nested</tag> elements</old_text><new_text>new <code>content</code></new_text>",
&mut rng
),
vec![Edit {
old_text: "code with <tag>nested</tag> elements".to_string(),
new_text: "new <code>content</code>".to_string(),
}]
);
}
#[gpui::test(iterations = 1000)]
fn test_empty_old_and_new_text(mut rng: StdRng) {
assert_eq!(
parse("<old_text></old_text><new_text></new_text>", &mut rng),
vec![Edit {
old_text: "".to_string(),
new_text: "".to_string(),
}]
);
}
#[gpui::test(iterations = 1000)]
fn test_with_special_characters(mut rng: StdRng) {
assert_eq!(
parse(
"<old_text>function(x) { return x * 2; }</old_text><new_text>function(x) { return x ** 2; }</new_text>",
&mut rng
),
vec![Edit {
old_text: "function(x) { return x * 2; }".to_string(),
new_text: "function(x) { return x ** 2; }".to_string(),
}]
);
}
#[gpui::test(iterations = 100)]
fn test_multiline_content(mut rng: StdRng) {
assert_eq!(
parse(
"<old_text>line1\nline2\nline3</old_text><new_text>line1\nmodified line2\nline3</new_text>",
&mut rng
),
vec![Edit {
old_text: "line1\nline2\nline3".to_string(),
new_text: "line1\nmodified line2\nline3".to_string(),
}]
);
}
fn parse(input: &str, rng: &mut StdRng) -> Vec<Edit> {
let mut parser = EditParser::new();
let chunk_count = rng.gen_range(1..=cmp::min(input.len(), 50));
let mut chunk_indices = (0..input.len()).choose_multiple(rng, chunk_count);
chunk_indices.sort();
let mut edits = Vec::new();
let mut last_ix = 0;
for chunk_ix in chunk_indices {
edits.extend(parser.push(&input[last_ix..chunk_ix]));
last_ix = chunk_ix;
}
edits.extend(parser.push(&input[last_ix..]));
edits
}
}
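For orientation, here is a minimal usage sketch (not part of the diff) showing how streamed model output could be fed through `EditParser` chunk by chunk; it assumes the `EditParser` and `Edit` types above are in scope, and the chunk boundaries are arbitrary.

```
// Hypothetical driver: push streamed chunks into the parser and collect
// every edit whose closing </new_text> tag has arrived so far.
fn collect_streamed_edits(chunks: &[&str]) -> Vec<Edit> {
    let mut parser = EditParser::new();
    let mut edits = Vec::new();
    for &chunk in chunks {
        // `push` may return zero or more completed edits per chunk.
        edits.extend(parser.push(chunk));
    }
    edits
}
```

For example, tags may be split across chunk boundaries: `collect_streamed_edits(&["<old_text>fo", "o</old_text><new_", "text>bar</new_text>"])` would yield a single `Edit` with `old_text: "foo"` and `new_text: "bar"`.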



@@ -1,5 +1,8 @@
use crate::{Templates, edit_agent::EditAgent, schema::json_schema_for};
use anyhow::{Result, anyhow};
use crate::{
replace::{replace_exact, replace_with_flexible_indent},
schema::json_schema_for,
};
use anyhow::{Context as _, Result, anyhow};
use assistant_tool::{ActionLog, AnyToolCard, Tool, ToolCard, ToolResult, ToolUseStatus};
use buffer_diff::{BufferDiff, BufferDiffSnapshot};
use editor::{Editor, EditorMode, MultiBuffer, PathKey};
@@ -9,9 +12,7 @@ use gpui::{
use language::{
Anchor, Buffer, Capability, LanguageRegistry, LineEnding, OffsetRangeExt, Rope, TextBuffer,
};
use language_model::{
LanguageModelRegistry, LanguageModelRequestMessage, LanguageModelToolSchemaFormat,
};
use language_model::{LanguageModelRequestMessage, LanguageModelToolSchemaFormat};
use project::Project;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
@@ -25,7 +26,7 @@ use workspace::Workspace;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct EditFileToolInput {
/// A user-friendly markdown description of the edit. This will be shown in the UI.
/// A user-friendly markdown description of what's being replaced. This will be shown in the UI.
///
/// <example>Fix API endpoint URLs</example>
/// <example>Update copyright year in `page_footer`</example>
@@ -55,35 +56,11 @@ pub struct EditFileToolInput {
/// </example>
pub path: PathBuf,
/// Edit instructions that will be interpreted by a less intelligent model,
/// which will quickly apply the edits. You should make it clear what the
/// edits are, while also minimizing the unchanged code you write. The model
/// does not have access to this conversation, so you must make sure the
/// instructions are self-contained and do not rely on external context.
///
/// Insert `// ... existing code ...` comments in your output to represent
/// unchanged code ABOVE, BELOW, and IN BETWEEN edited lines.
///
/// Bias towards repeating as few lines of the original file as possible to
/// convey the change. However, each edit should contain sufficient context
/// of unchanged lines to resolve ambiguity. When you want to delete a piece
/// of code, indicate a few lines above and below the code you want to
/// delete (surrounded by `// ... existing code ...`).
///
/// Never forget to include `// ... existing code ...` comments to represent
/// unchanged lines, otherwise the small model may not understand the
/// context of your edit and will delete important code!
///
/// <your_output>
/// // ... existing code ...
/// FIRST_EDIT
/// // ... existing code ...
/// SECOND_EDIT
/// // ... existing code ...
/// THIRD_EDIT
/// // ... existing code ...
/// </your_output>
pub edit_instructions: String,
/// The text to replace.
pub old_string: String,
/// The text to replace it with.
pub new_string: String,
}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
@@ -92,6 +69,10 @@ struct PartialInput {
path: String,
#[serde(default)]
display_description: String,
#[serde(default)]
old_string: String,
#[serde(default)]
new_string: String,
}
pub struct EditFileTool;
@@ -156,23 +137,6 @@ impl Tool for EditFileTool {
Err(err) => return Task::ready(Err(anyhow!(err))).into(),
};
let Some(project_path) = project.read(cx).find_project_path(&input.path, cx) else {
return Task::ready(Err(anyhow!(
"Path {} not found in project",
input.path.display()
)))
.into();
};
let Some(worktree) = project
.read(cx)
.worktree_for_id(project_path.worktree_id, cx)
else {
return Task::ready(Err(anyhow!("Worktree not found for project path"))).into();
};
let exists = worktree.update(cx, |worktree, cx| {
worktree.file_exists(&project_path.path, cx)
});
let card = window.and_then(|window| {
window
.update(cx, |_, window, cx| {
@@ -184,22 +148,12 @@ impl Tool for EditFileTool {
});
let card_clone = card.clone();
// todo!("read model from settings...")
let models = LanguageModelRegistry::read_global(cx);
let model = models
.available_models(cx)
.find(|model| model.id().0 == "gemini-2.0-flash")
.unwrap();
let provider = models.provider(&model.provider_id()).unwrap();
let authenticated = provider.authenticate(cx);
// todo!("reuse templates")
let edit_agent = EditAgent::new(model, action_log, Templates::new());
let task = cx.spawn(async move |cx: &mut AsyncApp| {
authenticated.await?;
if !exists.await? {
return Err(anyhow!("{} not found", input.path.display()));
}
let project_path = project.read_with(cx, |project, cx| {
project
.find_project_path(&input.path, cx)
.context("Path not found in project")
})??;
let buffer = project
.update(cx, |project, cx| {
@@ -207,27 +161,90 @@ impl Tool for EditFileTool {
})?
.await?;
let old_snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?;
edit_agent
.edit(buffer.clone(), input.edit_instructions.clone(), cx)
.await?;
let new_snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?;
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?;
if input.old_string.is_empty() {
return Err(anyhow!(
"`old_string` can't be empty, use another tool if you want to create a file."
));
}
if input.old_string == input.new_string {
return Err(anyhow!(
"The `old_string` and `new_string` are identical, so no changes would be made."
));
}
let result = cx
.background_spawn(async move {
// Try to match exactly
let diff = replace_exact(&input.old_string, &input.new_string, &snapshot)
.await
// If that fails, try being flexible about indentation
.or_else(|| {
replace_with_flexible_indent(
&input.old_string,
&input.new_string,
&snapshot,
)
})?;
if diff.edits.is_empty() {
return None;
}
let old_text = snapshot.text();
Some((old_text, diff))
})
.await;
let Some((old_text, diff)) = result else {
let err = buffer.read_with(cx, |buffer, _cx| {
let file_exists = buffer
.file()
.map_or(false, |file| file.disk_state().exists());
if !file_exists {
anyhow!("{} does not exist", input.path.display())
} else if buffer.is_empty() {
anyhow!(
"{} is empty, so the provided `old_string` wasn't found.",
input.path.display()
)
} else {
anyhow!("Failed to match the provided `old_string`")
}
})?;
return Err(err);
};
let snapshot = cx.update(|cx| {
action_log.update(cx, |log, cx| log.track_buffer(buffer.clone(), cx));
let snapshot = buffer.update(cx, |buffer, cx| {
buffer.finalize_last_transaction();
buffer.apply_diff(diff, cx);
buffer.finalize_last_transaction();
buffer.snapshot()
});
action_log.update(cx, |log, cx| log.buffer_edited(buffer.clone(), cx));
snapshot
})?;
project
.update(cx, |project, cx| project.save_buffer(buffer, cx))?
.await?;
let old_text = cx.background_spawn({
let old_snapshot = old_snapshot.clone();
async move { old_snapshot.text() }
});
let new_text = cx.background_spawn({
let new_snapshot = new_snapshot.clone();
async move { new_snapshot.text() }
});
let diff = cx.background_spawn(async move {
language::unified_diff(&old_snapshot.text(), &new_snapshot.text())
});
let (old_text, new_text, diff) = futures::join!(old_text, new_text, diff);
let new_text = snapshot.text();
let diff_str = cx
.background_spawn({
let old_text = old_text.clone();
let new_text = new_text.clone();
async move { language::unified_diff(&old_text, &new_text) }
})
.await;
if let Some(card) = card_clone {
card.update(cx, |card, cx| {
@@ -239,7 +256,7 @@ impl Tool for EditFileTool {
Ok(format!(
"Edited {}:\n\n```diff\n{}\n```",
input.path.display(),
diff
diff_str
))
});
@@ -603,7 +620,6 @@ async fn build_buffer_diff(
})
}
// todo!("add unit tests for failure modes of edit, like file not found, etc.")
#[cfg(test)]
mod tests {
use super::*;


@@ -7,4 +7,39 @@ Before using this tool:
2. Verify the directory path is correct (only applicable when creating new files):
- Use the `list_directory` tool to verify the parent directory exists and is the correct location
Group coherent edits together and include all of them in a single call to this tool. Add the full context needed for a small model to understand the edits.
To make a file edit, provide the following:
1. path: The full path to the file you wish to modify in the project. This path must include the root directory in the project.
2. old_string: The text to replace (must be unique within the file, and must match the file contents exactly, including all whitespace and indentation)
3. new_string: The edited text, which will replace the old_string in the file.
The tool will replace ONE occurrence of old_string with new_string in the specified file.
CRITICAL REQUIREMENTS FOR USING THIS TOOL:
1. UNIQUENESS: The old_string MUST uniquely identify the specific instance you want to change. This means:
- Include AT LEAST 3-5 lines of context BEFORE the change point
- Include AT LEAST 3-5 lines of context AFTER the change point
- Include all whitespace, indentation, and surrounding code exactly as it appears in the file
2. SINGLE INSTANCE: This tool can only change ONE instance at a time. If you need to change multiple instances:
- Make separate calls to this tool for each instance
- Each call must uniquely identify its specific instance using extensive context
3. VERIFICATION: Before using this tool:
- Check how many instances of the target text exist in the file
- If multiple instances exist, gather enough context to uniquely identify each one
- Plan separate tool calls for each instance
WARNING: If you do not follow these requirements:
- The tool will fail if old_string matches multiple locations
- The tool will fail if old_string doesn't match exactly (including whitespace)
- You may change the wrong instance if you don't include enough context
When making edits:
- Ensure the edit results in idiomatic, correct code
- Do not leave the code in a broken state
- Always use fully-qualified project paths (starting with the name of one of the project's root directories)
If you want to create a new file, use the `create_file` tool instead of this tool. Don't pass an empty `old_string`.
Remember: when making multiple file edits in a row to the same file, you should prefer to send all edits in a single message with multiple calls to this tool, rather than multiple messages with a single call each.
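As an illustration of these requirements (hypothetical, not taken from the repository), the sketch below builds the JSON input for one call; the unchanged lines surrounding the edit in `old_string` are what make the match unique, and `new_string` repeats them with the single change applied. The path and file contents are invented, the context is abridged, and any additional fields the tool defines (such as the UI description) are omitted.

```
// Hypothetical tool input for a single, uniquely identified replacement.
use serde_json::json;

let input = json!({
    "path": "my-project/src/cart.rs",
    "old_string": "let mut total = 0.0;\nfor item in &self.items {\n    total += item.price;\n}\ntotal",
    "new_string": "let mut total = 0.0;\nfor item in &self.items {\n    total += item.price;\n}\ntotal * 0.9"
});
```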


@@ -14,7 +14,7 @@ use project::Project;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownEscaped;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy)]
enum ContentType {
@@ -134,7 +134,7 @@ impl Tool for FetchTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<FetchToolInput>(input.clone()) {
Ok(input) => format!("Fetch {}", MarkdownString::escape(&input.url)),
Ok(input) => format!("Fetch {}", MarkdownEscaped(&input.url)),
Err(_) => "Fetch URL".to_string(),
}
}


@@ -1,15 +1,21 @@
use crate::schema::json_schema_for;
use crate::{schema::json_schema_for, ui::ToolCallCardHeader};
use anyhow::{Result, anyhow};
use assistant_tool::{ActionLog, Tool, ToolResult};
use gpui::{AnyWindowHandle, App, AppContext, Entity, Task};
use assistant_tool::{ActionLog, Tool, ToolCard, ToolResult, ToolUseStatus};
use editor::Editor;
use futures::channel::oneshot::{self, Receiver};
use gpui::{
AnyWindowHandle, App, AppContext, Context, Entity, IntoElement, Task, WeakEntity, Window,
};
use language;
use language_model::{LanguageModelRequestMessage, LanguageModelToolSchemaFormat};
use project::Project;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{cmp, fmt::Write as _, path::PathBuf, sync::Arc};
use ui::IconName;
use util::paths::PathMatcher;
use worktree::Snapshot;
use std::fmt::Write;
use std::{cmp, path::PathBuf, sync::Arc};
use ui::{Disclosure, Tooltip, prelude::*};
use util::{ResultExt, paths::PathMatcher};
use workspace::Workspace;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct FindPathToolInput {
@@ -29,7 +35,7 @@ pub struct FindPathToolInput {
/// Optional starting position for paginated results (0-based).
/// When not provided, starts from the beginning.
#[serde(default)]
pub offset: u32,
pub offset: usize,
}
const RESULTS_PER_PAGE: usize = 50;
@@ -77,13 +83,20 @@ impl Tool for FindPathTool {
Ok(input) => (input.offset, input.glob),
Err(err) => return Task::ready(Err(anyhow!(err))).into(),
};
let offset = offset as usize;
let task = search_paths(&glob, project, cx);
cx.background_spawn(async move {
let matches = task.await?;
let paginated_matches = &matches[cmp::min(offset, matches.len())
let (sender, receiver) = oneshot::channel();
let card = cx.new(|cx| FindPathToolCard::new(glob.clone(), receiver, cx));
let search_paths_task = search_paths(&glob, project, cx);
let task = cx.background_spawn(async move {
let matches = search_paths_task.await?;
let paginated_matches: &[PathBuf] = &matches[cmp::min(offset, matches.len())
..cmp::min(offset + RESULTS_PER_PAGE, matches.len())];
sender.send(paginated_matches.to_vec()).log_err();
if matches.is_empty() {
Ok("No matches found".to_string())
} else {
@@ -102,8 +115,12 @@ impl Tool for FindPathTool {
}
Ok(message)
}
})
.into()
});
ToolResult {
output: task,
card: Some(card.into()),
}
}
}
@@ -115,7 +132,7 @@ fn search_paths(glob: &str, project: Entity<Project>, cx: &mut App) -> Task<Resu
Ok(matcher) => matcher,
Err(err) => return Task::ready(Err(anyhow!("Invalid glob: {err}"))),
};
let snapshots: Vec<Snapshot> = project
let snapshots: Vec<_> = project
.read(cx)
.worktrees(cx)
.map(|worktree| worktree.read(cx).snapshot())
@@ -135,6 +152,209 @@ fn search_paths(glob: &str, project: Entity<Project>, cx: &mut App) -> Task<Resu
})
}
struct FindPathToolCard {
paths: Vec<PathBuf>,
expanded: bool,
glob: String,
_receiver_task: Option<Task<Result<()>>>,
}
impl FindPathToolCard {
fn new(glob: String, receiver: Receiver<Vec<PathBuf>>, cx: &mut Context<Self>) -> Self {
let _receiver_task = cx.spawn(async move |this, cx| {
let paths = receiver.await?;
this.update(cx, |this, _cx| {
this.paths = paths;
})
.log_err();
Ok(())
});
Self {
paths: Vec::new(),
expanded: false,
glob,
_receiver_task: Some(_receiver_task),
}
}
}
impl ToolCard for FindPathToolCard {
fn render(
&mut self,
_status: &ToolUseStatus,
_window: &mut Window,
workspace: WeakEntity<Workspace>,
cx: &mut Context<Self>,
) -> impl IntoElement {
let matches_label: SharedString = if self.paths.is_empty() {
"No matches".into()
} else if self.paths.len() == 1 {
"1 match".into()
} else {
format!("{} matches", self.paths.len()).into()
};
let glob_label = self.glob.to_string();
let content = if !self.paths.is_empty() && self.expanded {
Some(
v_flex()
.relative()
.ml_1p5()
.px_1p5()
.gap_0p5()
.border_l_1()
.border_color(cx.theme().colors().border_variant)
.children(self.paths.iter().enumerate().map(|(index, path)| {
let path_clone = path.clone();
let workspace_clone = workspace.clone();
let button_label = path.to_string_lossy().to_string();
Button::new(("path", index), button_label)
.icon(IconName::ArrowUpRight)
.icon_size(IconSize::XSmall)
.icon_position(IconPosition::End)
.label_size(LabelSize::Small)
.color(Color::Muted)
.tooltip(Tooltip::text("Jump to File"))
.on_click(move |_, window, cx| {
workspace_clone
.update(cx, |workspace, cx| {
let path = PathBuf::from(&path_clone);
let Some(project_path) = workspace
.project()
.read(cx)
.find_project_path(&path, cx)
else {
return;
};
let open_task = workspace.open_path(
project_path,
None,
true,
window,
cx,
);
window
.spawn(cx, async move |cx| {
let item = open_task.await?;
if let Some(active_editor) =
item.downcast::<Editor>()
{
active_editor
.update_in(cx, |editor, window, cx| {
editor.go_to_singleton_buffer_point(
language::Point::new(0, 0),
window,
cx,
);
})
.log_err();
}
anyhow::Ok(())
})
.detach_and_log_err(cx);
})
.ok();
})
}))
.into_any(),
)
} else {
None
};
v_flex()
.mb_2()
.gap_1()
.child(
ToolCallCardHeader::new(IconName::SearchCode, matches_label)
.with_code_path(glob_label)
.disclosure_slot(
Disclosure::new("path-search-disclosure", self.expanded)
.opened_icon(IconName::ChevronUp)
.closed_icon(IconName::ChevronDown)
.disabled(self.paths.is_empty())
.on_click(cx.listener(move |this, _, _, _cx| {
this.expanded = !this.expanded;
})),
),
)
.children(content)
}
}
impl Component for FindPathTool {
fn scope() -> ComponentScope {
ComponentScope::Agent
}
fn sort_name() -> &'static str {
"FindPathTool"
}
fn preview(window: &mut Window, cx: &mut App) -> Option<AnyElement> {
let successful_card = cx.new(|_| FindPathToolCard {
paths: vec![
PathBuf::from("src/main.rs"),
PathBuf::from("src/lib.rs"),
PathBuf::from("tests/test.rs"),
],
expanded: true,
glob: "*.rs".to_string(),
_receiver_task: None,
});
let empty_card = cx.new(|_| FindPathToolCard {
paths: Vec::new(),
expanded: false,
glob: "*.nonexistent".to_string(),
_receiver_task: None,
});
Some(
v_flex()
.gap_6()
.children(vec![example_group(vec![
single_example(
"With Paths",
div()
.size_full()
.child(successful_card.update(cx, |tool, cx| {
tool.render(
&ToolUseStatus::Finished("".into()),
window,
WeakEntity::new_invalid(),
cx,
)
.into_any_element()
}))
.into_any_element(),
),
single_example(
"No Paths",
div()
.size_full()
.child(empty_card.update(cx, |tool, cx| {
tool.render(
&ToolUseStatus::Finished("".into()),
window,
WeakEntity::new_invalid(),
cx,
)
.into_any_element()
}))
.into_any_element(),
),
])])
.into_any_element(),
)
}
}
#[cfg(test)]
mod test {
use super::*;


@@ -1,328 +0,0 @@
use crate::commit::get_messages;
use crate::{GitRemote, Oid};
use anyhow::{Context as _, Result, anyhow};
use collections::{HashMap, HashSet};
use futures::AsyncWriteExt;
use gpui::SharedString;
use serde::{Deserialize, Serialize};
use std::process::Stdio;
use std::{ops::Range, path::Path};
use text::Rope;
use time::OffsetDateTime;
use time::UtcOffset;
use time::macros::format_description;
pub use git2 as libgit;
#[derive(Debug, Clone, Default)]
pub struct Blame {
pub entries: Vec<BlameEntry>,
pub messages: HashMap<Oid, String>,
pub remote_url: Option<String>,
}
#[derive(Clone, Debug, Default)]
pub struct ParsedCommitMessage {
pub message: SharedString,
pub permalink: Option<url::Url>,
pub pull_request: Option<crate::hosting_provider::PullRequest>,
pub remote: Option<GitRemote>,
}
impl Blame {
pub async fn for_path(
git_binary: &Path,
working_directory: &Path,
path: &Path,
content: &Rope,
remote_url: Option<String>,
) -> Result<Self> {
let output = run_git_blame(git_binary, working_directory, path, content).await?;
let mut entries = parse_git_blame(&output)?;
entries.sort_unstable_by(|a, b| a.range.start.cmp(&b.range.start));
let mut unique_shas = HashSet::default();
for entry in entries.iter_mut() {
unique_shas.insert(entry.sha);
}
let shas = unique_shas.into_iter().collect::<Vec<_>>();
let messages = get_messages(working_directory, &shas)
.await
.context("failed to get commit messages")?;
Ok(Self {
entries,
messages,
remote_url,
})
}
}
const GIT_BLAME_NO_COMMIT_ERROR: &str = "fatal: no such ref: HEAD";
const GIT_BLAME_NO_PATH: &str = "fatal: no such path";
#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)]
pub struct BlameEntry {
pub sha: Oid,
pub range: Range<u32>,
pub original_line_number: u32,
pub author: Option<String>,
pub author_mail: Option<String>,
pub author_time: Option<i64>,
pub author_tz: Option<String>,
pub committer_name: Option<String>,
pub committer_email: Option<String>,
pub committer_time: Option<i64>,
pub committer_tz: Option<String>,
pub summary: Option<String>,
pub previous: Option<String>,
pub filename: String,
}
impl BlameEntry {
// Returns a BlameEntry by parsing the first line of a `git blame --incremental`
// entry. The line MUST have this format:
//
// <40-byte-hex-sha1> <sourceline> <resultline> <num-lines>
fn new_from_blame_line(line: &str) -> Result<BlameEntry> {
let mut parts = line.split_whitespace();
let sha = parts
.next()
.and_then(|line| line.parse::<Oid>().ok())
.ok_or_else(|| anyhow!("failed to parse sha"))?;
let original_line_number = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse original line number"))?;
let final_line_number = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse final line number"))?;
let line_count = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse final line number"))?;
let start_line = final_line_number.saturating_sub(1);
let end_line = start_line + line_count;
let range = start_line..end_line;
Ok(Self {
sha,
range,
original_line_number,
..Default::default()
})
}
pub fn author_offset_date_time(&self) -> Result<time::OffsetDateTime> {
if let (Some(author_time), Some(author_tz)) = (self.author_time, &self.author_tz) {
let format = format_description!("[offset_hour][offset_minute]");
let offset = UtcOffset::parse(author_tz, &format)?;
let date_time_utc = OffsetDateTime::from_unix_timestamp(author_time)?;
Ok(date_time_utc.to_offset(offset))
} else {
// Directly return current time in UTC if there's no author time or timezone
Ok(time::OffsetDateTime::now_utc())
}
}
}
// parse_git_blame parses the output of `git blame --incremental`, which returns
// all the blame-entries for a given path incrementally, as it finds them.
//
// Each entry *always* starts with:
//
// <40-byte-hex-sha1> <sourceline> <resultline> <num-lines>
//
// Each entry *always* ends with:
//
// filename <whitespace-quoted-filename-goes-here>
//
// Line numbers are 1-indexed.
//
// A `git blame --incremental` entry looks like this:
//
// 6ad46b5257ba16d12c5ca9f0d4900320959df7f4 2 2 1
// author Joe Schmoe
// author-mail <joe.schmoe@example.com>
// author-time 1709741400
// author-tz +0100
// committer Joe Schmoe
// committer-mail <joe.schmoe@example.com>
// committer-time 1709741400
// committer-tz +0100
// summary Joe's cool commit
// previous 486c2409237a2c627230589e567024a96751d475 index.js
// filename index.js
//
// If the entry has the same SHA as an entry that was already printed then no
// signature information is printed:
//
// 6ad46b5257ba16d12c5ca9f0d4900320959df7f4 3 4 1
// previous 486c2409237a2c627230589e567024a96751d475 index.js
// filename index.js
//
// More about `--incremental` output: https://mirrors.edge.kernel.org/pub/software/scm/git/docs/git-blame.html
fn parse_git_blame(output: &str) -> Result<Vec<BlameEntry>> {
let mut entries: Vec<BlameEntry> = Vec::new();
let mut index: HashMap<Oid, usize> = HashMap::default();
let mut current_entry: Option<BlameEntry> = None;
for line in output.lines() {
let mut done = false;
match &mut current_entry {
None => {
let mut new_entry = BlameEntry::new_from_blame_line(line)?;
if let Some(existing_entry) = index
.get(&new_entry.sha)
.and_then(|slot| entries.get(*slot))
{
new_entry.author.clone_from(&existing_entry.author);
new_entry
.author_mail
.clone_from(&existing_entry.author_mail);
new_entry.author_time = existing_entry.author_time;
new_entry.author_tz.clone_from(&existing_entry.author_tz);
new_entry
.committer_name
.clone_from(&existing_entry.committer_name);
new_entry
.committer_email
.clone_from(&existing_entry.committer_email);
new_entry.committer_time = existing_entry.committer_time;
new_entry
.committer_tz
.clone_from(&existing_entry.committer_tz);
new_entry.summary.clone_from(&existing_entry.summary);
}
current_entry.replace(new_entry);
}
Some(entry) => {
let Some((key, value)) = line.split_once(' ') else {
continue;
};
let is_committed = !entry.sha.is_zero();
match key {
"filename" => {
entry.filename = value.into();
done = true;
}
"previous" => entry.previous = Some(value.into()),
"summary" if is_committed => entry.summary = Some(value.into()),
"author" if is_committed => entry.author = Some(value.into()),
"author-mail" if is_committed => entry.author_mail = Some(value.into()),
"author-time" if is_committed => {
entry.author_time = Some(value.parse::<i64>()?)
}
"author-tz" if is_committed => entry.author_tz = Some(value.into()),
"committer" if is_committed => entry.committer_name = Some(value.into()),
"committer-mail" if is_committed => entry.committer_email = Some(value.into()),
"committer-time" if is_committed => {
entry.committer_time = Some(value.parse::<i64>()?)
}
"committer-tz" if is_committed => entry.committer_tz = Some(value.into()),
_ => {}
}
}
};
if done {
if let Some(entry) = current_entry.take() {
index.insert(entry.sha, entries.len());
// We only want annotations that have a commit.
if !entry.sha.is_zero() {
entries.push(entry);
}
}
}
}
Ok(entries)
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use super::BlameEntry;
use super::parse_git_blame;
fn read_test_data(filename: &str) -> String {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("test_data");
path.push(filename);
std::fs::read_to_string(&path)
.unwrap_or_else(|_| panic!("Could not read test data at {:?}. Is it generated?", path))
}
fn assert_eq_golden(entries: &Vec<BlameEntry>, golden_filename: &str) {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("test_data");
path.push("golden");
path.push(format!("{}.json", golden_filename));
let mut have_json =
serde_json::to_string_pretty(&entries).expect("could not serialize entries to JSON");
// We always want to save with a trailing newline.
have_json.push('\n');
let update = std::env::var("UPDATE_GOLDEN")
.map(|val| val.eq_ignore_ascii_case("true"))
.unwrap_or(false);
if update {
std::fs::create_dir_all(path.parent().unwrap())
.expect("could not create golden test data directory");
std::fs::write(&path, have_json).expect("could not write out golden data");
} else {
let want_json =
std::fs::read_to_string(&path).unwrap_or_else(|_| {
panic!("could not read golden test data file at {:?}. Did you run the test with UPDATE_GOLDEN=true before?", path);
}).replace("\r\n", "\n");
pretty_assertions::assert_eq!(have_json, want_json, "wrong blame entries");
}
}
#[test]
fn test_parse_git_blame_not_committed() {
let output = read_test_data("blame_incremental_not_committed");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_not_committed");
}
#[test]
fn test_parse_git_blame_simple() {
let output = read_test_data("blame_incremental_simple");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_simple");
}
#[test]
fn test_parse_git_blame_complex() {
let output = read_test_data("blame_incremental_complex");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_complex");
}
}
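As a companion to the `--incremental` format documentation above, here is a hedged sketch (not part of this file) of feeding a single documented entry through `parse_git_blame`; it assumes it runs inside this module, where the function and `BlameEntry` are visible.

```
#[test]
fn parse_single_incremental_entry_sketch() {
    // Hypothetical check: one committed entry covering a single line.
    let output = "6ad46b5257ba16d12c5ca9f0d4900320959df7f4 2 2 1
author Joe Schmoe
author-mail <joe.schmoe@example.com>
author-time 1709741400
author-tz +0100
committer Joe Schmoe
committer-mail <joe.schmoe@example.com>
committer-time 1709741400
committer-tz +0100
summary Joe's cool commit
filename index.js
";
    let entries = parse_git_blame(output).expect("valid --incremental output should parse");
    assert_eq!(entries.len(), 1);
    // `range` is 0-based and end-exclusive: final line 2 spanning one line becomes 1..2.
    assert_eq!(entries[0].range, 1..2);
    assert_eq!(entries[0].author.as_deref(), Some("Joe Schmoe"));
}
```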


@@ -1,374 +0,0 @@
use crate::commit::get_messages;
use crate::{GitRemote, Oid};
use anyhow::{Context as _, Result, anyhow};
use collections::{HashMap, HashSet};
use futures::AsyncWriteExt;
use gpui::SharedString;
use serde::{Deserialize, Serialize};
use std::process::Stdio;
use std::{ops::Range, path::Path};
use text::Rope;
use time::OffsetDateTime;
use time::UtcOffset;
use time::macros::format_description;
pub use git2 as libgit;
#[derive(Debug, Clone, Default)]
pub struct Blame {
pub entries: Vec<BlameEntry>,
pub messages: HashMap<Oid, String>,
pub remote_url: Option<String>,
}
#[derive(Clone, Debug, Default)]
pub struct ParsedCommitMessage {
pub message: SharedString,
pub permalink: Option<url::Url>,
pub pull_request: Option<crate::hosting_provider::PullRequest>,
pub remote: Option<GitRemote>,
}
impl Blame {
pub async fn for_path(
git_binary: &Path,
working_directory: &Path,
path: &Path,
content: &Rope,
remote_url: Option<String>,
) -> Result<Self> {
let output = run_git_blame(git_binary, working_directory, path, content).await?;
let mut entries = parse_git_blame(&output)?;
entries.sort_unstable_by(|a, b| a.range.start.cmp(&b.range.start));
let mut unique_shas = HashSet::default();
for entry in entries.iter_mut() {
unique_shas.insert(entry.sha);
}
let shas = unique_shas.into_iter().collect::<Vec<_>>();
let messages = get_messages(working_directory, &shas)
.await
.context("failed to get commit messages")?;
Ok(Self {
entries,
messages,
remote_url,
})
}
}
const GIT_BLAME_NO_COMMIT_ERROR: &str = "fatal: no such ref: HEAD";
const GIT_BLAME_NO_PATH: &str = "fatal: no such path";
async fn run_git_blame(
git_binary: &Path,
working_directory: &Path,
path: &Path,
contents: &Rope,
) -> Result<String> {
let mut child = util::command::new_smol_command(git_binary)
.current_dir(working_directory)
.arg("blame")
.arg("--incremental")
.arg("--contents")
.arg("-")
.arg(path.as_os_str())
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.map_err(|e| anyhow!("Failed to start git blame process: {}", e))?;
let stdin = child
.stdin
.as_mut()
.context("failed to get pipe to stdin of git blame command")?;
for chunk in contents.chunks() {
stdin.write_all(chunk.as_bytes()).await?;
}
stdin.flush().await?;
let output = child
.output()
.await
.map_err(|e| anyhow!("Failed to read git blame output: {}", e))?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
let trimmed = stderr.trim();
if trimmed == GIT_BLAME_NO_COMMIT_ERROR || trimmed.contains(GIT_BLAME_NO_PATH) {
return Ok(String::new());
}
return Err(anyhow!("git blame process failed: {}", stderr));
}
Ok(String::from_utf8(output.stdout)?)
}
#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)]
pub struct BlameEntry {
pub sha: Oid,
pub range: Range<u32>,
pub original_line_number: u32,
pub author: Option<String>,
pub author_mail: Option<String>,
pub author_time: Option<i64>,
pub author_tz: Option<String>,
pub committer_name: Option<String>,
pub committer_email: Option<String>,
pub committer_time: Option<i64>,
pub committer_tz: Option<String>,
pub summary: Option<String>,
pub previous: Option<String>,
pub filename: String,
}
impl BlameEntry {
// Returns a BlameEntry by parsing the first line of a `git blame --incremental`
// entry. The line MUST have this format:
//
// <40-byte-hex-sha1> <sourceline> <resultline> <num-lines>
fn new_from_blame_line(line: &str) -> Result<BlameEntry> {
let mut parts = line.split_whitespace();
let sha = parts
.next()
.and_then(|line| line.parse::<Oid>().ok())
.ok_or_else(|| anyhow!("failed to parse sha"))?;
let original_line_number = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse original line number"))?;
let final_line_number = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse final line number"))?;
let line_count = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse final line number"))?;
let start_line = final_line_number.saturating_sub(1);
let end_line = start_line + line_count;
let range = start_line..end_line;
Ok(Self {
sha,
range,
original_line_number,
..Default::default()
})
}
pub fn author_offset_date_time(&self) -> Result<time::OffsetDateTime> {
if let (Some(author_time), Some(author_tz)) = (self.author_time, &self.author_tz) {
let format = format_description!("[offset_hour][offset_minute]");
let offset = UtcOffset::parse(author_tz, &format)?;
let date_time_utc = OffsetDateTime::from_unix_timestamp(author_time)?;
Ok(date_time_utc.to_offset(offset))
} else {
// Directly return current time in UTC if there's no author time or timezone
Ok(time::OffsetDateTime::now_utc())
}
}
}
// parse_git_blame parses the output of `git blame --incremental`, which returns
// all the blame-entries for a given path incrementally, as it finds them.
//
// Each entry *always* starts with:
//
// <40-byte-hex-sha1> <sourceline> <resultline> <num-lines>
//
// Each entry *always* ends with:
//
// filename <whitespace-quoted-filename-goes-here>
//
// Line numbers are 1-indexed.
//
// A `git blame --incremental` entry looks like this:
//
// 6ad46b5257ba16d12c5ca9f0d4900320959df7f4 2 2 1
// author Joe Schmoe
// author-mail <joe.schmoe@example.com>
// author-time 1709741400
// author-tz +0100
// committer Joe Schmoe
// committer-mail <joe.schmoe@example.com>
// committer-time 1709741400
// committer-tz +0100
// summary Joe's cool commit
// previous 486c2409237a2c627230589e567024a96751d475 index.js
// filename index.js
//
// If the entry has the same SHA as an entry that was already printed then no
// signature information is printed:
//
// 6ad46b5257ba16d12c5ca9f0d4900320959df7f4 3 4 1
// previous 486c2409237a2c627230589e567024a96751d475 index.js
// filename index.js
//
// More about `--incremental` output: https://mirrors.edge.kernel.org/pub/software/scm/git/docs/git-blame.html
fn parse_git_blame(output: &str) -> Result<Vec<BlameEntry>> {
let mut entries: Vec<BlameEntry> = Vec::new();
let mut index: HashMap<Oid, usize> = HashMap::default();
let mut current_entry: Option<BlameEntry> = None;
for line in output.lines() {
let mut done = false;
match &mut current_entry {
None => {
let mut new_entry = BlameEntry::new_from_blame_line(line)?;
if let Some(existing_entry) = index
.get(&new_entry.sha)
.and_then(|slot| entries.get(*slot))
{
new_entry.author.clone_from(&existing_entry.author);
new_entry
.author_mail
.clone_from(&existing_entry.author_mail);
new_entry.author_time = existing_entry.author_time;
new_entry.author_tz.clone_from(&existing_entry.author_tz);
new_entry
.committer_name
.clone_from(&existing_entry.committer_name);
new_entry
.committer_email
.clone_from(&existing_entry.committer_email);
new_entry.committer_time = existing_entry.committer_time;
new_entry
.committer_tz
.clone_from(&existing_entry.committer_tz);
new_entry.summary.clone_from(&existing_entry.summary);
}
current_entry.replace(new_entry);
}
Some(entry) => {
let Some((key, value)) = line.split_once(' ') else {
continue;
};
let is_committed = !entry.sha.is_zero();
match key {
"filename" => {
entry.filename = value.into();
done = true;
}
"previous" => entry.previous = Some(value.into()),
"summary" if is_committed => entry.summary = Some(value.into()),
"author" if is_committed => entry.author = Some(value.into()),
"author-mail" if is_committed => entry.author_mail = Some(value.into()),
"author-time" if is_committed => {
entry.author_time = Some(value.parse::<i64>()?)
}
"author-tz" if is_committed => entry.author_tz = Some(value.into()),
"committer" if is_committed => entry.committer_name = Some(value.into()),
"committer-mail" if is_committed => entry.committer_email = Some(value.into()),
"committer-time" if is_committed => {
entry.committer_time = Some(value.parse::<i64>()?)
}
"committer-tz" if is_committed => entry.committer_tz = Some(value.into()),
_ => {}
}
}
};
if done {
if let Some(entry) = current_entry.take() {
index.insert(entry.sha, entries.len());
// We only want annotations that have a commit.
if !entry.sha.is_zero() {
entries.push(entry);
}
}
}
}
Ok(entries)
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use super::BlameEntry;
use super::parse_git_blame;
fn read_test_data(filename: &str) -> String {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("test_data");
path.push(filename);
std::fs::read_to_string(&path)
.unwrap_or_else(|_| panic!("Could not read test data at {:?}. Is it generated?", path))
}
fn assert_eq_golden(entries: &Vec<BlameEntry>, golden_filename: &str) {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("test_data");
path.push("golden");
path.push(format!("{}.json", golden_filename));
let mut have_json =
serde_json::to_string_pretty(&entries).expect("could not serialize entries to JSON");
// We always want to save with a trailing newline.
have_json.push('\n');
let update = std::env::var("UPDATE_GOLDEN")
.map(|val| val.eq_ignore_ascii_case("true"))
.unwrap_or(false);
if update {
std::fs::create_dir_all(path.parent().unwrap())
.expect("could not create golden test data directory");
std::fs::write(&path, have_json).expect("could not write out golden data");
} else {
let want_json =
std::fs::read_to_string(&path).unwrap_or_else(|_| {
panic!("could not read golden test data file at {:?}. Did you run the test with UPDATE_GOLDEN=true before?", path);
}).replace("\r\n", "\n");
pretty_assertions::assert_eq!(have_json, want_json, "wrong blame entries");
}
}
#[test]
fn test_parse_git_blame_not_committed() {
let output = read_test_data("blame_incremental_not_committed");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_not_committed");
}
#[test]
fn test_parse_git_blame_simple() {
let output = read_test_data("blame_incremental_simple");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_simple");
}
#[test]
fn test_parse_git_blame_complex() {
let output = read_test_data("blame_incremental_complex");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_complex");
}
}


@@ -1,378 +0,0 @@
use crate::commit::get_messages;
use crate::{GitRemote, Oid};
use anyhow::{Context as _, Result, anyhow};
use collections::{HashMap, HashSet};
use futures::AsyncWriteExt;
use gpui::SharedString;
use serde::{Deserialize, Serialize};
use std::process::Stdio;
use std::{ops::Range, path::Path};
use text::Rope;
use time::OffsetDateTime;
use time::UtcOffset;
use time::macros::format_description;
pub use git2 as libgit;
#[derive(Debug, Clone, Default)]
pub struct Blame {
pub entries: Vec<BlameEntry>,
pub messages: HashMap<Oid, String>,
pub remote_url: Option<String>,
}
#[derive(Clone, Debug, Default)]
pub struct ParsedCommitMessage {
pub message: SharedString,
pub permalink: Option<url::Url>,
pub pull_request: Option<crate::hosting_provider::PullRequest>,
pub remote: Option<GitRemote>,
}
impl Blame {
pub async fn for_path(
git_binary: &Path,
working_directory: &Path,
path: &Path,
content: &Rope,
remote_url: Option<String>,
) -> Result<Self> {
let output = run_git_blame(git_binary, working_directory, path, content).await?;
let mut entries = parse_git_blame(&output)?;
entries.sort_unstable_by(|a, b| a.range.start.cmp(&b.range.start));
let mut unique_shas = HashSet::default();
for entry in entries.iter_mut() {
unique_shas.insert(entry.sha);
}
let shas = unique_shas.into_iter().collect::<Vec<_>>();
let messages = get_messages(working_directory, &shas)
.await
.context("failed to get commit messages")?;
Ok(Self {
entries,
messages,
remote_url,
})
}
}
const GIT_BLAME_NO_COMMIT_ERROR: &str = "fatal: no such ref: HEAD";
const GIT_BLAME_NO_PATH: &str = "fatal: no such path";
async fn run_git_blame(
git_binary: &Path,
working_directory: &Path,
path: &Path,
contents: &Rope,
) -> Result<String> {
let mut child = util::command::new_smol_command(git_binary)
.current_dir(working_directory)
.arg("blame")
.arg("--incremental")
.arg("--contents")
.arg("-")
.arg(path.as_os_str())
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.map_err(|e| anyhow!("Failed to start git blame process: {}", e))?;
let stdin = child
.stdin
.as_mut()
.context("failed to get pipe to stdin of git blame command")?;
for chunk in contents.chunks() {
stdin.write_all(chunk.as_bytes()).await?;
}
stdin.flush().await?;
let output = child
.output()
.await
.map_err(|e| anyhow!("Failed to read git blame output: {}", e))?;
handle_command_output(output)
}
fn handle_command_output(output: std::process::Output) -> Result<String> {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
let trimmed = stderr.trim();
if trimmed == GIT_BLAME_NO_COMMIT_ERROR || trimmed.contains(GIT_BLAME_NO_PATH) {
return Ok(String::new());
}
return Err(anyhow!("git blame process failed: {}", stderr));
}
Ok(String::from_utf8(output.stdout)?)
}
#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)]
pub struct BlameEntry {
pub sha: Oid,
pub range: Range<u32>,
pub original_line_number: u32,
pub author: Option<String>,
pub author_mail: Option<String>,
pub author_time: Option<i64>,
pub author_tz: Option<String>,
pub committer_name: Option<String>,
pub committer_email: Option<String>,
pub committer_time: Option<i64>,
pub committer_tz: Option<String>,
pub summary: Option<String>,
pub previous: Option<String>,
pub filename: String,
}
impl BlameEntry {
// Returns a BlameEntry by parsing the first line of a `git blame --incremental`
// entry. The line MUST have this format:
//
// <40-byte-hex-sha1> <sourceline> <resultline> <num-lines>
fn new_from_blame_line(line: &str) -> Result<BlameEntry> {
let mut parts = line.split_whitespace();
let sha = parts
.next()
.and_then(|line| line.parse::<Oid>().ok())
.ok_or_else(|| anyhow!("failed to parse sha"))?;
let original_line_number = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse original line number"))?;
let final_line_number = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse final line number"))?;
let line_count = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse final line number"))?;
let start_line = final_line_number.saturating_sub(1);
let end_line = start_line + line_count;
let range = start_line..end_line;
Ok(Self {
sha,
range,
original_line_number,
..Default::default()
})
}
pub fn author_offset_date_time(&self) -> Result<time::OffsetDateTime> {
if let (Some(author_time), Some(author_tz)) = (self.author_time, &self.author_tz) {
let format = format_description!("[offset_hour][offset_minute]");
let offset = UtcOffset::parse(author_tz, &format)?;
let date_time_utc = OffsetDateTime::from_unix_timestamp(author_time)?;
Ok(date_time_utc.to_offset(offset))
} else {
// Directly return current time in UTC if there's no author time or timezone
Ok(time::OffsetDateTime::now_utc())
}
}
}
// parse_git_blame parses the output of `git blame --incremental`, which returns
// all the blame-entries for a given path incrementally, as it finds them.
//
// Each entry *always* starts with:
//
// <40-byte-hex-sha1> <sourceline> <resultline> <num-lines>
//
// Each entry *always* ends with:
//
// filename <whitespace-quoted-filename-goes-here>
//
// Line numbers are 1-indexed.
//
// A `git blame --incremental` entry looks like this:
//
// 6ad46b5257ba16d12c5ca9f0d4900320959df7f4 2 2 1
// author Joe Schmoe
// author-mail <joe.schmoe@example.com>
// author-time 1709741400
// author-tz +0100
// committer Joe Schmoe
// committer-mail <joe.schmoe@example.com>
// committer-time 1709741400
// committer-tz +0100
// summary Joe's cool commit
// previous 486c2409237a2c627230589e567024a96751d475 index.js
// filename index.js
//
// If the entry has the same SHA as an entry that was already printed then no
// signature information is printed:
//
// 6ad46b5257ba16d12c5ca9f0d4900320959df7f4 3 4 1
// previous 486c2409237a2c627230589e567024a96751d475 index.js
// filename index.js
//
// More about `--incremental` output: https://mirrors.edge.kernel.org/pub/software/scm/git/docs/git-blame.html
fn parse_git_blame(output: &str) -> Result<Vec<BlameEntry>> {
let mut entries: Vec<BlameEntry> = Vec::new();
let mut index: HashMap<Oid, usize> = HashMap::default();
let mut current_entry: Option<BlameEntry> = None;
for line in output.lines() {
let mut done = false;
match &mut current_entry {
None => {
let mut new_entry = BlameEntry::new_from_blame_line(line)?;
if let Some(existing_entry) = index
.get(&new_entry.sha)
.and_then(|slot| entries.get(*slot))
{
new_entry.author.clone_from(&existing_entry.author);
new_entry
.author_mail
.clone_from(&existing_entry.author_mail);
new_entry.author_time = existing_entry.author_time;
new_entry.author_tz.clone_from(&existing_entry.author_tz);
new_entry
.committer_name
.clone_from(&existing_entry.committer_name);
new_entry
.committer_email
.clone_from(&existing_entry.committer_email);
new_entry.committer_time = existing_entry.committer_time;
new_entry
.committer_tz
.clone_from(&existing_entry.committer_tz);
new_entry.summary.clone_from(&existing_entry.summary);
}
current_entry.replace(new_entry);
}
Some(entry) => {
let Some((key, value)) = line.split_once(' ') else {
continue;
};
let is_committed = !entry.sha.is_zero();
match key {
"filename" => {
entry.filename = value.into();
done = true;
}
"previous" => entry.previous = Some(value.into()),
"summary" if is_committed => entry.summary = Some(value.into()),
"author" if is_committed => entry.author = Some(value.into()),
"author-mail" if is_committed => entry.author_mail = Some(value.into()),
"author-time" if is_committed => {
entry.author_time = Some(value.parse::<i64>()?)
}
"author-tz" if is_committed => entry.author_tz = Some(value.into()),
"committer" if is_committed => entry.committer_name = Some(value.into()),
"committer-mail" if is_committed => entry.committer_email = Some(value.into()),
"committer-time" if is_committed => {
entry.committer_time = Some(value.parse::<i64>()?)
}
"committer-tz" if is_committed => entry.committer_tz = Some(value.into()),
_ => {}
}
}
};
if done {
if let Some(entry) = current_entry.take() {
index.insert(entry.sha, entries.len());
// We only want annotations that have a commit.
if !entry.sha.is_zero() {
entries.push(entry);
}
}
}
}
Ok(entries)
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use super::BlameEntry;
use super::parse_git_blame;
fn read_test_data(filename: &str) -> String {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("test_data");
path.push(filename);
std::fs::read_to_string(&path)
.unwrap_or_else(|_| panic!("Could not read test data at {:?}. Is it generated?", path))
}
fn assert_eq_golden(entries: &Vec<BlameEntry>, golden_filename: &str) {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("test_data");
path.push("golden");
path.push(format!("{}.json", golden_filename));
let mut have_json =
serde_json::to_string_pretty(&entries).expect("could not serialize entries to JSON");
// We always want to save with a trailing newline.
have_json.push('\n');
let update = std::env::var("UPDATE_GOLDEN")
.map(|val| val.eq_ignore_ascii_case("true"))
.unwrap_or(false);
if update {
std::fs::create_dir_all(path.parent().unwrap())
.expect("could not create golden test data directory");
std::fs::write(&path, have_json).expect("could not write out golden data");
} else {
let want_json =
std::fs::read_to_string(&path).unwrap_or_else(|_| {
panic!("could not read golden test data file at {:?}. Did you run the test with UPDATE_GOLDEN=true before?", path);
}).replace("\r\n", "\n");
pretty_assertions::assert_eq!(have_json, want_json, "wrong blame entries");
}
}
#[test]
fn test_parse_git_blame_not_committed() {
let output = read_test_data("blame_incremental_not_committed");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_not_committed");
}
#[test]
fn test_parse_git_blame_simple() {
let output = read_test_data("blame_incremental_simple");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_simple");
}
#[test]
fn test_parse_git_blame_complex() {
let output = read_test_data("blame_incremental_complex");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_complex");
}
}


@@ -1,374 +0,0 @@
use crate::commit::get_messages;
use crate::{GitRemote, Oid};
use anyhow::{Context as _, Result, anyhow};
use collections::{HashMap, HashSet};
use futures::AsyncWriteExt;
use gpui::SharedString;
use serde::{Deserialize, Serialize};
use std::process::Stdio;
use std::{ops::Range, path::Path};
use text::Rope;
use time::OffsetDateTime;
use time::UtcOffset;
use time::macros::format_description;
pub use git2 as libgit;
#[derive(Debug, Clone, Default)]
pub struct Blame {
pub entries: Vec<BlameEntry>,
pub messages: HashMap<Oid, String>,
pub remote_url: Option<String>,
}
#[derive(Clone, Debug, Default)]
pub struct ParsedCommitMessage {
pub message: SharedString,
pub permalink: Option<url::Url>,
pub pull_request: Option<crate::hosting_provider::PullRequest>,
pub remote: Option<GitRemote>,
}
impl Blame {
pub async fn for_path(
git_binary: &Path,
working_directory: &Path,
path: &Path,
content: &Rope,
remote_url: Option<String>,
) -> Result<Self> {
let output = run_git_blame(git_binary, working_directory, path, content).await?;
let mut entries = parse_git_blame(&output)?;
entries.sort_unstable_by(|a, b| a.range.start.cmp(&b.range.start));
let mut unique_shas = HashSet::default();
for entry in entries.iter_mut() {
unique_shas.insert(entry.sha);
}
let shas = unique_shas.into_iter().collect::<Vec<_>>();
let messages = get_messages(working_directory, &shas)
.await
.context("failed to get commit messages")?;
Ok(Self {
entries,
messages,
remote_url,
})
}
}
const GIT_BLAME_NO_COMMIT_ERROR: &str = "fatal: no such ref: HEAD";
const GIT_BLAME_NO_PATH: &str = "fatal: no such path";
async fn run_git_blame(
git_binary: &Path,
working_directory: &Path,
path: &Path,
contents: &Rope,
) -> Result<String> {
let mut child = util::command::new_smol_command(git_binary)
.current_dir(working_directory)
.arg("blame")
.arg("--incremental")
.arg("--contents")
.arg("-")
.arg(path.as_os_str())
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.map_err(|e| anyhow!("Failed to start git blame process: {}", e))?;
let stdin = child
.stdin
.as_mut()
.context("failed to get pipe to stdin of git blame command")?;
for chunk in contents.chunks() {
stdin.write_all(chunk.as_bytes()).await?;
}
stdin.flush().await?;
let output = child
.output()
.await
.map_err(|e| anyhow!("Failed to read git blame output: {}", e))?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
let trimmed = stderr.trim();
if trimmed == GIT_BLAME_NO_COMMIT_ERROR || trimmed.contains(GIT_BLAME_NO_PATH) {
return Ok(String::new());
}
return Err(anyhow!("git blame process failed: {}", stderr));
}
Ok(String::from_utf8(output.stdout)?)
}
#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)]
pub struct BlameEntry {
pub sha: Oid,
pub range: Range<u32>,
pub original_line_number: u32,
pub author: Option<String>,
pub author_mail: Option<String>,
pub author_time: Option<i64>,
pub author_tz: Option<String>,
pub committer_name: Option<String>,
pub committer_email: Option<String>,
pub committer_time: Option<i64>,
pub committer_tz: Option<String>,
pub summary: Option<String>,
pub previous: Option<String>,
pub filename: String,
}
impl BlameEntry {
// Returns a BlameEntry by parsing the first line of a `git blame --incremental`
// entry. The line MUST have this format:
//
// <40-byte-hex-sha1> <sourceline> <resultline> <num-lines>
fn new_from_blame_line(line: &str) -> Result<BlameEntry> {
let mut parts = line.split_whitespace();
let sha = parts
.next()
.and_then(|line| line.parse::<Oid>().ok())
.ok_or_else(|| anyhow!("failed to parse sha"))?;
let original_line_number = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse original line number"))?;
let final_line_number = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse final line number"))?;
let line_count = parts
.next()
.and_then(|line| line.parse::<u32>().ok())
.ok_or_else(|| anyhow!("Failed to parse final line number"))?;
let start_line = final_line_number.saturating_sub(1);
let end_line = start_line + line_count;
let range = start_line..end_line;
Ok(Self {
sha,
range,
original_line_number,
..Default::default()
})
}
pub fn author_offset_date_time(&self) -> Result<time::OffsetDateTime> {
if let (Some(author_time), Some(author_tz)) = (self.author_time, &self.author_tz) {
let format = format_description!("[offset_hour][offset_minute]");
let offset = UtcOffset::parse(author_tz, &format)?;
let date_time_utc = OffsetDateTime::from_unix_timestamp(author_time)?;
Ok(date_time_utc.to_offset(offset))
} else {
// Directly return current time in UTC if there's no author time or timezone
Ok(time::OffsetDateTime::now_utc())
}
}
}
// parse_git_blame parses the output of `git blame --incremental`, which returns
// all the blame-entries for a given path incrementally, as it finds them.
//
// Each entry *always* starts with:
//
// <40-byte-hex-sha1> <sourceline> <resultline> <num-lines>
//
// Each entry *always* ends with:
//
// filename <whitespace-quoted-filename-goes-here>
//
// Line numbers are 1-indexed.
//
// A `git blame --incremental` entry looks like this:
//
// 6ad46b5257ba16d12c5ca9f0d4900320959df7f4 2 2 1
// author Joe Schmoe
// author-mail <joe.schmoe@example.com>
// author-time 1709741400
// author-tz +0100
// committer Joe Schmoe
// committer-mail <joe.schmoe@example.com>
// committer-time 1709741400
// committer-tz +0100
// summary Joe's cool commit
// previous 486c2409237a2c627230589e567024a96751d475 index.js
// filename index.js
//
// If the entry has the same SHA as an entry that was already printed then no
// signature information is printed:
//
// 6ad46b5257ba16d12c5ca9f0d4900320959df7f4 3 4 1
// previous 486c2409237a2c627230589e567024a96751d475 index.js
// filename index.js
//
// More about `--incremental` output: https://mirrors.edge.kernel.org/pub/software/scm/git/docs/git-blame.html
fn parse_git_blame(output: &str) -> Result<Vec<BlameEntry>> {
let mut entries: Vec<BlameEntry> = Vec::new();
let mut index: HashMap<Oid, usize> = HashMap::default();
let mut current_entry: Option<BlameEntry> = None;
for line in output.lines() {
let mut done = false;
match &mut current_entry {
None => {
let mut new_entry = BlameEntry::new_from_blame_line(line)?;
if let Some(existing_entry) = index
.get(&new_entry.sha)
.and_then(|slot| entries.get(*slot))
{
new_entry.author.clone_from(&existing_entry.author);
new_entry
.author_mail
.clone_from(&existing_entry.author_mail);
new_entry.author_time = existing_entry.author_time;
new_entry.author_tz.clone_from(&existing_entry.author_tz);
new_entry
.committer_name
.clone_from(&existing_entry.committer_name);
new_entry
.committer_email
.clone_from(&existing_entry.committer_email);
new_entry.committer_time = existing_entry.committer_time;
new_entry
.committer_tz
.clone_from(&existing_entry.committer_tz);
new_entry.summary.clone_from(&existing_entry.summary);
}
current_entry.replace(new_entry);
}
Some(entry) => {
let Some((key, value)) = line.split_once(' ') else {
continue;
};
let is_committed = !entry.sha.is_zero();
match key {
"filename" => {
entry.filename = value.into();
done = true;
}
"previous" => entry.previous = Some(value.into()),
"summary" if is_committed => entry.summary = Some(value.into()),
"author" if is_committed => entry.author = Some(value.into()),
"author-mail" if is_committed => entry.author_mail = Some(value.into()),
"author-time" if is_committed => {
entry.author_time = Some(value.parse::<i64>()?)
}
"author-tz" if is_committed => entry.author_tz = Some(value.into()),
"committer" if is_committed => entry.committer_name = Some(value.into()),
"committer-mail" if is_committed => entry.committer_email = Some(value.into()),
"committer-time" if is_committed => {
entry.committer_time = Some(value.parse::<i64>()?)
}
"committer-tz" if is_committed => entry.committer_tz = Some(value.into()),
_ => {}
}
}
};
if done {
if let Some(entry) = current_entry.take() {
index.insert(entry.sha, entries.len());
// We only want annotations that have a commit.
if !entry.sha.is_zero() {
entries.push(entry);
}
}
}
}
Ok(entries)
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use super::BlameEntry;
use super::parse_git_blame;
fn read_test_data(filename: &str) -> String {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("test_data");
path.push(filename);
std::fs::read_to_string(&path)
.unwrap_or_else(|_| panic!("Could not read test data at {:?}. Is it generated?", path))
}
fn assert_eq_golden(entries: &Vec<BlameEntry>, golden_filename: &str) {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("test_data");
path.push("golden");
path.push(format!("{}.json", golden_filename));
let mut have_json =
serde_json::to_string_pretty(&entries).expect("could not serialize entries to JSON");
// We always want to save with a trailing newline.
have_json.push('\n');
let update = std::env::var("UPDATE_GOLDEN")
.map(|val| val.eq_ignore_ascii_case("true"))
.unwrap_or(false);
if update {
std::fs::create_dir_all(path.parent().unwrap())
.expect("could not create golden test data directory");
std::fs::write(&path, have_json).expect("could not write out golden data");
} else {
let want_json =
std::fs::read_to_string(&path).unwrap_or_else(|_| {
panic!("could not read golden test data file at {:?}. Did you run the test with UPDATE_GOLDEN=true before?", path);
}).replace("\r\n", "\n");
pretty_assertions::assert_eq!(have_json, want_json, "wrong blame entries");
}
}
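// To (re)generate the golden JSON files under test_data/golden, run the tests
// below with UPDATE_GOLDEN set (the exact cargo invocation is illustrative):
//
//     UPDATE_GOLDEN=true cargo test parse_git_blame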
#[test]
fn test_parse_git_blame_not_committed() {
let output = read_test_data("blame_incremental_not_committed");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_not_committed");
}
#[test]
fn test_parse_git_blame_simple() {
let output = read_test_data("blame_incremental_simple");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_simple");
}
#[test]
fn test_parse_git_blame_complex() {
let output = read_test_data("blame_incremental_complex");
let entries = parse_git_blame(&output).unwrap();
assert_eq_golden(&entries, "blame_incremental_complex");
}
}

View File

@@ -13,7 +13,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{cmp, fmt::Write, sync::Arc};
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
use util::paths::PathMatcher;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
@@ -75,7 +75,7 @@ impl Tool for GrepTool {
match serde_json::from_value::<GrepToolInput>(input.clone()) {
Ok(input) => {
let page = input.page();
let regex_str = MarkdownString::inline_code(&input.regex);
let regex_str = MarkdownInlineCode(&input.regex);
let case_info = if input.case_sensitive {
" (case-sensitive)"
} else {

View File

@@ -8,7 +8,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{fmt::Write, path::Path, sync::Arc};
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct ListDirectoryToolInput {
@@ -63,7 +63,7 @@ impl Tool for ListDirectoryTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<ListDirectoryToolInput>(input.clone()) {
Ok(input) => {
let path = MarkdownString::inline_code(&input.path);
let path = MarkdownInlineCode(&input.path);
format!("List the {path} directory's contents")
}
Err(_) => "List directory".to_string(),

View File

@@ -8,7 +8,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{path::Path, sync::Arc};
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct MovePathToolInput {
@@ -61,8 +61,8 @@ impl Tool for MovePathTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<MovePathToolInput>(input.clone()) {
Ok(input) => {
let src = MarkdownString::inline_code(&input.source_path);
let dest = MarkdownString::inline_code(&input.destination_path);
let src = MarkdownInlineCode(&input.source_path);
let dest = MarkdownInlineCode(&input.destination_path);
let src_path = Path::new(&input.source_path);
let dest_path = Path::new(&input.destination_path);
@@ -71,7 +71,7 @@ impl Tool for MovePathTool {
.and_then(|os_str| os_str.to_os_string().into_string().ok())
{
Some(filename) if src_path.parent() == dest_path.parent() => {
let filename = MarkdownString::inline_code(&filename);
let filename = MarkdownInlineCode(&filename);
format!("Rename {src} to {filename}")
}
_ => {

View File

@@ -8,7 +8,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownEscaped;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct OpenToolInput {
@@ -41,7 +41,7 @@ impl Tool for OpenTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<OpenToolInput>(input.clone()) {
Ok(input) => format!("Open `{}`", MarkdownString::escape(&input.path_or_url)),
Ok(input) => format!("Open `{}`", MarkdownEscaped(&input.path_or_url)),
Err(_) => "Open file or URL".to_string(),
}
}

View File

@@ -11,7 +11,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
/// If the model requests to read a file whose size exceeds this, then
/// the tool will return an error along with the file's symbol outline,
@@ -40,7 +40,7 @@ pub struct ReadFileToolInput {
#[serde(default)]
pub start_line: Option<usize>,
/// Optional line number to end reading on (1-based index)
/// Optional line number to end reading on (1-based index, inclusive)
#[serde(default)]
pub end_line: Option<usize>,
}
@@ -71,7 +71,7 @@ impl Tool for ReadFileTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<ReadFileToolInput>(input.clone()) {
Ok(input) => {
let path = MarkdownString::inline_code(&input.path);
let path = MarkdownInlineCode(&input.path);
match (input.start_line, input.end_line) {
(Some(start), None) => format!("Read file {path} (from line {start})"),
(Some(start), Some(end)) => format!("Read file {path} (lines {start}-{end})"),
@@ -128,7 +128,7 @@ impl Tool for ReadFileTool {
let start = input.start_line.unwrap_or(1);
let lines = text.split('\n').skip(start - 1);
if let Some(end) = input.end_line {
let count = end.saturating_sub(start).max(1); // Ensure at least 1 line
let count = end.saturating_sub(start).saturating_add(1); // Ensure at least 1 line
Itertools::intersperse(lines.take(count), "\n").collect()
} else {
Itertools::intersperse(lines, "\n").collect()
@@ -329,7 +329,7 @@ mod test {
.output
})
.await;
assert_eq!(result.unwrap(), "Line 2\nLine 3");
assert_eq!(result.unwrap(), "Line 2\nLine 3\nLine 4");
}
fn init_test(cx: &mut TestAppContext) {

View File

@@ -0,0 +1,872 @@
use language::{BufferSnapshot, Diff, Point, ToOffset};
use project::search::SearchQuery;
use std::iter;
use util::{ResultExt as _, paths::PathMatcher};
/// Performs an exact string replacement in a buffer, requiring precise character-for-character matching.
/// Uses the search functionality to locate the first occurrence of the exact string.
/// Returns None if no exact match is found in the buffer.
pub async fn replace_exact(old: &str, new: &str, snapshot: &BufferSnapshot) -> Option<Diff> {
let query = SearchQuery::text(
old,
false,
true,
true,
PathMatcher::new(iter::empty::<&str>()).ok()?,
PathMatcher::new(iter::empty::<&str>()).ok()?,
false,
None,
)
.log_err()?;
let matches = query.search(&snapshot, None).await;
if matches.is_empty() {
return None;
}
let edit_range = matches[0].clone();
let diff = language::text_diff(&old, &new);
let edits = diff
.into_iter()
.map(|(old_range, text)| {
let start = edit_range.start + old_range.start;
let end = edit_range.start + old_range.end;
(start..end, text)
})
.collect::<Vec<_>>();
let diff = language::Diff {
base_version: snapshot.version().clone(),
line_ending: snapshot.line_ending(),
edits,
};
Some(diff)
}
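// Minimal usage sketch, assuming a gpui test context and a local buffer
// (this mirrors the `replace_exact_tests` module at the bottom of this file):
//
//     let buffer = cx.new(|cx| language::Buffer::local("let x = 41;", cx));
//     let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
//     if let Some(diff) = replace_exact("let x = 41;", "let x = 42;", &snapshot).await {
//         buffer.update(cx, |buffer, cx| {
//             let _ = buffer.apply_diff(diff, cx);
//         });
//     }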
/// Performs an indentation-aware replacement: matches text content while ignoring differences in leading whitespace.
/// When replacing, preserves the indentation level found in the buffer at each matching line.
/// Returns None if no match is found, or if the indentation offset is inconsistent across the matched lines.
pub fn replace_with_flexible_indent(old: &str, new: &str, buffer: &BufferSnapshot) -> Option<Diff> {
let (old_lines, old_min_indent) = lines_with_min_indent(old);
let (new_lines, new_min_indent) = lines_with_min_indent(new);
let min_indent = old_min_indent.min(new_min_indent);
let old_lines = drop_lines_prefix(&old_lines, min_indent);
let new_lines = drop_lines_prefix(&new_lines, min_indent);
let max_row = buffer.max_point().row;
'windows: for start_row in 0..max_row + 1 {
let end_row = start_row + old_lines.len().saturating_sub(1) as u32;
if end_row > max_row {
// The buffer ends before fully matching the pattern
return None;
}
let start_point = Point::new(start_row, 0);
let end_point = Point::new(end_row, buffer.line_len(end_row));
let range = start_point.to_offset(buffer)..end_point.to_offset(buffer);
let window_text = buffer.text_for_range(range.clone());
let mut window_lines = window_text.lines();
let mut old_lines_iter = old_lines.iter();
let mut common_mismatch = None;
#[derive(Eq, PartialEq)]
enum Mismatch {
OverIndented(String),
UnderIndented(String),
}
while let (Some(window_line), Some(old_line)) = (window_lines.next(), old_lines_iter.next())
{
let line_trimmed = window_line.trim_start();
if line_trimmed != old_line.trim_start() {
continue 'windows;
}
if line_trimmed.is_empty() {
continue;
}
let line_mismatch = if window_line.len() > old_line.len() {
let prefix = window_line[..window_line.len() - old_line.len()].to_string();
Mismatch::UnderIndented(prefix)
} else {
let prefix = old_line[..old_line.len() - window_line.len()].to_string();
Mismatch::OverIndented(prefix)
};
match &common_mismatch {
Some(common_mismatch) if common_mismatch != &line_mismatch => {
continue 'windows;
}
Some(_) => (),
None => common_mismatch = Some(line_mismatch),
}
}
if let Some(common_mismatch) = &common_mismatch {
let line_ending = buffer.line_ending();
let replacement = new_lines
.iter()
.map(|new_line| {
if new_line.trim().is_empty() {
new_line.to_string()
} else {
match common_mismatch {
Mismatch::UnderIndented(prefix) => prefix.to_string() + new_line,
Mismatch::OverIndented(prefix) => new_line
.strip_prefix(prefix)
.unwrap_or(new_line)
.to_string(),
}
}
})
.collect::<Vec<_>>()
.join(line_ending.as_str());
let diff = Diff {
base_version: buffer.version().clone(),
line_ending,
edits: vec![(range, replacement.into())],
};
return Some(diff);
}
}
None
}
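// A rough worked example (mirrors `flexible_indent_tests` below): if the buffer
// contains
//
//     fn test() {
//         let x = 5;
//     }
//
// then replace_with_flexible_indent("let x = 5;", "let x = 42;", &snapshot)
// matches the under-indented pattern and re-applies the buffer's four-space
// indentation, so the replacement line becomes `    let x = 42;`.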
fn drop_lines_prefix<'a>(lines: &'a [&str], prefix_len: usize) -> Vec<&'a str> {
lines
.iter()
.map(|line| line.get(prefix_len..).unwrap_or(""))
.collect()
}
fn lines_with_min_indent(input: &str) -> (Vec<&str>, usize) {
let mut lines = Vec::new();
let mut min_indent: Option<usize> = None;
for line in input.lines() {
lines.push(line);
if !line.trim().is_empty() {
let indent = line.len() - line.trim_start().len();
min_indent = Some(min_indent.map_or(indent, |m| m.min(indent)));
}
}
(lines, min_indent.unwrap_or(0))
}
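// For example, lines_with_min_indent("  a\n    b") returns
// (vec!["  a", "    b"], 2): lines are kept verbatim, and the minimum
// indentation across non-empty lines is reported separately.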
#[cfg(test)]
mod replace_exact_tests {
use super::*;
use gpui::TestAppContext;
use gpui::prelude::*;
#[gpui::test]
async fn basic(cx: &mut TestAppContext) {
let result = test_replace_exact(cx, "let x = 41;", "let x = 41;", "let x = 42;").await;
assert_eq!(result, Some("let x = 42;".to_string()));
}
#[gpui::test]
async fn no_match(cx: &mut TestAppContext) {
let result = test_replace_exact(cx, "let x = 41;", "let y = 42;", "let y = 43;").await;
assert_eq!(result, None);
}
#[gpui::test]
async fn multi_line(cx: &mut TestAppContext) {
let whole = "fn example() {\n let x = 41;\n println!(\"x = {}\", x);\n}";
let old_text = " let x = 41;\n println!(\"x = {}\", x);";
let new_text = " let x = 42;\n println!(\"x = {}\", x);";
let result = test_replace_exact(cx, whole, old_text, new_text).await;
assert_eq!(
result,
Some("fn example() {\n let x = 42;\n println!(\"x = {}\", x);\n}".to_string())
);
}
#[gpui::test]
async fn multiple_occurrences(cx: &mut TestAppContext) {
let whole = "let x = 41;\nlet y = 41;\nlet z = 41;";
let result = test_replace_exact(cx, whole, "let x = 41;", "let x = 42;").await;
assert_eq!(
result,
Some("let x = 42;\nlet y = 41;\nlet z = 41;".to_string())
);
}
#[gpui::test]
async fn empty_buffer(cx: &mut TestAppContext) {
let result = test_replace_exact(cx, "", "let x = 41;", "let x = 42;").await;
assert_eq!(result, None);
}
#[gpui::test]
async fn partial_match(cx: &mut TestAppContext) {
let whole = "let x = 41; let y = 42;";
let result = test_replace_exact(cx, whole, "let x = 41", "let x = 42").await;
assert_eq!(result, Some("let x = 42; let y = 42;".to_string()));
}
#[gpui::test]
async fn whitespace_sensitive(cx: &mut TestAppContext) {
let result = test_replace_exact(cx, "let x = 41;", " let x = 41;", "let x = 42;").await;
assert_eq!(result, None);
}
#[gpui::test]
async fn entire_buffer(cx: &mut TestAppContext) {
let result = test_replace_exact(cx, "let x = 41;", "let x = 41;", "let x = 42;").await;
assert_eq!(result, Some("let x = 42;".to_string()));
}
async fn test_replace_exact(
cx: &mut TestAppContext,
whole: &str,
old: &str,
new: &str,
) -> Option<String> {
let buffer = cx.new(|cx| language::Buffer::local(whole, cx));
let buffer_snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
let diff = replace_exact(old, new, &buffer_snapshot).await;
diff.map(|diff| {
buffer.update(cx, |buffer, cx| {
let _ = buffer.apply_diff(diff, cx);
buffer.text()
})
})
}
}
#[cfg(test)]
mod flexible_indent_tests {
use super::*;
use gpui::TestAppContext;
use gpui::prelude::*;
use unindent::Unindent;
#[gpui::test]
fn test_underindented_single_line(cx: &mut TestAppContext) {
let cur = " let a = 41;".to_string();
let old = " let a = 41;".to_string();
let new = " let a = 42;".to_string();
let exp = " let a = 42;".to_string();
let result = test_replace_with_flexible_indent(cx, &cur, &old, &new);
assert_eq!(result, Some(exp.to_string()))
}
#[gpui::test]
fn test_overindented_single_line(cx: &mut TestAppContext) {
let cur = " let a = 41;".to_string();
let old = " let a = 41;".to_string();
let new = " let a = 42;".to_string();
let exp = " let a = 42;".to_string();
let result = test_replace_with_flexible_indent(cx, &cur, &old, &new);
assert_eq!(result, Some(exp.to_string()))
}
#[gpui::test]
fn test_underindented_multi_line(cx: &mut TestAppContext) {
let whole = r#"
fn test() {
let x = 5;
println!("x = {}", x);
let y = 10;
}
"#
.unindent();
let old = r#"
let x = 5;
println!("x = {}", x);
"#
.unindent();
let new = r#"
let x = 42;
println!("New value: {}", x);
"#
.unindent();
let expected = r#"
fn test() {
let x = 42;
println!("New value: {}", x);
let y = 10;
}
"#
.unindent();
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
Some(expected.to_string())
);
}
#[gpui::test]
fn test_overindented_multi_line(cx: &mut TestAppContext) {
let cur = r#"
fn foo() {
let a = 41;
let b = 3.13;
}
"#
.unindent();
// 6 space indent instead of 4
let old = " let a = 41;\n let b = 3.13;";
let new = " let a = 42;\n let b = 3.14;";
let expected = r#"
fn foo() {
let a = 42;
let b = 3.14;
}
"#
.unindent();
let result = test_replace_with_flexible_indent(cx, &cur, &old, &new);
assert_eq!(result, Some(expected.to_string()))
}
#[gpui::test]
fn test_replace_inconsistent_indentation(cx: &mut TestAppContext) {
let whole = r#"
fn test() {
if condition {
println!("{}", 43);
}
}
"#
.unindent();
let old = r#"
if condition {
println!("{}", 43);
"#
.unindent();
let new = r#"
if condition {
println!("{}", 42);
"#
.unindent();
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
None
);
}
#[gpui::test]
fn test_replace_with_empty_lines(cx: &mut TestAppContext) {
// Test with empty lines
let whole = r#"
fn test() {
let x = 5;
println!("x = {}", x);
}
"#
.unindent();
let old = r#"
let x = 5;
println!("x = {}", x);
"#
.unindent();
let new = r#"
let x = 10;
println!("New x: {}", x);
"#
.unindent();
let expected = r#"
fn test() {
let x = 10;
println!("New x: {}", x);
}
"#
.unindent();
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
Some(expected.to_string())
);
}
#[gpui::test]
fn test_replace_no_match(cx: &mut TestAppContext) {
let whole = r#"
fn test() {
let x = 5;
}
"#
.unindent();
let old = r#"
let y = 10;
"#
.unindent();
let new = r#"
let y = 20;
"#
.unindent();
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
None
);
}
#[gpui::test]
fn test_replace_whole_ends_before_matching_old(cx: &mut TestAppContext) {
let whole = r#"
fn test() {
let x = 5;
"#
.unindent();
let old = r#"
let x = 5;
println!("x = {}", x);
"#
.unindent();
let new = r#"
let x = 10;
println!("x = {}", x);
"#
.unindent();
// Should return None because whole doesn't fully contain the old text
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
None
);
}
#[gpui::test]
fn test_replace_whole_is_shorter_than_old(cx: &mut TestAppContext) {
let whole = r#"
let x = 5;
"#
.unindent();
let old = r#"
let x = 5;
let y = 10;
"#
.unindent();
let new = r#"
let x = 5;
let y = 20;
"#
.unindent();
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
None
);
}
#[gpui::test]
fn test_replace_old_is_empty(cx: &mut TestAppContext) {
let whole = r#"
fn test() {
let x = 5;
}
"#
.unindent();
let old = "";
let new = r#"
let y = 10;
"#
.unindent();
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
None
);
}
#[gpui::test]
fn test_replace_whole_is_empty(cx: &mut TestAppContext) {
let whole = "";
let old = r#"
let x = 5;
"#
.unindent();
let new = r#"
let x = 10;
"#
.unindent();
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
None
);
}
#[test]
fn test_lines_with_min_indent() {
// Empty string
assert_eq!(lines_with_min_indent(""), (vec![], 0));
// Single line without indentation
assert_eq!(lines_with_min_indent("hello"), (vec!["hello"], 0));
// Multiple lines with no indentation
assert_eq!(
lines_with_min_indent("line1\nline2\nline3"),
(vec!["line1", "line2", "line3"], 0)
);
// Multiple lines with consistent indentation
assert_eq!(
lines_with_min_indent(" line1\n line2\n line3"),
(vec![" line1", " line2", " line3"], 2)
);
// Multiple lines with varying indentation
assert_eq!(
lines_with_min_indent(" line1\n line2\n line3"),
(vec![" line1", " line2", " line3"], 2)
);
// Lines with mixed indentation and empty lines
assert_eq!(
lines_with_min_indent(" line1\n\n line2"),
(vec![" line1", "", " line2"], 2)
);
}
#[gpui::test]
fn test_replace_with_missing_indent_uneven_match(cx: &mut TestAppContext) {
let whole = r#"
fn test() {
if true {
let x = 5;
println!("x = {}", x);
}
}
"#
.unindent();
let old = r#"
let x = 5;
println!("x = {}", x);
"#
.unindent();
let new = r#"
let x = 42;
println!("x = {}", x);
"#
.unindent();
let expected = r#"
fn test() {
if true {
let x = 42;
println!("x = {}", x);
}
}
"#
.unindent();
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
Some(expected.to_string())
);
}
#[gpui::test]
fn test_replace_big_example(cx: &mut TestAppContext) {
let whole = r#"
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_is_valid_age() {
assert!(is_valid_age(0));
assert!(!is_valid_age(151));
}
}
"#
.unindent();
let old = r#"
#[test]
fn test_is_valid_age() {
assert!(is_valid_age(0));
assert!(!is_valid_age(151));
}
"#
.unindent();
let new = r#"
#[test]
fn test_is_valid_age() {
assert!(is_valid_age(0));
assert!(!is_valid_age(151));
}
#[test]
fn test_group_people_by_age() {
let people = vec![
Person::new("Young One", 5, "young@example.com").unwrap(),
Person::new("Teen One", 15, "teen@example.com").unwrap(),
Person::new("Teen Two", 18, "teen2@example.com").unwrap(),
Person::new("Adult One", 25, "adult@example.com").unwrap(),
];
let groups = group_people_by_age(&people);
assert_eq!(groups.get(&0).unwrap().len(), 1); // One person in 0-9
assert_eq!(groups.get(&10).unwrap().len(), 2); // Two people in 10-19
assert_eq!(groups.get(&20).unwrap().len(), 1); // One person in 20-29
}
"#
.unindent();
let expected = r#"
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_is_valid_age() {
assert!(is_valid_age(0));
assert!(!is_valid_age(151));
}
#[test]
fn test_group_people_by_age() {
let people = vec![
Person::new("Young One", 5, "young@example.com").unwrap(),
Person::new("Teen One", 15, "teen@example.com").unwrap(),
Person::new("Teen Two", 18, "teen2@example.com").unwrap(),
Person::new("Adult One", 25, "adult@example.com").unwrap(),
];
let groups = group_people_by_age(&people);
assert_eq!(groups.get(&0).unwrap().len(), 1); // One person in 0-9
assert_eq!(groups.get(&10).unwrap().len(), 2); // Two people in 10-19
assert_eq!(groups.get(&20).unwrap().len(), 1); // One person in 20-29
}
}
"#
.unindent();
assert_eq!(
test_replace_with_flexible_indent(cx, &whole, &old, &new),
Some(expected.to_string())
);
}
#[test]
fn test_drop_lines_prefix() {
// Empty array
assert_eq!(drop_lines_prefix(&[], 2), Vec::<&str>::new());
// Zero prefix length
assert_eq!(
drop_lines_prefix(&["line1", "line2"], 0),
vec!["line1", "line2"]
);
// Normal prefix drop
assert_eq!(
drop_lines_prefix(&[" line1", " line2"], 2),
vec!["line1", "line2"]
);
// Prefix longer than some lines
assert_eq!(drop_lines_prefix(&[" line1", "a"], 2), vec!["line1", ""]);
// Prefix longer than all lines
assert_eq!(drop_lines_prefix(&["a", "b"], 5), vec!["", ""]);
// Mixed length lines
assert_eq!(
drop_lines_prefix(&[" line1", " line2", " line3"], 2),
vec![" line1", "line2", " line3"]
);
}
#[gpui::test]
async fn test_replace_exact_basic(cx: &mut TestAppContext) {
let buffer = cx.new(|cx| language::Buffer::local("let x = 41;", cx));
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
let diff = replace_exact("let x = 41;", "let x = 42;", &snapshot).await;
assert!(diff.is_some());
let diff = diff.unwrap();
assert_eq!(diff.edits.len(), 1);
let result = buffer.update(cx, |buffer, cx| {
let _ = buffer.apply_diff(diff, cx);
buffer.text()
});
assert_eq!(result, "let x = 42;");
}
#[gpui::test]
async fn test_replace_exact_no_match(cx: &mut TestAppContext) {
let buffer = cx.new(|cx| language::Buffer::local("let x = 41;", cx));
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
let diff = replace_exact("let y = 42;", "let y = 43;", &snapshot).await;
assert!(diff.is_none());
}
#[gpui::test]
async fn test_replace_exact_multi_line(cx: &mut TestAppContext) {
let buffer = cx.new(|cx| {
language::Buffer::local(
"fn example() {\n let x = 41;\n println!(\"x = {}\", x);\n}",
cx,
)
});
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
let old_text = " let x = 41;\n println!(\"x = {}\", x);";
let new_text = " let x = 42;\n println!(\"x = {}\", x);";
let diff = replace_exact(old_text, new_text, &snapshot).await;
assert!(diff.is_some());
let diff = diff.unwrap();
let result = buffer.update(cx, |buffer, cx| {
let _ = buffer.apply_diff(diff, cx);
buffer.text()
});
assert_eq!(
result,
"fn example() {\n let x = 42;\n println!(\"x = {}\", x);\n}"
);
}
#[gpui::test]
async fn test_replace_exact_multiple_occurrences(cx: &mut TestAppContext) {
let buffer =
cx.new(|cx| language::Buffer::local("let x = 41;\nlet y = 41;\nlet z = 41;", cx));
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
// Should replace only the first occurrence
let diff = replace_exact("let x = 41;", "let x = 42;", &snapshot).await;
assert!(diff.is_some());
let diff = diff.unwrap();
let result = buffer.update(cx, |buffer, cx| {
let _ = buffer.apply_diff(diff, cx);
buffer.text()
});
assert_eq!(result, "let x = 42;\nlet y = 41;\nlet z = 41;");
}
#[gpui::test]
async fn test_replace_exact_empty_buffer(cx: &mut TestAppContext) {
let buffer = cx.new(|cx| language::Buffer::local("", cx));
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
let diff = replace_exact("let x = 41;", "let x = 42;", &snapshot).await;
assert!(diff.is_none());
}
#[gpui::test]
async fn test_replace_exact_partial_match(cx: &mut TestAppContext) {
let buffer = cx.new(|cx| language::Buffer::local("let x = 41; let y = 42;", cx));
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
// Verify substring replacement actually works
let diff = replace_exact("let x = 41", "let x = 42", &snapshot).await;
assert!(diff.is_some());
let diff = diff.unwrap();
let result = buffer.update(cx, |buffer, cx| {
let _ = buffer.apply_diff(diff, cx);
buffer.text()
});
assert_eq!(result, "let x = 42; let y = 42;");
}
#[gpui::test]
async fn test_replace_exact_whitespace_sensitive(cx: &mut TestAppContext) {
let buffer = cx.new(|cx| language::Buffer::local("let x = 41;", cx));
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
let diff = replace_exact(" let x = 41;", "let x = 42;", &snapshot).await;
assert!(diff.is_none());
}
#[gpui::test]
async fn test_replace_exact_entire_buffer(cx: &mut TestAppContext) {
let buffer = cx.new(|cx| language::Buffer::local("let x = 41;", cx));
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
let diff = replace_exact("let x = 41;", "let x = 42;", &snapshot).await;
assert!(diff.is_some());
let diff = diff.unwrap();
let result = buffer.update(cx, |buffer, cx| {
let _ = buffer.apply_diff(diff, cx);
buffer.text()
});
assert_eq!(result, "let x = 42;");
}
fn test_replace_with_flexible_indent(
cx: &mut TestAppContext,
whole: &str,
old: &str,
new: &str,
) -> Option<String> {
// Create a local buffer with the test content
let buffer = cx.new(|cx| language::Buffer::local(whole, cx));
// Get the buffer snapshot
let buffer_snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
// Call replace_flexible and transform the result
replace_with_flexible_indent(old, new, &buffer_snapshot).map(|diff| {
buffer.update(cx, |buffer, cx| {
let _ = buffer.apply_diff(diff, cx);
buffer.text()
})
})
}
}

View File

@@ -8,7 +8,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{fmt::Write, ops::Range, sync::Arc};
use ui::IconName;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
use crate::schema::json_schema_for;
@@ -91,7 +91,7 @@ impl Tool for SymbolInfoTool {
fn ui_text(&self, input: &serde_json::Value) -> String {
match serde_json::from_value::<SymbolInfoToolInput>(input.clone()) {
Ok(input) => {
let symbol = MarkdownString::inline_code(&input.symbol);
let symbol = MarkdownInlineCode(&input.symbol);
match input.command {
Info::Definition => {

View File

@@ -1,32 +0,0 @@
use anyhow::Result;
use handlebars::Handlebars;
use rust_embed::RustEmbed;
use serde::Serialize;
use std::sync::Arc;
#[derive(RustEmbed)]
#[folder = "src/templates"]
#[include = "*.hbs"]
struct Assets;
pub struct Templates(Handlebars<'static>);
impl Templates {
pub fn new() -> Arc<Self> {
let mut handlebars = Handlebars::new();
handlebars.register_embed_templates::<Assets>().unwrap();
handlebars.register_escape_fn(|text| text.into());
Arc::new(Self(handlebars))
}
}
pub trait Template: Sized {
const TEMPLATE_NAME: &'static str;
fn render(&self, templates: &Templates) -> Result<String>
where
Self: Serialize + Sized,
{
Ok(templates.0.render(Self::TEMPLATE_NAME, self)?)
}
}

View File

@@ -1,46 +0,0 @@
You are an expert text editor. Taking the following file as an input:
```{{path}}
{{file_content}}
```
Your response must be a series of edits in the following format:
<edits>
<old_text>
OLD TEXT 1 HERE
</old_text>
<new_text>
NEW TEXT 1 HERE
</new_text>
<old_text>
OLD TEXT 2 HERE
</old_text>
<new_text>
NEW TEXT 2 HERE
</new_text>
<old_text>
OLD TEXT 3 HERE
</old_text>
<new_text>
NEW TEXT 3 HERE
</new_text>
</edits>
Rules for editing:
- `old_text` represents full lines (including indentation) in the input file that will be replaced with `new_text`
- It is crucial that `old_text` is unique and unambiguous.
- Always include enough context around the lines you want to replace in `old_text` such that it's impossible to mistake them for other lines.
- If you want to replace many occurrences of the same text, repeat the same `old_text`/`new_text` pair multiple times and I will apply them sequentially, one occurrence at a time.
- Don't explain why you made a change, just report the edits.
- Never do MORE than what the user has requested.
- Never do LESS than what the user has requested.
<user_instructions>
{{instructions}}
</user_instructions>
<edits>

View File

@@ -15,7 +15,7 @@ use std::path::Path;
use std::sync::Arc;
use ui::IconName;
use util::command::new_smol_command;
use util::markdown::MarkdownString;
use util::markdown::MarkdownInlineCode;
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct TerminalToolInput {
@@ -55,17 +55,14 @@ impl Tool for TerminalTool {
let first_line = lines.next().unwrap_or_default();
let remaining_line_count = lines.count();
match remaining_line_count {
0 => MarkdownString::inline_code(&first_line).0,
1 => {
MarkdownString::inline_code(&format!(
"{} - {} more line",
first_line, remaining_line_count
))
.0
}
n => {
MarkdownString::inline_code(&format!("{} - {} more lines", first_line, n)).0
}
0 => MarkdownInlineCode(&first_line).to_string(),
1 => MarkdownInlineCode(&format!(
"{} - {} more line",
first_line, remaining_line_count
))
.to_string(),
n => MarkdownInlineCode(&format!("{} - {} more lines", first_line, n))
.to_string(),
}
}
Err(_) => "Run terminal command".to_string(),
@@ -205,39 +202,52 @@ async fn run_command_limited(working_dir: Arc<Path>, command: String) -> Result<
consume_reader(out_reader, truncated).await?;
consume_reader(err_reader, truncated).await?;
let status = cmd.status().await.context("Failed to get command status")?;
// Handle potential errors during status retrieval, including interruption.
match cmd.status().await {
Ok(status) => {
let output_string = if truncated {
// Valid to find `\n` in UTF-8 since 0-127 ASCII characters are not used in
// multi-byte characters.
let last_line_ix = combined_buffer.bytes().rposition(|b| b == b'\n');
let buffer_content =
&combined_buffer[..last_line_ix.unwrap_or(combined_buffer.len())];
let output_string = if truncated {
// Valid to find `\n` in UTF-8 since 0-127 ASCII characters are not used in
// multi-byte characters.
let last_line_ix = combined_buffer.bytes().rposition(|b| b == b'\n');
let combined_buffer = &combined_buffer[..last_line_ix.unwrap_or(combined_buffer.len())];
format!(
"Command output too long. The first {} bytes:\n\n{}",
buffer_content.len(),
output_block(buffer_content),
)
} else {
output_block(&combined_buffer)
};
format!(
"Command output too long. The first {} bytes:\n\n{}",
combined_buffer.len(),
output_block(&combined_buffer),
)
} else {
output_block(&combined_buffer)
};
let output_with_status = if status.success() {
if output_string.is_empty() {
"Command executed successfully.".to_string()
} else {
output_string
}
} else {
format!(
"Command failed with exit code {} (shell: {}).\n\n{}",
status.code().unwrap_or(-1),
shell,
output_string,
)
};
let output_with_status = if status.success() {
if output_string.is_empty() {
"Command executed successfully.".to_string()
} else {
output_string.to_string()
Ok(output_with_status)
}
} else {
format!(
"Command failed with exit code {} (shell: {}).\n\n{}",
status.code().unwrap_or(-1),
shell,
output_string,
)
};
Ok(output_with_status)
Err(err) => {
// Error occurred getting status (potential interruption). Include partial output.
let partial_output = output_block(&combined_buffer);
let error_message = format!(
"Command failed or was interrupted.\nPartial output captured:\n\n{}",
partial_output
);
Err(anyhow!(err).context(error_message))
}
}
}
async fn consume_reader<T: AsyncReadExt + Unpin>(

View File

@@ -1,4 +1,4 @@
use gpui::{Animation, AnimationExt, App, IntoElement, pulsating_between};
use gpui::{Animation, AnimationExt, AnyElement, App, IntoElement, pulsating_between};
use std::time::Duration;
use ui::{Tooltip, prelude::*};
@@ -8,6 +8,8 @@ pub struct ToolCallCardHeader {
icon: IconName,
primary_text: SharedString,
secondary_text: Option<SharedString>,
code_path: Option<SharedString>,
disclosure_slot: Option<AnyElement>,
is_loading: bool,
error: Option<String>,
}
@@ -18,6 +20,8 @@ impl ToolCallCardHeader {
icon,
primary_text: primary_text.into(),
secondary_text: None,
code_path: None,
disclosure_slot: None,
is_loading: false,
error: None,
}
@@ -28,6 +32,16 @@ impl ToolCallCardHeader {
self
}
pub fn with_code_path(mut self, text: impl Into<SharedString>) -> Self {
self.code_path = Some(text.into());
self
}
pub fn disclosure_slot(mut self, element: impl IntoElement) -> Self {
self.disclosure_slot = Some(element.into_any_element());
self
}
pub fn loading(mut self) -> Self {
self.is_loading = true;
self
@@ -42,26 +56,36 @@ impl ToolCallCardHeader {
impl RenderOnce for ToolCallCardHeader {
fn render(self, window: &mut Window, cx: &mut App) -> impl IntoElement {
let font_size = rems(0.8125);
let line_height = window.line_height();
let secondary_text = self.secondary_text;
let code_path = self.code_path;
let bullet_divider = || {
div()
.size(px(3.))
.rounded_full()
.bg(cx.theme().colors().text)
};
h_flex()
.id("tool-label-container")
.gap_1p5()
.gap_2()
.max_w_full()
.overflow_x_scroll()
.opacity(0.8)
.child(
h_flex().h(window.line_height()).justify_center().child(
Icon::new(self.icon)
.size(IconSize::XSmall)
.color(Color::Muted),
),
)
.child(
h_flex()
.h(window.line_height())
.h(line_height)
.gap_1p5()
.text_size(font_size)
.child(
h_flex().h(line_height).justify_center().child(
Icon::new(self.icon)
.size(IconSize::XSmall)
.color(Color::Muted),
),
)
.map(|this| {
if let Some(error) = &self.error {
this.child(format!("{} failed", self.primary_text)).child(
@@ -76,13 +100,15 @@ impl RenderOnce for ToolCallCardHeader {
}
})
.when_some(secondary_text, |this, secondary_text| {
this.child(
div()
.size(px(3.))
.rounded_full()
.bg(cx.theme().colors().text),
this.child(bullet_divider())
.child(div().text_size(font_size).child(secondary_text.clone()))
})
.when_some(code_path, |this, code_path| {
this.child(bullet_divider()).child(
Label::new(code_path.clone())
.size(LabelSize::Small)
.inline_code(cx),
)
.child(div().text_size(font_size).child(secondary_text.clone()))
})
.with_animation(
"loading-label",
@@ -98,5 +124,11 @@ impl RenderOnce for ToolCallCardHeader {
},
),
)
.when_some(self.disclosure_slot, |container, disclosure_slot| {
container
.group("disclosure")
.justify_between()
.child(div().visible_on_hover("disclosure").child(disclosure_slot))
})
}
}

View File

@@ -91,7 +91,7 @@ fn view_release_notes_locally(
let buffer = cx.new(|cx| MultiBuffer::singleton(buffer, cx));
let tab_description = SharedString::from(body.title.to_string());
let tab_content = SharedString::from(body.title.to_string());
let editor = cx.new(|cx| {
Editor::for_multibuffer(buffer, Some(project), window, cx)
});
@@ -102,7 +102,7 @@ fn view_release_notes_locally(
editor,
workspace_handle,
language_registry,
Some(tab_description),
tab_content,
window,
cx,
);

View File

@@ -564,7 +564,7 @@ impl Client {
pub fn production(cx: &mut App) -> Arc<Self> {
let clock = Arc::new(clock::RealSystemClock);
let http = Arc::new(HttpClientWithUrl::new_uri(
let http = Arc::new(HttpClientWithUrl::new_url(
cx.http_client(),
&ClientSettings::get_global(cx).server_url,
cx.http_client().proxy().cloned(),

View File

@@ -1,10 +1,10 @@
//! socks proxy
use anyhow::{Result, anyhow};
use http_client::Uri;
use http_client::Url;
use tokio_socks::tcp::{Socks4Stream, Socks5Stream};
pub(crate) async fn connect_socks_proxy_stream(
proxy: Option<&Uri>,
proxy: Option<&Url>,
rpc_host: (&str, u16),
) -> Result<Box<dyn AsyncReadWrite>> {
let stream = match parse_socks_proxy(proxy) {
@@ -32,9 +32,9 @@ pub(crate) async fn connect_socks_proxy_stream(
Ok(stream)
}
fn parse_socks_proxy(proxy: Option<&Uri>) -> Option<((String, u16), SocksVersion)> {
let proxy_uri = proxy?;
let scheme = proxy_uri.scheme_str()?;
fn parse_socks_proxy(proxy: Option<&Url>) -> Option<((String, u16), SocksVersion)> {
let proxy_url = proxy?;
let scheme = proxy_url.scheme();
let socks_version = if scheme.starts_with("socks4") {
// socks4
SocksVersion::V4
@@ -44,7 +44,7 @@ fn parse_socks_proxy(proxy: Option<&Uri>) -> Option<((String, u16), SocksVersion
} else {
return None;
};
if let (Some(host), Some(port)) = (proxy_uri.host(), proxy_uri.port_u16()) {
if let Some((host, port)) = proxy_url.host().zip(proxy_url.port_or_known_default()) {
Some(((host.to_string(), port), socks_version))
} else {
None

View File

@@ -0,0 +1,3 @@
alter table billing_preferences
add column model_request_overages_enabled bool not null default false,
add column model_request_overages_spend_limit_in_cents integer not null default 0;

View File

@@ -0,0 +1,8 @@
create table subscription_usage_meters (
id serial primary key,
subscription_usage_id integer not null references subscription_usages (id) on delete cascade,
model_id integer not null references models (id) on delete cascade,
requests integer not null default 0
);
create unique index uix_subscription_usage_meters_on_subscription_usage_model on subscription_usage_meters (subscription_usage_id, model_id);

View File

@@ -0,0 +1,6 @@
alter table subscription_usage_meters
add column mode text not null default 'normal';
drop index uix_subscription_usage_meters_on_subscription_usage_model;
create unique index uix_subscription_usage_meters_on_subscription_usage_model_mode on subscription_usage_meters (subscription_usage_id, model_id, mode);

View File

@@ -152,6 +152,7 @@ struct AuthenticatedUserParams {
struct AuthenticatedUserResponse {
user: User,
metrics_id: String,
feature_flags: Vec<String>,
}
async fn get_authenticated_user(
@@ -172,7 +173,12 @@ async fn get_authenticated_user(
)
.await?;
let metrics_id = app.db.get_user_metrics_id(user.id).await?;
Ok(Json(AuthenticatedUserResponse { user, metrics_id }))
let feature_flags = app.db.get_user_flags(user.id).await?;
Ok(Json(AuthenticatedUserResponse {
user,
metrics_id,
feature_flags,
}))
}
#[derive(Deserialize, Debug)]

View File

@@ -65,6 +65,8 @@ struct GetBillingPreferencesParams {
#[derive(Debug, Serialize)]
struct BillingPreferencesResponse {
max_monthly_llm_usage_spending_in_cents: i32,
model_request_overages_enabled: bool,
model_request_overages_spend_limit_in_cents: i32,
}
async fn get_billing_preferences(
@@ -81,16 +83,30 @@ async fn get_billing_preferences(
Ok(Json(BillingPreferencesResponse {
max_monthly_llm_usage_spending_in_cents: preferences
.as_ref()
.map_or(DEFAULT_MAX_MONTHLY_SPEND.0 as i32, |preferences| {
preferences.max_monthly_llm_usage_spending_in_cents
}),
model_request_overages_enabled: preferences.as_ref().map_or(false, |preferences| {
preferences.model_request_overages_enabled
}),
model_request_overages_spend_limit_in_cents: preferences
.as_ref()
.map_or(0, |preferences| {
preferences.model_request_overages_spend_limit_in_cents
}),
}))
}
#[derive(Debug, Deserialize)]
struct UpdateBillingPreferencesBody {
github_user_id: i32,
#[serde(default)]
max_monthly_llm_usage_spending_in_cents: i32,
#[serde(default)]
model_request_overages_enabled: bool,
#[serde(default)]
model_request_overages_spend_limit_in_cents: i32,
}
async fn update_billing_preferences(
@@ -106,6 +122,8 @@ async fn update_billing_preferences(
let max_monthly_llm_usage_spending_in_cents =
body.max_monthly_llm_usage_spending_in_cents.max(0);
let model_request_overages_spend_limit_in_cents =
body.model_request_overages_spend_limit_in_cents.max(0);
let billing_preferences =
if let Some(_billing_preferences) = app.db.get_billing_preferences(user.id).await? {
@@ -116,6 +134,12 @@ async fn update_billing_preferences(
max_monthly_llm_usage_spending_in_cents: ActiveValue::set(
max_monthly_llm_usage_spending_in_cents,
),
model_request_overages_enabled: ActiveValue::set(
body.model_request_overages_enabled,
),
model_request_overages_spend_limit_in_cents: ActiveValue::set(
model_request_overages_spend_limit_in_cents,
),
},
)
.await?
@@ -125,18 +149,22 @@ async fn update_billing_preferences(
user.id,
&crate::db::CreateBillingPreferencesParams {
max_monthly_llm_usage_spending_in_cents,
model_request_overages_enabled: body.model_request_overages_enabled,
model_request_overages_spend_limit_in_cents,
},
)
.await?
};
SnowflakeRow::new(
"Spend Limit Updated",
"Billing Preferences Updated",
Some(user.metrics_id),
user.admin,
None,
json!({
"user_id": user.id,
"model_request_overages_enabled": billing_preferences.model_request_overages_enabled,
"model_request_overages_spend_limit_in_cents": billing_preferences.model_request_overages_spend_limit_in_cents,
"max_monthly_llm_usage_spending_in_cents": billing_preferences.max_monthly_llm_usage_spending_in_cents,
}),
)
@@ -149,6 +177,9 @@ async fn update_billing_preferences(
Ok(Json(BillingPreferencesResponse {
max_monthly_llm_usage_spending_in_cents: billing_preferences
.max_monthly_llm_usage_spending_in_cents,
model_request_overages_enabled: billing_preferences.model_request_overages_enabled,
model_request_overages_spend_limit_in_cents: billing_preferences
.model_request_overages_spend_limit_in_cents,
}))
}
@@ -291,16 +322,35 @@ async fn create_billing_subscription(
CustomerId::from_str(&existing_customer.stripe_customer_id)
.context("failed to parse customer ID")?
} else {
let customer = Customer::create(
&stripe_client,
CreateCustomer {
email: user.email_address.as_deref(),
..Default::default()
},
)
.await?;
let existing_customer = if let Some(email) = user.email_address.as_deref() {
let customers = Customer::list(
&stripe_client,
&stripe::ListCustomers {
email: Some(email),
..Default::default()
},
)
.await?;
customer.id
customers.data.first().cloned()
} else {
None
};
if let Some(existing_customer) = existing_customer {
existing_customer.id
} else {
let customer = Customer::create(
&stripe_client,
CreateCustomer {
email: user.email_address.as_deref(),
..Default::default()
},
)
.await?;
customer.id
}
};
let success_url = format!(
@@ -343,7 +393,9 @@ async fn create_billing_subscription(
zed_llm_client::LanguageModelProvider::Anthropic,
"claude-3-7-sonnet",
)?;
let stripe_model = stripe_billing.register_model(default_model).await?;
let stripe_model = stripe_billing
.register_model_for_token_based_usage(default_model)
.await?;
stripe_billing
.checkout(customer_id, &user.github_login, &stripe_model, &success_url)
.await?
@@ -1193,9 +1245,9 @@ async fn find_or_create_billing_customer(
Ok(Some(billing_customer))
}
const SYNC_LLM_USAGE_WITH_STRIPE_INTERVAL: Duration = Duration::from_secs(60);
const SYNC_LLM_TOKEN_USAGE_WITH_STRIPE_INTERVAL: Duration = Duration::from_secs(60);
pub fn sync_llm_usage_with_stripe_periodically(app: Arc<AppState>) {
pub fn sync_llm_token_usage_with_stripe_periodically(app: Arc<AppState>) {
let Some(stripe_billing) = app.stripe_billing.clone() else {
log::warn!("failed to retrieve Stripe billing object");
return;
@@ -1210,17 +1262,19 @@ pub fn sync_llm_usage_with_stripe_periodically(app: Arc<AppState>) {
let executor = executor.clone();
async move {
loop {
sync_with_stripe(&app, &llm_db, &stripe_billing)
sync_token_usage_with_stripe(&app, &llm_db, &stripe_billing)
.await
.context("failed to sync LLM usage to Stripe")
.trace_err();
executor.sleep(SYNC_LLM_USAGE_WITH_STRIPE_INTERVAL).await;
executor
.sleep(SYNC_LLM_TOKEN_USAGE_WITH_STRIPE_INTERVAL)
.await;
}
}
});
}
async fn sync_with_stripe(
async fn sync_token_usage_with_stripe(
app: &Arc<AppState>,
llm_db: &Arc<LlmDatabase>,
stripe_billing: &Arc<StripeBilling>,
@@ -1251,15 +1305,120 @@ async fn sync_with_stripe(
.parse()
.context("failed to parse stripe customer id from db")?;
let stripe_model = stripe_billing.register_model(&model).await?;
let stripe_model = stripe_billing
.register_model_for_token_based_usage(&model)
.await?;
stripe_billing
.subscribe_to_model(&stripe_subscription_id, &stripe_model)
.await?;
stripe_billing
.bill_model_usage(&stripe_customer_id, &stripe_model, &event)
.bill_model_token_usage(&stripe_customer_id, &stripe_model, &event)
.await?;
llm_db.consume_billing_event(event.id).await?;
}
Ok(())
}
const SYNC_LLM_REQUEST_USAGE_WITH_STRIPE_INTERVAL: Duration = Duration::from_secs(60);
pub fn sync_llm_request_usage_with_stripe_periodically(app: Arc<AppState>) {
let Some(stripe_billing) = app.stripe_billing.clone() else {
log::warn!("failed to retrieve Stripe billing object");
return;
};
let Some(llm_db) = app.llm_db.clone() else {
log::warn!("failed to retrieve LLM database");
return;
};
let executor = app.executor.clone();
executor.spawn_detached({
let executor = executor.clone();
async move {
loop {
sync_model_request_usage_with_stripe(&app, &llm_db, &stripe_billing)
.await
.context("failed to sync LLM request usage to Stripe")
.trace_err();
executor
.sleep(SYNC_LLM_REQUEST_USAGE_WITH_STRIPE_INTERVAL)
.await;
}
}
});
}
async fn sync_model_request_usage_with_stripe(
app: &Arc<AppState>,
llm_db: &Arc<LlmDatabase>,
stripe_billing: &Arc<StripeBilling>,
) -> anyhow::Result<()> {
let usage_meters = llm_db
.get_current_subscription_usage_meters(Utc::now())
.await?;
let user_ids = usage_meters
.iter()
.map(|(_, usage)| usage.user_id)
.collect::<HashSet<UserId>>();
let billing_subscriptions = app
.db
.get_active_zed_pro_billing_subscriptions(user_ids)
.await?;
let claude_3_5_sonnet = stripe_billing
.find_price_by_lookup_key("claude-3-5-sonnet-requests")
.await?;
let claude_3_7_sonnet = stripe_billing
.find_price_by_lookup_key("claude-3-7-sonnet-requests")
.await?;
for (usage_meter, usage) in usage_meters {
maybe!(async {
let Some((billing_customer, billing_subscription)) =
billing_subscriptions.get(&usage.user_id)
else {
bail!(
"Attempted to sync usage meter for user who is not a Stripe customer: {}",
usage.user_id
);
};
let stripe_customer_id = billing_customer
.stripe_customer_id
.parse::<stripe::CustomerId>()
.context("failed to parse Stripe customer ID from database")?;
let stripe_subscription_id = billing_subscription
.stripe_subscription_id
.parse::<stripe::SubscriptionId>()
.context("failed to parse Stripe subscription ID from database")?;
let model = llm_db.model_by_id(usage_meter.model_id)?;
let (price_id, meter_event_name) = match model.name.as_str() {
"claude-3-5-sonnet" => (&claude_3_5_sonnet.id, "claude_3_5_sonnet/requests"),
"claude-3-7-sonnet" => (&claude_3_7_sonnet.id, "claude_3_7_sonnet/requests"),
model_name => {
bail!("Attempted to sync usage meter for unsupported model: {model_name:?}")
}
};
stripe_billing
.subscribe_to_price(&stripe_subscription_id, price_id)
.await?;
stripe_billing
.bill_model_request_usage(
&stripe_customer_id,
meter_event_name,
usage_meter.requests,
)
.await?;
Ok(())
})
.await
.log_err();
}
Ok(())
}

View File

@@ -800,6 +800,7 @@ impl LocalSettingsKind {
proto::LocalSettingsKind::Settings => Self::Settings,
proto::LocalSettingsKind::Tasks => Self::Tasks,
proto::LocalSettingsKind::Editorconfig => Self::Editorconfig,
proto::LocalSettingsKind::Debug => Self::Debug,
}
}
@@ -808,6 +809,7 @@ impl LocalSettingsKind {
Self::Settings => proto::LocalSettingsKind::Settings,
Self::Tasks => proto::LocalSettingsKind::Tasks,
Self::Editorconfig => proto::LocalSettingsKind::Editorconfig,
Self::Debug => proto::LocalSettingsKind::Debug,
}
}
}

View File

@@ -3,11 +3,15 @@ use super::*;
#[derive(Debug)]
pub struct CreateBillingPreferencesParams {
pub max_monthly_llm_usage_spending_in_cents: i32,
pub model_request_overages_enabled: bool,
pub model_request_overages_spend_limit_in_cents: i32,
}
#[derive(Debug, Default)]
pub struct UpdateBillingPreferencesParams {
pub max_monthly_llm_usage_spending_in_cents: ActiveValue<i32>,
pub model_request_overages_enabled: ActiveValue<bool>,
pub model_request_overages_spend_limit_in_cents: ActiveValue<i32>,
}
impl Database {
@@ -37,6 +41,12 @@ impl Database {
max_monthly_llm_usage_spending_in_cents: ActiveValue::set(
params.max_monthly_llm_usage_spending_in_cents,
),
model_request_overages_enabled: ActiveValue::set(
params.model_request_overages_enabled,
),
model_request_overages_spend_limit_in_cents: ActiveValue::set(
params.model_request_overages_spend_limit_in_cents,
),
..Default::default()
})
.exec_with_returning(&*tx)
@@ -59,6 +69,10 @@ impl Database {
max_monthly_llm_usage_spending_in_cents: params
.max_monthly_llm_usage_spending_in_cents
.clone(),
model_request_overages_enabled: params.model_request_overages_enabled.clone(),
model_request_overages_spend_limit_in_cents: params
.model_request_overages_spend_limit_in_cents
.clone(),
..Default::default()
})
.filter(billing_preference::Column::UserId.eq(user_id))

View File

@@ -191,6 +191,38 @@ impl Database {
.await
}
pub async fn get_active_zed_pro_billing_subscriptions(
&self,
user_ids: HashSet<UserId>,
) -> Result<HashMap<UserId, (billing_customer::Model, billing_subscription::Model)>> {
self.transaction(|tx| {
let user_ids = user_ids.clone();
async move {
let mut rows = billing_subscription::Entity::find()
.inner_join(billing_customer::Entity)
.select_also(billing_customer::Entity)
.filter(billing_customer::Column::UserId.is_in(user_ids))
.filter(
billing_subscription::Column::StripeSubscriptionStatus
.eq(StripeSubscriptionStatus::Active),
)
.filter(billing_subscription::Column::Kind.eq(SubscriptionKind::ZedPro))
.order_by_asc(billing_subscription::Column::Id)
.stream(&*tx)
.await?;
let mut subscriptions = HashMap::default();
while let Some(row) = rows.next().await {
if let (subscription, Some(customer)) = row? {
subscriptions.insert(customer.user_id, (customer, subscription));
}
}
Ok(subscriptions)
}
})
.await
}
/// Returns whether the user has an active billing subscription.
pub async fn has_active_billing_subscription(&self, user_id: UserId) -> Result<bool> {
Ok(self.count_active_billing_subscriptions(user_id).await? > 0)

View File

@@ -9,6 +9,8 @@ pub struct Model {
pub created_at: DateTime,
pub user_id: UserId,
pub max_monthly_llm_usage_spending_in_cents: i32,
pub model_request_overages_enabled: bool,
pub model_request_overages_spend_limit_in_cents: i32,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]

View File

@@ -32,4 +32,6 @@ pub enum LocalSettingsKind {
Tasks,
#[sea_orm(string_value = "editorconfig")]
Editorconfig,
#[sea_orm(string_value = "debug")]
Debug,
}

View File

@@ -2,5 +2,6 @@ use super::*;
pub mod billing_events;
pub mod providers;
pub mod subscription_usage_meters;
pub mod subscription_usages;
pub mod usages;

View File

@@ -0,0 +1,37 @@
use crate::llm::db::queries::subscription_usages::convert_chrono_to_time;
use super::*;
impl LlmDatabase {
/// Returns all current subscription usage meters as of the given timestamp.
pub async fn get_current_subscription_usage_meters(
&self,
now: DateTimeUtc,
) -> Result<Vec<(subscription_usage_meter::Model, subscription_usage::Model)>> {
let now = convert_chrono_to_time(now)?;
self.transaction(|tx| async move {
let result = subscription_usage_meter::Entity::find()
.inner_join(subscription_usage::Entity)
.filter(
subscription_usage::Column::PeriodStartAt
.lte(now)
.and(subscription_usage::Column::PeriodEndAt.gte(now)),
)
.select_also(subscription_usage::Entity)
.all(&*tx)
.await?;
let result = result
.into_iter()
.filter_map(|(meter, usage)| {
let usage = usage?;
Some((meter, usage))
})
.collect();
Ok(result)
})
.await
}
}

View File

@@ -6,7 +6,7 @@ use crate::db::{UserId, billing_subscription};
use super::*;
fn convert_chrono_to_time(datetime: DateTimeUtc) -> anyhow::Result<PrimitiveDateTime> {
pub fn convert_chrono_to_time(datetime: DateTimeUtc) -> anyhow::Result<PrimitiveDateTime> {
use chrono::{Datelike as _, Timelike as _};
let date = time::Date::from_calendar_date(

View File

@@ -3,5 +3,6 @@ pub mod model;
pub mod monthly_usage;
pub mod provider;
pub mod subscription_usage;
pub mod subscription_usage_meter;
pub mod usage;
pub mod usage_measure;

View File

@@ -0,0 +1,43 @@
use sea_orm::entity::prelude::*;
use crate::llm::db::ModelId;
#[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
#[sea_orm(table_name = "subscription_usage_meters")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: i32,
pub subscription_usage_id: i32,
pub model_id: ModelId,
pub requests: i32,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::subscription_usage::Entity",
from = "Column::SubscriptionUsageId",
to = "super::subscription_usage::Column::Id"
)]
SubscriptionUsage,
#[sea_orm(
belongs_to = "super::model::Entity",
from = "Column::ModelId",
to = "super::model::Column::Id"
)]
Model,
}
impl Related<super::subscription_usage::Entity> for Entity {
fn to() -> RelationDef {
Relation::SubscriptionUsage.def()
}
}
impl Related<super::model::Entity> for Entity {
fn to() -> RelationDef {
Relation::Model.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -34,6 +34,10 @@ pub struct LlmTokenClaims {
#[serde(default)]
pub subscription_period: Option<(NaiveDateTime, NaiveDateTime)>,
#[serde(default)]
pub enable_model_request_overages: bool,
#[serde(default)]
pub model_request_overages_spend_limit_in_cents: u32,
#[serde(default)]
pub can_use_web_search_tool: bool,
}
@@ -75,6 +79,7 @@ impl LlmTokenClaims {
can_use_web_search_tool: feature_flags.iter().any(|flag| flag == "assistant2"),
has_llm_subscription: has_legacy_llm_subscription,
max_monthly_spend_in_cents: billing_preferences
.as_ref()
.map_or(DEFAULT_MAX_MONTHLY_SPEND.0, |preferences| {
preferences.max_monthly_llm_usage_spending_in_cents as u32
}),
@@ -96,6 +101,16 @@ impl LlmTokenClaims {
Some((period_start_at.naive_utc(), period_end_at.naive_utc()))
}),
enable_model_request_overages: billing_preferences
.as_ref()
.map_or(false, |preferences| {
preferences.model_request_overages_enabled
}),
model_request_overages_spend_limit_in_cents: billing_preferences
.as_ref()
.map_or(0, |preferences| {
preferences.model_request_overages_spend_limit_in_cents as u32
}),
};
Ok(jsonwebtoken::encode(

View File

@@ -8,7 +8,9 @@ use axum::{
};
use collab::api::CloudflareIpCountryHeader;
use collab::api::billing::sync_llm_usage_with_stripe_periodically;
use collab::api::billing::{
sync_llm_request_usage_with_stripe_periodically, sync_llm_token_usage_with_stripe_periodically,
};
use collab::llm::db::LlmDatabase;
use collab::migrations::run_database_migrations;
use collab::user_backfiller::spawn_user_backfiller;
@@ -152,7 +154,8 @@ async fn main() -> Result<()> {
if let Some(mut llm_db) = llm_db {
llm_db.initialize().await?;
sync_llm_usage_with_stripe_periodically(state.clone());
sync_llm_request_usage_with_stripe_periodically(state.clone());
sync_llm_token_usage_with_stripe_periodically(state.clone());
}
app = app

View File

@@ -1,12 +1,13 @@
use std::sync::Arc;
use crate::{Cents, Result, llm};
use anyhow::Context as _;
use anyhow::{Context as _, anyhow};
use chrono::{Datelike, Utc};
use collections::HashMap;
use serde::{Deserialize, Serialize};
use stripe::PriceId;
use tokio::sync::RwLock;
use uuid::Uuid;
pub struct StripeBilling {
state: RwLock<StripeBillingState>,
@@ -17,9 +18,10 @@ pub struct StripeBilling {
struct StripeBillingState {
meters_by_event_name: HashMap<String, StripeMeter>,
price_ids_by_meter_id: HashMap<String, stripe::PriceId>,
prices_by_lookup_key: HashMap<String, stripe::Price>,
}
pub struct StripeModel {
pub struct StripeModelTokenPrices {
input_tokens_price: StripeBillingPrice,
input_cache_creation_tokens_price: StripeBillingPrice,
input_cache_read_tokens_price: StripeBillingPrice,
@@ -62,6 +64,10 @@ impl StripeBilling {
}
for price in prices.data {
if let Some(lookup_key) = price.lookup_key.clone() {
state.prices_by_lookup_key.insert(lookup_key, price.clone());
}
if let Some(recurring) = price.recurring {
if let Some(meter) = recurring.meter {
state.price_ids_by_meter_id.insert(meter, price.id);
@@ -74,36 +80,49 @@ impl StripeBilling {
Ok(())
}
pub async fn register_model(&self, model: &llm::db::model::Model) -> Result<StripeModel> {
pub async fn find_price_by_lookup_key(&self, lookup_key: &str) -> Result<stripe::Price> {
self.state
.read()
.await
.prices_by_lookup_key
.get(lookup_key)
.cloned()
.ok_or_else(|| crate::Error::Internal(anyhow!("no price ID found for {lookup_key:?}")))
}
pub async fn register_model_for_token_based_usage(
&self,
model: &llm::db::model::Model,
) -> Result<StripeModelTokenPrices> {
let input_tokens_price = self
.get_or_insert_price(
.get_or_insert_token_price(
&format!("model_{}/input_tokens", model.id),
&format!("{} (Input Tokens)", model.name),
Cents::new(model.price_per_million_input_tokens as u32),
)
.await?;
let input_cache_creation_tokens_price = self
.get_or_insert_price(
.get_or_insert_token_price(
&format!("model_{}/input_cache_creation_tokens", model.id),
&format!("{} (Input Cache Creation Tokens)", model.name),
Cents::new(model.price_per_million_cache_creation_input_tokens as u32),
)
.await?;
let input_cache_read_tokens_price = self
.get_or_insert_price(
.get_or_insert_token_price(
&format!("model_{}/input_cache_read_tokens", model.id),
&format!("{} (Input Cache Read Tokens)", model.name),
Cents::new(model.price_per_million_cache_read_input_tokens as u32),
)
.await?;
let output_tokens_price = self
.get_or_insert_price(
.get_or_insert_token_price(
&format!("model_{}/output_tokens", model.id),
&format!("{} (Output Tokens)", model.name),
Cents::new(model.price_per_million_output_tokens as u32),
)
.await?;
Ok(StripeModel {
Ok(StripeModelTokenPrices {
input_tokens_price,
input_cache_creation_tokens_price,
input_cache_read_tokens_price,
@@ -111,7 +130,7 @@ impl StripeBilling {
})
}
async fn get_or_insert_price(
async fn get_or_insert_token_price(
&self,
meter_event_name: &str,
price_description: &str,
@@ -207,10 +226,43 @@ impl StripeBilling {
})
}
pub async fn subscribe_to_price(
&self,
subscription_id: &stripe::SubscriptionId,
price_id: &stripe::PriceId,
) -> Result<()> {
let subscription =
stripe::Subscription::retrieve(&self.client, &subscription_id, &[]).await?;
if subscription_contains_price(&subscription, price_id) {
return Ok(());
}
stripe::Subscription::update(
&self.client,
subscription_id,
stripe::UpdateSubscription {
items: Some(vec![stripe::UpdateSubscriptionItems {
price: Some(price_id.to_string()),
..Default::default()
}]),
trial_settings: Some(stripe::UpdateSubscriptionTrialSettings {
end_behavior: stripe::UpdateSubscriptionTrialSettingsEndBehavior {
missing_payment_method: stripe::UpdateSubscriptionTrialSettingsEndBehaviorMissingPaymentMethod::Cancel,
},
}),
..Default::default()
},
)
.await?;
Ok(())
}
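
Taken together, `find_price_by_lookup_key` and `subscribe_to_price` let a caller resolve a cached price by its Stripe lookup key and attach it to a subscription only when it is not already there. A minimal sketch of that flow; the wrapper name and lookup key are assumptions, only the two method signatures come from the hunks above:

```
// Sketch only: `ensure_price_on_subscription` and the lookup key are illustrative;
// the StripeBilling methods and their signatures are taken from the diff above.
async fn ensure_price_on_subscription(
    billing: &StripeBilling,
    subscription_id: &stripe::SubscriptionId,
) -> Result<()> {
    // Resolve the price from the in-memory cache populated during initialization.
    let price = billing.find_price_by_lookup_key("zed-pro-requests").await?;
    // No-op if the subscription already carries this price.
    billing.subscribe_to_price(subscription_id, &price.id).await?;
    Ok(())
}
```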
pub async fn subscribe_to_model(
&self,
subscription_id: &stripe::SubscriptionId,
model: &StripeModel,
model: &StripeModelTokenPrices,
) -> Result<()> {
let subscription =
stripe::Subscription::retrieve(&self.client, &subscription_id, &[]).await?;
@@ -268,10 +320,10 @@ impl StripeBilling {
Ok(())
}
pub async fn bill_model_usage(
pub async fn bill_model_token_usage(
&self,
customer_id: &stripe::CustomerId,
model: &StripeModel,
model: &StripeModelTokenPrices,
event: &llm::db::billing_event::Model,
) -> Result<()> {
let timestamp = Utc::now().timestamp();
@@ -343,11 +395,37 @@ impl StripeBilling {
Ok(())
}
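
The token-based path keeps its pre-rename shape: register the model's four per-token prices once, then meter each billing event against them. A sketch under the assumption that a model row and billing event are already loaded; the wrapper function itself is illustrative:

```
// Sketch only: `meter_token_usage` is illustrative; the StripeBilling method
// signatures come from the hunks above.
async fn meter_token_usage(
    billing: &StripeBilling,
    customer_id: &stripe::CustomerId,
    model: &llm::db::model::Model,
    event: &llm::db::billing_event::Model,
) -> Result<()> {
    // Creates (or reuses) the input/cache-creation/cache-read/output token prices.
    let token_prices = billing.register_model_for_token_based_usage(model).await?;
    // Reports the event's token counts against those prices.
    billing
        .bill_model_token_usage(customer_id, &token_prices, event)
        .await
}
```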
pub async fn bill_model_request_usage(
&self,
customer_id: &stripe::CustomerId,
event_name: &str,
requests: i32,
) -> Result<()> {
let timestamp = Utc::now().timestamp();
let idempotency_key = Uuid::new_v4();
StripeMeterEvent::create(
&self.client,
StripeCreateMeterEventParams {
identifier: &format!("model_requests/{}", idempotency_key),
event_name,
payload: StripeCreateMeterEventPayload {
value: requests as u64,
stripe_customer_id: customer_id,
},
timestamp: Some(timestamp),
},
)
.await?;
Ok(())
}
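
Request-based billing is simpler: `bill_model_request_usage` emits a single meter event whose identifier is `model_requests/<uuid>`. A sketch of a call site; the event name and count are placeholders, only the signature is taken from the hunk above:

```
// Sketch only: the event name and request count are placeholders.
async fn report_request_usage(
    billing: &StripeBilling,
    customer_id: &stripe::CustomerId,
) -> Result<()> {
    billing
        .bill_model_request_usage(customer_id, "model_requests", 42)
        .await
}
```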
pub async fn checkout(
&self,
customer_id: stripe::CustomerId,
github_login: &str,
model: &StripeModel,
model: &StripeModelTokenPrices,
success_url: &str,
) -> Result<String> {
let first_of_next_month = Utc::now()


@@ -680,6 +680,7 @@ async fn test_collaborating_with_code_actions(
editor.toggle_code_actions(
&ToggleCodeActions {
deployed_from_indicator: None,
quick_launch: false,
},
window,
cx,


@@ -1517,10 +1517,7 @@ async fn test_following_across_workspaces(cx_a: &mut TestAppContext, cx_b: &mut
workspace.leader_for_pane(workspace.active_pane())
);
let item = workspace.active_item(cx).unwrap();
assert_eq!(
item.tab_description(0, cx).unwrap(),
SharedString::from("w.rs")
);
assert_eq!(item.tab_content_text(0, cx), SharedString::from("w.rs"));
});
// TODO: in app code, this would be done by the collab_ui.
@@ -1546,10 +1543,7 @@ async fn test_following_across_workspaces(cx_a: &mut TestAppContext, cx_b: &mut
executor.run_until_parked();
workspace_b_project_a.update(&mut cx_b2, |workspace, cx| {
let item = workspace.active_item(cx).unwrap();
assert_eq!(
item.tab_description(0, cx).unwrap(),
SharedString::from("x.rs")
);
assert_eq!(item.tab_content_text(0, cx), SharedString::from("x.rs"));
});
workspace_a.update_in(cx_a, |workspace, window, cx| {
@@ -1564,7 +1558,7 @@ async fn test_following_across_workspaces(cx_a: &mut TestAppContext, cx_b: &mut
workspace.leader_for_pane(workspace.active_pane())
);
let item = workspace.active_pane().read(cx).active_item().unwrap();
assert_eq!(item.tab_description(0, cx).unwrap(), "x.rs");
assert_eq!(item.tab_content_text(0, cx), "x.rs");
});
// b moves to y.rs in b's project, a is still following but can't yet see
@@ -1625,10 +1619,7 @@ async fn test_following_across_workspaces(cx_a: &mut TestAppContext, cx_b: &mut
workspace.leader_for_pane(workspace.active_pane())
);
let item = workspace.active_item(cx).unwrap();
assert_eq!(
item.tab_description(0, cx).unwrap(),
SharedString::from("y.rs")
);
assert_eq!(item.tab_content_text(0, cx), SharedString::from("y.rs"));
});
}
@@ -1885,13 +1876,7 @@ fn pane_summaries(workspace: &Entity<Workspace>, cx: &mut VisualTestContext) ->
items: pane
.items()
.enumerate()
.map(|(ix, item)| {
(
ix == active_ix,
item.tab_description(0, cx)
.map_or(String::new(), |s| s.to_string()),
)
})
.map(|(ix, item)| (ix == active_ix, item.tab_content_text(0, cx).into()))
.collect(),
}
})
@@ -2179,7 +2164,7 @@ async fn test_following_to_channel_notes_other_workspace(
cx_a.run_until_parked();
workspace_a.update(cx_a, |workspace, cx| {
let editor = workspace.active_item(cx).unwrap();
assert_eq!(editor.tab_description(0, cx).unwrap(), "1.txt");
assert_eq!(editor.tab_content_text(0, cx), "1.txt");
});
// b joins channel and is following a
@@ -2188,7 +2173,7 @@ async fn test_following_to_channel_notes_other_workspace(
let (workspace_b, cx_b) = client_b.active_workspace(cx_b);
workspace_b.update(cx_b, |workspace, cx| {
let editor = workspace.active_item(cx).unwrap();
assert_eq!(editor.tab_description(0, cx).unwrap(), "1.txt");
assert_eq!(editor.tab_content_text(0, cx), "1.txt");
});
// a opens a second workspace and the channel notes
@@ -2212,13 +2197,13 @@ async fn test_following_to_channel_notes_other_workspace(
workspace_a.update(cx_a, |workspace, cx| {
let editor = workspace.active_item(cx).unwrap();
assert_eq!(editor.tab_description(0, cx).unwrap(), "1.txt");
assert_eq!(editor.tab_content_text(0, cx), "1.txt");
});
// b should follow a back
workspace_b.update(cx_b, |workspace, cx| {
let editor = workspace.active_item_as::<Editor>(cx).unwrap();
assert_eq!(editor.tab_description(0, cx).unwrap(), "1.txt");
assert_eq!(editor.tab_content_text(0, cx), "1.txt");
});
}
@@ -2238,7 +2223,7 @@ async fn test_following_while_deactivated(cx_a: &mut TestAppContext, cx_b: &mut
cx_a.run_until_parked();
workspace_a.update(cx_a, |workspace, cx| {
let editor = workspace.active_item(cx).unwrap();
assert_eq!(editor.tab_description(0, cx).unwrap(), "1.txt");
assert_eq!(editor.tab_content_text(0, cx), "1.txt");
});
// b joins channel and is following a
@@ -2247,7 +2232,7 @@ async fn test_following_while_deactivated(cx_a: &mut TestAppContext, cx_b: &mut
let (workspace_b, cx_b) = client_b.active_workspace(cx_b);
workspace_b.update(cx_b, |workspace, cx| {
let editor = workspace.active_item(cx).unwrap();
assert_eq!(editor.tab_description(0, cx).unwrap(), "1.txt");
assert_eq!(editor.tab_content_text(0, cx), "1.txt");
});
// stop following
@@ -2260,7 +2245,7 @@ async fn test_following_while_deactivated(cx_a: &mut TestAppContext, cx_b: &mut
workspace_b.update(cx_b, |workspace, cx| {
let editor = workspace.active_item_as::<Editor>(cx).unwrap();
assert_eq!(editor.tab_description(0, cx).unwrap(), "1.txt");
assert_eq!(editor.tab_content_text(0, cx), "1.txt");
});
// a opens a file in a new window
@@ -2281,12 +2266,12 @@ async fn test_following_while_deactivated(cx_a: &mut TestAppContext, cx_b: &mut
workspace_a.update(cx_a, |workspace, cx| {
let editor = workspace.active_item(cx).unwrap();
assert_eq!(editor.tab_description(0, cx).unwrap(), "2.js");
assert_eq!(editor.tab_content_text(0, cx), "2.js");
});
// b should follow a back
workspace_b.update(cx_b, |workspace, cx| {
let editor = workspace.active_item_as::<Editor>(cx).unwrap();
assert_eq!(editor.tab_description(0, cx).unwrap(), "2.js");
assert_eq!(editor.tab_content_text(0, cx), "2.js");
});
}
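
Every test change in this file follows the same pattern: `tab_description(..).unwrap()` becomes `tab_content_text(..)`. A standalone toy, using simplified stand-ins rather than the real gpui/workspace types, showing why the `.unwrap()` disappears:

```
// Stand-in types; the real SharedString, App, and Item live in gpui/workspace.
#[derive(Clone, Debug, PartialEq)]
struct SharedString(String);

impl From<&str> for SharedString {
    fn from(s: &str) -> Self {
        SharedString(s.to_owned())
    }
}

struct App;

trait Item {
    // Old accessor: optional, so call sites had to unwrap before comparing.
    fn tab_description(&self, _detail: usize, _cx: &App) -> Option<SharedString> {
        None
    }
    // New accessor: always yields displayable text, so tests compare it directly.
    fn tab_content_text(&self, detail: usize, cx: &App) -> SharedString;
}

struct ChannelNotes;

impl Item for ChannelNotes {
    fn tab_content_text(&self, _detail: usize, _cx: &App) -> SharedString {
        "Channels".into()
    }
}

fn main() {
    assert_eq!(ChannelNotes.tab_content_text(0, &App), SharedString::from("Channels"));
}
```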


@@ -1824,6 +1824,8 @@ async fn test_active_call_events(
server
.create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)])
.await;
executor.run_until_parked();
let active_call_a = cx_a.read(ActiveCall::global);
let active_call_b = cx_b.read(ActiveCall::global);


@@ -540,6 +540,10 @@ impl Item for ChannelView {
fn to_item_events(event: &EditorEvent, f: impl FnMut(ItemEvent)) {
Editor::to_item_events(event, f)
}
fn tab_content_text(&self, _detail: usize, _cx: &App) -> SharedString {
"Channels".into()
}
}
impl FollowableItem for ChannelView {


@@ -65,6 +65,15 @@ pub fn init(cx: &mut App) {
cx.observe_new(|workspace: &mut Workspace, _, _| {
workspace.register_action(|workspace, _: &ToggleFocus, window, cx| {
workspace.toggle_panel_focus::<CollabPanel>(window, cx);
if let Some(collab_panel) = workspace.panel::<CollabPanel>(cx) {
collab_panel.update(cx, |panel, cx| {
panel.filter_editor.update(cx, |editor, cx| {
if editor.snapshot(window, cx).is_focused() {
editor.select_all(&Default::default(), window, cx);
}
});
})
}
});
workspace.register_action(|_, _: &OpenChannelNotes, window, cx| {
let channel_id = ActiveCall::global(cx)


@@ -20,6 +20,7 @@ command_palette_hooks.workspace = true
db.workspace = true
fuzzy.workspace = true
gpui.workspace = true
log.workspace = true
picker.workspace = true
postage.workspace = true
serde.workspace = true


@@ -67,7 +67,12 @@ impl CommandPaletteDB {
command_name: impl Into<String>,
user_query: impl Into<String>,
) -> Result<()> {
self.write_command_invocation_internal(command_name.into(), user_query.into())
let command_name = command_name.into();
let user_query = user_query.into();
log::debug!(
"Writing command invocation: command_name={command_name}, user_query={user_query}"
);
self.write_command_invocation_internal(command_name, user_query)
.await
}
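
The test hunks below swap hand-wrapping `db::open_test_db(..)` for a `CommandPaletteDB::open_test_db(..)` constructor. A guess at what that helper looks like; its actual body is not part of this diff:

```
impl CommandPaletteDB {
    // Hypothetical: wraps the existing db::open_test_db helper so tests don't
    // have to construct the tuple struct by hand.
    pub async fn open_test_db(name: &str) -> Self {
        CommandPaletteDB(db::open_test_db(name).await)
    }
}
```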
@@ -117,7 +122,7 @@ mod tests {
#[gpui::test]
async fn test_saves_and_retrieves_command_invocation() {
let db =
CommandPaletteDB(db::open_test_db("test_saves_and_retrieves_command_invocation").await);
CommandPaletteDB::open_test_db("test_saves_and_retrieves_command_invocation").await;
let retrieved_cmd = db.get_last_invoked("editor: backspace").unwrap();
@@ -137,7 +142,7 @@ mod tests {
#[gpui::test]
async fn test_gets_usage_history() {
let db = CommandPaletteDB(db::open_test_db("test_gets_usage_history").await);
let db = CommandPaletteDB::open_test_db("test_gets_usage_history").await;
db.write_command_invocation("go to line: toggle", "200")
.await
.unwrap();
@@ -162,7 +167,7 @@ mod tests {
#[gpui::test]
async fn test_lists_ordered_by_usage() {
let db = CommandPaletteDB(db::open_test_db("test_lists_ordered_by_usage").await);
let db = CommandPaletteDB::open_test_db("test_lists_ordered_by_usage").await;
let empty_commands = db.list_commands_used();
match &empty_commands {
@@ -195,7 +200,7 @@ mod tests {
#[gpui::test]
async fn test_handles_max_invocation_entries() {
let db = CommandPaletteDB(db::open_test_db("test_handles_max_invocation_entries").await);
let db = CommandPaletteDB::open_test_db("test_handles_max_invocation_entries").await;
for i in 1..=1001 {
db.write_command_invocation("some-command", &i.to_string())

Some files were not shown because too many files have changed in this diff.