Compare commits

1 Commit

Author: Jason Mancuso
SHA1: 26d754a168
Message: Add FindReplace workflow suggestion toolkind
Date: 2024-08-30 11:47:27 -04:00
896 changed files with 30925 additions and 52525 deletions

View File

@@ -1,8 +1,8 @@
We have two cloudflare workers that let us serve some assets of this repo
from Cloudflare.
- `open-source-website-assets` is used for `install.sh`
- `docs-proxy` is used for `https://zed.dev/docs`
* `open-source-website-assets` is used for `install.sh`
* `docs-proxy` is used for `https://zed.dev/docs`
On push to `main`, both of these (and the files they depend on) are uploaded to Cloudflare.
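
For context, a by-hand deploy of one of these workers would look roughly like the sketch below; the worker directory layout and the use of the `wrangler` CLI are illustrative assumptions, since the text above only states that CI uploads both workers on pushes to `main`.

```sh
# Hypothetical manual deploy of the docs proxy worker (CI does this automatically on push to main).
# The directory name is an assumption, not taken from this diff.
cd docs-proxy
npx wrangler deploy   # reads the worker's own wrangler.toml and publishes it to Cloudflare
```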

View File

@@ -10,7 +10,7 @@ runs:
cargo install cargo-nextest
- name: Install Node
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4
uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4
with:
node-version: "18"

View File

@@ -1,23 +0,0 @@
name: Bump collab-staging Tag
on:
schedule:
# Fire every day at 16:00 UTC (At the start of the US workday)
- cron: "0 16 * * *"
jobs:
update-collab-staging-tag:
if: github.repository_owner == 'zed-industries'
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
with:
fetch-depth: 0
- name: Update collab-staging tag
run: |
git config user.name github-actions
git config user.email github-actions@github.com
git tag -f collab-staging
git push origin collab-staging --force

View File

@@ -15,7 +15,8 @@ concurrency:
jobs:
bump_patch_version:
runs-on:
- buildjet-16vcpu-ubuntu-2204
- self-hosted
- test
steps:
- name: Checkout code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
@@ -41,7 +42,7 @@ jobs:
exit 1
;;
esac
which cargo-set-version > /dev/null || cargo install cargo-edit
which cargo-set-version > /dev/null || cargo install cargo-edit --features vendored-openssl
output=$(cargo set-version -p zed --bump patch 2>&1 | sed 's/.* //')
git commit -am "Bump to $output for @$GITHUB_ACTOR" --author "Zed Bot <hi@zed.dev>"
git tag v${output}${tag_suffix}

View File

@@ -39,7 +39,16 @@ jobs:
run: git clean -df
- name: Check spelling
run: script/check-spelling
run: |
if ! cargo install --list | grep "typos-cli v$TYPOS_CLI_VERSION" > /dev/null; then
echo "Installing typos-cli@$TYPOS_CLI_VERSION..."
cargo install "typos-cli@$TYPOS_CLI_VERSION"
else
echo "typos-cli@$TYPOS_CLI_VERSION is already installed."
fi
typos
env:
TYPOS_CLI_VERSION: "1.23.3"
- name: Run style checks
uses: ./.github/actions/check_style
@@ -101,7 +110,8 @@ jobs:
timeout-minutes: 60
name: (Linux) Run Clippy and tests
runs-on:
- buildjet-16vcpu-ubuntu-2204
- self-hosted
- deploy
steps:
- name: Add Rust to the PATH
run: echo "$HOME/.cargo/bin" >> $GITHUB_PATH
@@ -111,15 +121,6 @@ jobs:
with:
clean: false
- name: Cache dependencies
uses: swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
cache-provider: "buildjet"
- name: Install Linux dependencies
run: ./script/linux
- name: cargo clippy
run: ./script/clippy
@@ -144,7 +145,6 @@ jobs:
uses: swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
cache-provider: "github"
- name: cargo clippy
# Windows can't run shell scripts, so we need to use `cargo xtask`.
@@ -172,7 +172,7 @@ jobs:
DIGITALOCEAN_SPACES_SECRET_KEY: ${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }}
steps:
- name: Install Node
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4
uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4
with:
node-version: "18"
@@ -192,12 +192,29 @@ jobs:
- name: Determine version and release channel
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
# This exports RELEASE_CHANNEL into env (GITHUB_ENV)
script/determine-release-channel
set -eu
- name: Draft release notes
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
version=$(script/get-crate-version zed)
channel=$(cat crates/zed/RELEASE_CHANNEL)
echo "Publishing version: ${version} on release channel ${channel}"
echo "RELEASE_CHANNEL=${channel}" >> $GITHUB_ENV
expected_tag_name=""
case ${channel} in
stable)
expected_tag_name="v${version}";;
preview)
expected_tag_name="v${version}-pre";;
nightly)
expected_tag_name="v${version}-nightly";;
*)
echo "can't publish a release on channel ${channel}"
exit 1;;
esac
if [[ $GITHUB_REF_NAME != $expected_tag_name ]]; then
echo "invalid release tag ${GITHUB_REF_NAME}. expected ${expected_tag_name}"
exit 1
fi
mkdir -p target/
# Ignore any errors that occur while drafting release notes to not fail the build.
script/draft-release-notes "$version" "$channel" > target/release-notes.md || true
@@ -215,20 +232,20 @@ jobs:
mv target/x86_64-apple-darwin/release/Zed.dmg target/x86_64-apple-darwin/release/Zed-x86_64.dmg
- name: Upload app bundle (universal) to workflow run if main branch or specific label
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4
if: ${{ github.ref == 'refs/heads/main' }} || contains(github.event.pull_request.labels.*.name, 'run-bundling') }}
with:
name: Zed_${{ github.event.pull_request.head.sha || github.sha }}.dmg
path: target/release/Zed.dmg
- name: Upload app bundle (aarch64) to workflow run if main branch or specific label
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4
if: ${{ github.ref == 'refs/heads/main' }} || contains(github.event.pull_request.labels.*.name, 'run-bundling') }}
with:
name: Zed_${{ github.event.pull_request.head.sha || github.sha }}-aarch64.dmg
path: target/aarch64-apple-darwin/release/Zed-aarch64.dmg
- name: Upload app bundle (x86_64) to workflow run if main branch or specific label
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4
if: ${{ github.ref == 'refs/heads/main' }} || contains(github.event.pull_request.labels.*.name, 'run-bundling') }}
with:
name: Zed_${{ github.event.pull_request.head.sha || github.sha }}-x86_64.dmg
@@ -254,32 +271,57 @@ jobs:
timeout-minutes: 60
name: Create a Linux bundle
runs-on:
- buildjet-16vcpu-ubuntu-2004
- self-hosted
- deploy
if: ${{ startsWith(github.ref, 'refs/tags/v') || contains(github.event.pull_request.labels.*.name, 'run-bundling') }}
needs: [linux_tests]
env:
ZED_CLIENT_CHECKSUM_SEED: ${{ secrets.ZED_CLIENT_CHECKSUM_SEED }}
ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: ${{ secrets.ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON }}
steps:
- name: Add Rust to the PATH
run: echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Checkout repo
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
with:
clean: false
- name: Install Linux dependencies
run: ./script/linux && ./script/install-mold 2.34.0
- name: Limit target directory size
run: script/clear-target-dir-if-larger-than 100
- name: Determine version and release channel
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
# This exports RELEASE_CHANNEL into env (GITHUB_ENV)
script/determine-release-channel
set -eu
version=$(script/get-crate-version zed)
channel=$(cat crates/zed/RELEASE_CHANNEL)
echo "Publishing version: ${version} on release channel ${channel}"
echo "RELEASE_CHANNEL=${channel}" >> $GITHUB_ENV
expected_tag_name=""
case ${channel} in
stable)
expected_tag_name="v${version}";;
preview)
expected_tag_name="v${version}-pre";;
nightly)
expected_tag_name="v${version}-nightly";;
*)
echo "can't publish a release on channel ${channel}"
exit 1;;
esac
if [[ $GITHUB_REF_NAME != $expected_tag_name ]]; then
echo "invalid release tag ${GITHUB_REF_NAME}. expected ${expected_tag_name}"
exit 1
fi
- name: Create Linux .tar.gz bundle
run: script/bundle-linux
- name: Upload Linux bundle to workflow run if main branch or specific label
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4
if: ${{ github.ref == 'refs/heads/main' }} || contains(github.event.pull_request.labels.*.name, 'run-bundling') }}
with:
name: zed-${{ github.event.pull_request.head.sha || github.sha }}-x86_64-unknown-linux-gnu.tar.gz
@@ -297,11 +339,11 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
bundle-linux-aarch64: # this runs on ubuntu22.04
bundle-linux-aarch64:
timeout-minutes: 60
name: Create arm64 Linux bundle
runs-on:
- buildjet-16vcpu-ubuntu-2204-arm
- hosted-linux-arm-1
if: ${{ startsWith(github.ref, 'refs/tags/v') || contains(github.event.pull_request.labels.*.name, 'run-bundling') }}
needs: [linux_tests]
env:
@@ -312,21 +354,59 @@ jobs:
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
with:
clean: false
- name: "Setup jq"
uses: dcarbone/install-jq-action@8867ddb4788346d7c22b72ea2e2ffe4d514c7bcb # v2
- name: Install Linux dependencies
run: ./script/linux
- name: Set up Clang
run: |
sudo apt-get update
sudo apt-get install -y llvm-10 clang-10 build-essential cmake pkg-config libasound2-dev libfontconfig-dev libwayland-dev libxkbcommon-x11-dev libssl-dev libsqlite3-dev libzstd-dev libvulkan1 libgit2-dev
echo "/usr/lib/llvm-10/bin" >> $GITHUB_PATH
- uses: rui314/setup-mold@0bf4f07ef9048ec62a45f9dbf2f098afa49695f0 # v1
with:
mold-version: 2.32.0
- name: rustup
run: |
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Limit target directory size
run: script/clear-target-dir-if-larger-than 100
- name: Determine version and release channel
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
# This exports RELEASE_CHANNEL into env (GITHUB_ENV)
script/determine-release-channel
set -eu
version=$(script/get-crate-version zed)
channel=$(cat crates/zed/RELEASE_CHANNEL)
echo "Publishing version: ${version} on release channel ${channel}"
echo "RELEASE_CHANNEL=${channel}" >> $GITHUB_ENV
expected_tag_name=""
case ${channel} in
stable)
expected_tag_name="v${version}";;
preview)
expected_tag_name="v${version}-pre";;
nightly)
expected_tag_name="v${version}-nightly";;
*)
echo "can't publish a release on channel ${channel}"
exit 1;;
esac
if [[ $GITHUB_REF_NAME != $expected_tag_name ]]; then
echo "invalid release tag ${GITHUB_REF_NAME}. expected ${expected_tag_name}"
exit 1
fi
- name: Create and upload Linux .tar.gz bundle
run: script/bundle-linux
- name: Upload Linux bundle to workflow run if main branch or specific label
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4
if: ${{ github.ref == 'refs/heads/main' }} || contains(github.event.pull_request.labels.*.name, 'run-bundling') }}
with:
name: zed-${{ github.event.pull_request.head.sha || github.sha }}-aarch64-unknown-linux-gnu.tar.gz
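
The inline script added above (and repeated in the Linux bundle jobs) replaces `script/determine-release-channel`: it reads the crate version and the release channel, exports `RELEASE_CHANNEL`, and refuses to publish unless the pushed tag matches the expected name. A minimal standalone sketch of that check, with illustrative values:

```sh
# Sketch of the tag validation the workflow now performs inline; the values are illustrative.
version="0.151.0"               # normally: script/get-crate-version zed
channel="preview"               # normally: cat crates/zed/RELEASE_CHANNEL
GITHUB_REF_NAME="v0.151.0-pre"  # provided by GitHub Actions on tag builds
case "${channel}" in
  stable)  expected_tag_name="v${version}" ;;
  preview) expected_tag_name="v${version}-pre" ;;
  nightly) expected_tag_name="v${version}-nightly" ;;
  *) echo "can't publish a release on channel ${channel}"; exit 1 ;;
esac
if [[ "${GITHUB_REF_NAME}" != "${expected_tag_name}" ]]; then
  echo "invalid release tag ${GITHUB_REF_NAME}. expected ${expected_tag_name}"
  exit 1
fi
```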

View File

@@ -1,31 +0,0 @@
name: "Close Stale Issues"
on:
schedule:
- cron: "0 11 * * 2"
workflow_dispatch:
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: >
Hi there! 👋
We're working to clean up our issue tracker by closing older issues that might not be relevant anymore. Are you able to reproduce this issue in the latest version of Zed? If so, please let us know by commenting on this issue and we will keep it open; otherwise, we'll close it in 10 days. Feel free to open a new issue if you're seeing this message after the issue has been closed.
Thanks for your help!
close-issue-message: "This issue was closed due to inactivity; feel free to open a new issue if you're still experiencing this problem!"
# We will increase `days-before-stale` to 365 on or after Jan 24th,
# 2024. This date marks one year since migrating issues from
# 'community' to 'zed' repository. The migration added activity to all
# issues, preventing 365 days from working until then.
days-before-stale: 180
days-before-close: 10
any-of-issue-labels: "defect,panic / crash"
operations-per-run: 1000
ascending: true
enable-statistics: true
stale-issue-label: "stale"

View File

@@ -21,7 +21,7 @@ jobs:
version: 9
- name: Setup Node
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4
uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4
with:
node-version: "20"
cache: "pnpm"

View File

@@ -8,6 +8,7 @@ on:
env:
DOCKER_BUILDKIT: 1
DIGITALOCEAN_ACCESS_TOKEN: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
jobs:
style:
@@ -60,12 +61,11 @@ jobs:
- style
- tests
runs-on:
- buildjet-16vcpu-ubuntu-2204
- self-hosted
- deploy
steps:
- name: Install doctl
uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- name: Add Rust to the PATH
run: echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Sign into DigitalOcean docker registry
run: doctl registry login
@@ -75,12 +75,11 @@ jobs:
with:
clean: false
- name: Set up default .cargo/config.toml
run: cp ./.cargo/collab-config.toml ./.cargo/config.toml
- name: Build docker image
run: |
docker build -f Dockerfile-collab \
--build-arg GITHUB_SHA=$GITHUB_SHA \
--tag registry.digitalocean.com/zed/collab:$GITHUB_SHA \
.
run: docker build . --build-arg GITHUB_SHA=$GITHUB_SHA --tag registry.digitalocean.com/zed/collab:$GITHUB_SHA
- name: Publish docker image
run: docker push registry.digitalocean.com/zed/collab:${GITHUB_SHA}
@@ -93,19 +92,10 @@ jobs:
needs:
- publish
runs-on:
- buildjet-16vcpu-ubuntu-2204
- self-hosted
- deploy
steps:
- name: Checkout repo
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
with:
clean: false
- name: Install doctl
uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- name: Sign into Kubernetes
run: doctl kubernetes cluster kubeconfig save --expiry-seconds 600 ${{ secrets.CLUSTER_NAME }}

View File

@@ -20,11 +20,5 @@ jobs:
with:
version: 9
- run: |
pnpm dlx prettier . --check || {
echo "To fix, run from the root of the zed repo:"
echo " cd docs && pnpm dlx prettier . --write && cd .."
false
}
- run: pnpm dlx prettier . --check
working-directory: ./docs

View File

@@ -24,7 +24,6 @@ jobs:
uses: swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
cache-provider: "github"
- name: Configure linux
shell: bash -euxo pipefail {0}

View File

@@ -19,10 +19,11 @@ jobs:
tests:
name: Run randomized tests
runs-on:
- buildjet-16vcpu-ubuntu-2204
- self-hosted
- randomized-tests
steps:
- name: Install Node
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4
uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4
with:
node-version: "18"

View File

@@ -70,7 +70,7 @@ jobs:
ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: ${{ secrets.ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON }}
steps:
- name: Install Node
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4
uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4
with:
node-version: "18"
@@ -100,7 +100,8 @@ jobs:
name: Create a Linux *.tar.gz bundle for x86
if: github.repository_owner == 'zed-industries'
runs-on:
- buildjet-16vcpu-ubuntu-2004
- self-hosted
- deploy
needs: tests
env:
DIGITALOCEAN_SPACES_ACCESS_KEY: ${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }}
@@ -116,12 +117,6 @@ jobs:
- name: Add Rust to the PATH
run: echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Install Linux dependencies
run: ./script/linux && ./script/install-mold 2.34.0
- name: Limit target directory size
run: script/clear-target-dir-if-larger-than 100
- name: Set release channel to nightly
run: |
set -euo pipefail
@@ -153,8 +148,23 @@ jobs:
with:
clean: false
- name: Install Linux dependencies
run: ./script/linux
- name: "Setup jq"
uses: dcarbone/install-jq-action@8867ddb4788346d7c22b72ea2e2ffe4d514c7bcb # v2
- name: Set up Clang
run: |
sudo apt-get update
sudo apt-get install -y llvm-10 clang-10 build-essential cmake pkg-config libasound2-dev libfontconfig-dev libwayland-dev libxkbcommon-x11-dev libssl-dev libsqlite3-dev libzstd-dev libvulkan1 libgit2-dev
echo "/usr/lib/llvm-10/bin" >> $GITHUB_PATH
- uses: rui314/setup-mold@0bf4f07ef9048ec62a45f9dbf2f098afa49695f0 # v1
with:
mold-version: 2.32.0
- name: rustup
run: |
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Limit target directory size
run: script/clear-target-dir-if-larger-than 100
@@ -171,28 +181,3 @@ jobs:
- name: Upload Zed Nightly
run: script/upload-nightly linux-targz
update-nightly-tag:
name: Update nightly tag
if: github.repository_owner == 'zed-industries'
runs-on: ubuntu-latest
needs:
- bundle-mac
- bundle-linux-x86
- bundle-linux-arm
steps:
- name: Checkout repo
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
with:
fetch-depth: 0
- name: Update nightly tag
run: |
if [ "$(git rev-parse nightly)" = "$(git rev-parse HEAD)" ]; then
echo "Nightly tag already points to current commit. Skipping tagging."
exit 0
fi
git config user.name github-actions
git config user.email github-actions@github.com
git tag -f nightly
git push origin nightly --force

View File

@@ -9,7 +9,7 @@ jobs:
if: github.repository_owner == 'zed-industries'
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5
- uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5
with:
python-version: "3.11"
architecture: "x64"

View File

@@ -9,7 +9,7 @@ jobs:
if: github.repository_owner == 'zed-industries'
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
- uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5
- uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5
with:
python-version: "3.11"
architecture: "x64"

.gitignore (vendored): 2 changes
View File

@@ -10,7 +10,7 @@
/crates/collab/seed.json
/crates/zed/resources/flatpak/flatpak-cargo-sources.json
/dev.zed.Zed*.json
/assets/*licenses.*
/assets/*licenses.md
**/venv
.build
*.wasm

View File

@@ -11,12 +11,8 @@
Alex Viscreanu <alexviscreanu@gmail.com>
Alex Viscreanu <alexviscreanu@gmail.com> <alexandru.viscreanu@kiwi.com>
Alexander Mankuta <alex@pointless.one>
Alexander Mankuta <alex@pointless.one> <alex+github@pointless.one>
amtoaer <amtoaer@gmail.com>
amtoaer <amtoaer@gmail.com> <amtoaer@outlook.com>
Andrei Zvonimir Crnković <andrei@0x7f.dev>
Andrei Zvonimir Crnković <andrei@0x7f.dev> <andreicek@0x7f.dev>
Antonio Scandurra <me@as-cii.com>
Antonio Scandurra <me@as-cii.com> <antonio@zed.dev>
Bennet Bo Fenner <bennet@zed.dev>
@@ -28,9 +24,8 @@ Conrad Irwin <conrad@zed.dev>
Conrad Irwin <conrad@zed.dev> <conrad.irwin@gmail.com>
Danilo Leal <danilo@zed.dev>
Danilo Leal <danilo@zed.dev> <67129314+danilo-leal@users.noreply.github.com>
Evren Sen <nervenes@icloud.com>
Evren Sen <nervenes@icloud.com> <146845123+evrensen467@users.noreply.github.com>
Evren Sen <nervenes@icloud.com> <146845123+evrsen@users.noreply.github.com>
Evren Sen <146845123+evrensen467@users.noreply.github.com>
Evren Sen <146845123+evrensen467@users.noreply.github.com> <146845123+evrsen@users.noreply.github.com>
Fernando Tagawa <tagawafernando@gmail.com>
Fernando Tagawa <tagawafernando@gmail.com> <fernando.tagawa.gamail.com@gmail.com>
Greg Morenz <greg-morenz@droid.cafe>
@@ -54,12 +49,8 @@ LoganDark <contact@logandark.mozmail.com> <git@logandark.mozmail.com>
LoganDark <contact@logandark.mozmail.com> <github@logandark.mozmail.com>
Marshall Bowers <elliott.codes@gmail.com>
Marshall Bowers <elliott.codes@gmail.com> <marshall@zed.dev>
Matt Fellenz <matt@felle.nz>
Matt Fellenz <matt@felle.nz> <matt+github@felle.nz>
Max Brunsfeld <maxbrunsfeld@gmail.com>
Max Brunsfeld <maxbrunsfeld@gmail.com> <max@zed.dev>
Max Linke <maxlinke88@gmail.com>
Max Linke <maxlinke88@gmail.com> <kain88-de@users.noreply.github.com>
Mikayla Maki <mikayla@zed.dev>
Mikayla Maki <mikayla@zed.dev> <mikayla.c.maki@gmail.com>
Mikayla Maki <mikayla@zed.dev> <mikayla.c.maki@icloud.com>
@@ -84,18 +75,10 @@ Richard Feldman <oss@rtfeldman.com>
Richard Feldman <oss@rtfeldman.com> <richard@zed.dev>
Robert Clover <git@clo4.net>
Robert Clover <git@clo4.net> <robert@clover.gdn>
Roy Williams <roy.williams.iii@gmail.com>
Roy Williams <roy.williams.iii@gmail.com> <roy@anthropic.com>
Sergey Onufrienko <sergey@onufrienko.com>
Thorben Kröger <dev@thorben.net>
Thorben Kröger <dev@thorben.net> <thorben.kroeger@hexagon.com>
Thorsten Ball <thorsten@zed.dev>
Thorsten Ball <thorsten@zed.dev> <me@thorstenball.com>
Thorsten Ball <thorsten@zed.dev> <mrnugget@gmail.com>
Tristan Hume <tris.hume@gmail.com>
Tristan Hume <tris.hume@gmail.com> <tristan@anthropic.com>
Uladzislau Kaminski <i@uladkaminski.com>
Uladzislau Kaminski <i@uladkaminski.com> <uladzislau_kaminski@epam.com>
Vitaly Slobodin <vitaliy.slobodin@gmail.com>
Vitaly Slobodin <vitaliy.slobodin@gmail.com> <vitaly_slobodin@fastmail.com>
WindSoilder <WindSoilder@outlook.com>

View File

@@ -12,9 +12,6 @@
"tab_size": 2,
"formatter": "prettier"
},
"proto": {
"tab_size": 2
},
"JSON": {
"tab_size": 2,
"preferred_line_length": 100,
@@ -41,11 +38,6 @@
}
}
},
"file_types": {
"Dockerfile": ["Dockerfile*[!dockerignore]"],
"Git Ignore": ["dockerignore"]
},
"hard_tabs": false,
"formatter": "auto",
"remove_trailing_whitespace_on_save": true,
"ensure_final_newline_on_save": true

Cargo.lock (generated): 1478 changes

File diff suppressed because it is too large.

View File

@@ -6,7 +6,6 @@ members = [
"crates/assets",
"crates/assistant",
"crates/assistant_slash_command",
"crates/assistant_tool",
"crates/audio",
"crates/auto_update",
"crates/breadcrumbs",
@@ -27,7 +26,6 @@ members = [
"crates/diagnostics",
"crates/docs_preprocessor",
"crates/editor",
"crates/evals",
"crates/extension",
"crates/extension_api",
"crates/extension_cli",
@@ -52,7 +50,6 @@ members = [
"crates/indexed_docs",
"crates/inline_completion_button",
"crates/install_cli",
"crates/isahc_http_client",
"crates/journal",
"crates/language",
"crates/language_model",
@@ -74,6 +71,7 @@ members = [
"crates/outline",
"crates/outline_panel",
"crates/paths",
"crates/performance",
"crates/picker",
"crates/prettier",
"crates/project",
@@ -120,7 +118,6 @@ members = [
"crates/title_bar",
"crates/ui",
"crates/ui_input",
"crates/ui_macros",
"crates/util",
"crates/vcs_menu",
"crates/vim",
@@ -169,7 +166,7 @@ members = [
# Tooling
#
"tooling/xtask",
"tooling/xtask"
]
default-members = ["crates/zed"]
@@ -184,7 +181,6 @@ anthropic = { path = "crates/anthropic" }
assets = { path = "crates/assets" }
assistant = { path = "crates/assistant" }
assistant_slash_command = { path = "crates/assistant_slash_command" }
assistant_tool = { path = "crates/assistant_tool" }
audio = { path = "crates/audio" }
auto_update = { path = "crates/auto_update" }
breadcrumbs = { path = "crates/breadcrumbs" }
@@ -227,7 +223,6 @@ image_viewer = { path = "crates/image_viewer" }
indexed_docs = { path = "crates/indexed_docs" }
inline_completion_button = { path = "crates/inline_completion_button" }
install_cli = { path = "crates/install_cli" }
isahc_http_client = { path = "crates/isahc_http_client" }
journal = { path = "crates/journal" }
language = { path = "crates/language" }
language_model = { path = "crates/language_model" }
@@ -249,6 +244,7 @@ open_ai = { path = "crates/open_ai" }
outline = { path = "crates/outline" }
outline_panel = { path = "crates/outline_panel" }
paths = { path = "crates/paths" }
performance = { path = "crates/performance" }
picker = { path = "crates/picker" }
plugin = { path = "crates/plugin" }
plugin_macros = { path = "crates/plugin_macros" }
@@ -296,7 +292,6 @@ time_format = { path = "crates/time_format" }
title_bar = { path = "crates/title_bar" }
ui = { path = "crates/ui" }
ui_input = { path = "crates/ui_input" }
ui_macros = { path = "crates/ui_macros" }
util = { path = "crates/util" }
vcs_menu = { path = "crates/vcs_menu" }
vim = { path = "crates/vim" }
@@ -314,31 +309,28 @@ aho-corasick = "1.1"
alacritty_terminal = { git = "https://github.com/alacritty/alacritty", rev = "91d034ff8b53867143c005acfaa14609147c9a2c" }
any_vec = "0.14"
anyhow = "1.0.86"
arrayvec = { version = "0.7.4", features = ["serde"] }
ashpd = "0.9.1"
async-compression = { version = "0.4", features = ["gzip", "futures-io"] }
async-dispatcher = "0.1"
async-fs = "1.6"
async-pipe = { git = "https://github.com/zed-industries/async-pipe-rs", rev = "82d00a04211cf4e1236029aa03e6b6ce2a74c553" }
async-recursion = "1.0.0"
async-tar = "0.5.0"
async-tar = "0.4.2"
async-trait = "0.1"
async-tungstenite = "0.23"
async-watch = "0.3.1"
async_zip = { version = "0.0.17", features = ["deflate", "deflate64"] }
base64 = "0.22"
bitflags = "2.6.0"
blade-graphics = { git = "https://github.com/kvark/blade", rev = "e142a3a5e678eb6a13e642ad8401b1f3aa38e969" }
blade-macros = { git = "https://github.com/kvark/blade", rev = "e142a3a5e678eb6a13e642ad8401b1f3aa38e969" }
blade-util = { git = "https://github.com/kvark/blade", rev = "e142a3a5e678eb6a13e642ad8401b1f3aa38e969" }
blake3 = "1.5.3"
blade-graphics = { git = "https://github.com/kvark/blade", rev = "fee06c42f658b36dd9ac85444a9ee2a481383695" }
blade-macros = { git = "https://github.com/kvark/blade", rev = "fee06c42f658b36dd9ac85444a9ee2a481383695" }
blade-util = { git = "https://github.com/kvark/blade", rev = "fee06c42f658b36dd9ac85444a9ee2a481383695" }
cargo_metadata = "0.18"
cargo_toml = "0.20"
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.4", features = ["derive"] }
clickhouse = "0.11.6"
cocoa = "0.26"
convert_case = "0.6.0"
core-foundation = "0.9.3"
core-foundation-sys = "0.8.6"
ctor = "0.2.6"
@@ -373,7 +365,7 @@ linkify = "0.10.0"
log = { version = "0.4.16", features = ["kv_unstable_serde", "serde"] }
markup5ever_rcdom = "0.3.0"
nanoid = "0.4"
nix = "0.29"
nix = "0.28"
num-format = "0.4.4"
once_cell = "1.19.0"
ordered-float = "2.1.1"
@@ -386,7 +378,7 @@ pretty_assertions = "1.3.0"
prost = "0.9"
prost-build = "0.9"
prost-types = "0.9"
pulldown-cmark = { version = "0.12.0", default-features = false }
pulldown-cmark = { version = "0.10.0", default-features = false }
rand = "0.8.5"
regex = "1.5"
repair_json = "0.1.0"
@@ -394,16 +386,15 @@ rsa = "0.9.6"
runtimelib = { version = "0.15", default-features = false, features = [
"async-dispatcher-runtime",
] }
rusqlite = { version = "0.29.0", features = ["blob", "array", "modern_sqlite"] }
rustc-demangle = "0.1.23"
rust-embed = { version = "8.4", features = ["include-exclude"] }
rustls = "0.20.3"
rustls-native-certs = "0.8.0"
schemars = { version = "0.8", features = ["impl_json_schema"] }
semver = "1.0"
serde = { version = "1.0", features = ["derive", "rc"] }
serde_derive = { version = "1.0", features = ["deserialize_in_place"] }
serde_json = { version = "1.0", features = ["preserve_order", "raw_value"] }
serde_json_lenient = { version = "0.2", features = [
serde_json_lenient = { version = "0.1", features = [
"preserve_order",
"raw_value",
] }
@@ -420,7 +411,7 @@ strsim = "0.11"
strum = { version = "0.25.0", features = ["derive"] }
subtle = "2.5.0"
sys-locale = "0.3.1"
sysinfo = "0.31.0"
sysinfo = "0.30.7"
tempfile = "3.9.0"
thiserror = "1.0.29"
tiktoken-rs = "0.5.9"
@@ -435,48 +426,48 @@ tiny_http = "0.8"
toml = "0.8"
tokio = { version = "1", features = ["full"] }
tower-http = "0.4.4"
tree-sitter = { version = "0.23", features = ["wasm"] }
tree-sitter-bash = "0.23"
tree-sitter-c = "0.23"
tree-sitter-cpp = "0.23"
tree-sitter-css = "0.23"
tree-sitter-elixir = "0.3"
tree-sitter-embedded-template = "0.23.0"
tree-sitter-go = "0.23"
tree-sitter-go-mod = { git = "https://github.com/zed-industries/tree-sitter-go-mod", rev = "a9aea5e358cde4d0f8ff20b7bc4fa311e359c7ca", package = "tree-sitter-gomod" }
tree-sitter-gowork = { git = "https://github.com/zed-industries/tree-sitter-go-work", rev = "acb0617bf7f4fda02c6217676cc64acb89536dc7" }
tree-sitter-heex = { git = "https://github.com/zed-industries/tree-sitter-heex", rev = "1dd45142fbb05562e35b2040c6129c9bca346592" }
tree-sitter = { version = "0.22", features = ["wasm"] }
tree-sitter-bash = "0.21"
tree-sitter-c = "0.21"
tree-sitter-cpp = "0.22"
tree-sitter-css = "0.21"
tree-sitter-elixir = "0.2"
tree-sitter-embedded-template = "0.20.0"
tree-sitter-go = "0.21"
tree-sitter-go-mod = { git = "https://github.com/camdencheek/tree-sitter-go-mod", rev = "1f55029bacd0a6a11f6eb894c4312d429dcf735c", package = "tree-sitter-gomod" }
tree-sitter-gowork = { git = "https://github.com/d1y/tree-sitter-go-work", rev = "dcbabff454703c3a4bc98a23cf8778d4be46fd22" }
tree-sitter-heex = { git = "https://github.com/phoenixframework/tree-sitter-heex", rev = "6dd0303acf7138dd2b9b432a229e16539581c701" }
tree-sitter-html = "0.20"
tree-sitter-jsdoc = "0.23"
tree-sitter-json = "0.23"
tree-sitter-md = { git = "https://github.com/zed-industries/tree-sitter-markdown", rev = "4cfa6aad6b75052a5077c80fd934757d9267d81b" }
protols-tree-sitter-proto = { git = "https://github.com/zed-industries/tree-sitter-proto", rev = "0848bd30a64be48772e15fbb9d5ba8c0cc5772ad" }
tree-sitter-python = "0.23"
tree-sitter-regex = "0.23"
tree-sitter-ruby = "0.23"
tree-sitter-rust = "0.23"
tree-sitter-typescript = "0.23"
tree-sitter-yaml = { git = "https://github.com/zed-industries/tree-sitter-yaml", rev = "baff0b51c64ef6a1fb1f8390f3ad6015b83ec13a" }
tree-sitter-jsdoc = "0.21"
tree-sitter-json = "0.21"
tree-sitter-md = { git = "https://github.com/zed-industries/tree-sitter-markdown", rev = "e3855e37f8f2c71aa7513c18a9c95fb7461b1b10" }
protols-tree-sitter-proto = "0.2"
tree-sitter-python = "0.21"
tree-sitter-regex = "0.21"
tree-sitter-ruby = "0.21"
tree-sitter-rust = "0.21"
tree-sitter-typescript = "0.21"
tree-sitter-yaml = "0.6"
unindent = "0.1.7"
unicase = "2.6"
unicode-segmentation = "1.10"
url = "2.2"
uuid = { version = "1.1.2", features = ["v4", "v5", "serde"] }
wasmparser = "0.215"
wasm-encoder = "0.215"
wasmtime = { version = "24", default-features = false, features = [
wasmparser = "0.201"
wasm-encoder = "0.201"
wasmtime = { version = "21.0.1", default-features = false, features = [
"async",
"demangle",
"runtime",
"cranelift",
"component-model",
] }
wasmtime-wasi = "24"
wasmtime-wasi = "21.0.1"
which = "6.0.0"
wit-component = "0.201"
[workspace.dependencies.async-stripe]
version = "0.39"
version = "0.38"
default-features = false
features = [
"runtime-tokio-hyper-rustls",
@@ -493,7 +484,7 @@ version = "0.58"
features = [
"implement",
"Foundation_Numerics",
"Storage",
"System",
"System_Threading",
"UI_ViewManagement",
"Wdk_System_SystemServices",
@@ -524,10 +515,13 @@ features = [
"Win32_UI_Input_Ime",
"Win32_UI_Input_KeyboardAndMouse",
"Win32_UI_Shell",
"Win32_UI_Shell_Common",
"Win32_UI_WindowsAndMessaging",
]
[patch.crates-io]
# Patch Tree-sitter for updated wasmtime.
tree-sitter = { git = "https://github.com/tree-sitter/tree-sitter", rev = "7f4a57817d58a2f134fe863674acad6bbf007228" }
[profile.dev]
split-debuginfo = "unpacked"
debug = "limited"
@@ -579,17 +573,8 @@ single_range_in_vec_init = "allow"
# allow all of those, for now.
style = { level = "allow", priority = -1 }
# Temporary list of style lints that we've fixed so far.
module_inception = { level = "deny" }
question_mark = { level = "deny" }
redundant_closure = { level = "deny" }
# Individual rules that have violations in the codebase:
type_complexity = "allow"
# We often return trait objects from `new` functions.
new_ret_no_self = { level = "allow" }
# We have a few `next` functions that differ in lifetimes
# compared to Iterator::next. Yet, clippy complains about those.
should_implement_trait = { level = "allow" }
[workspace.metadata.cargo-machete]
ignored = ["bindgen", "cbindgen", "prost_build", "serde"]

Dockerfile (new file): 33 lines
View File

@@ -0,0 +1,33 @@
# syntax = docker/dockerfile:1.2
FROM rust:1.80-bookworm as builder
WORKDIR app
COPY . .
# Compile collab server
ARG CARGO_PROFILE_RELEASE_PANIC=abort
ARG GITHUB_SHA
ENV GITHUB_SHA=$GITHUB_SHA
RUN --mount=type=cache,target=./script/node_modules \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=./target \
cargo build --release --package collab --bin collab
# Copy collab server binary out of cached directory
RUN --mount=type=cache,target=./target \
cp /app/target/release/collab /app/collab
# Copy collab server binary to the runtime image
FROM debian:bookworm-slim as runtime
RUN apt-get update; \
apt-get install -y --no-install-recommends libcurl4-openssl-dev ca-certificates \
linux-perf binutils
WORKDIR app
COPY --from=builder /app/collab /app/collab
COPY --from=builder /app/crates/collab/migrations /app/migrations
COPY --from=builder /app/crates/collab/migrations_llm /app/migrations_llm
ENV MIGRATIONS_PATH=/app/migrations
ENV LLM_DATABASE_MIGRATIONS_PATH=/app/migrations_llm
ENTRYPOINT ["/app/collab"]
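
To exercise this Dockerfile outside CI, a local build mirroring the `docker build` step in the deploy workflow above would look roughly like this; the local tag is an illustrative assumption, and BuildKit is required for the `--mount=type=cache` steps:

```sh
# Local build of the collab image; BuildKit is needed for the cache mounts above.
DOCKER_BUILDKIT=1 docker build . \
  --build-arg GITHUB_SHA="$(git rev-parse HEAD)" \
  --tag collab:local
```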

View File

@@ -1,60 +0,0 @@
# syntax = docker/dockerfile:1.2
FROM rust:1.81-bookworm as builder
WORKDIR app
COPY . .
# Replace the Cargo configuration with the one used by collab.
COPY ./.cargo/collab-config.toml ./.cargo/config.toml
# Compile collab server
ARG CARGO_PROFILE_RELEASE_PANIC=abort
ARG GITHUB_SHA
ENV GITHUB_SHA=$GITHUB_SHA
# At some point in the past 3 weeks, additional dependencies on `xkbcommon` and
# `xkbcommon-x11` were introduced into collab.
#
# A `git bisect` points to this commit as being the culprit: `b8e6098f60e5dabe98fe8281f993858dacc04a55`.
#
# Now when we try to build collab for the Docker image, it fails with the following
# error:
#
# ```
# 985.3 = note: /usr/bin/ld: cannot find -lxkbcommon: No such file or directory
# 985.3 /usr/bin/ld: cannot find -lxkbcommon-x11: No such file or directory
# 985.3 collect2: error: ld returned 1 exit status
# ```
#
# The last successful deploys were at:
# - Staging: `4f408ec65a3867278322a189b4eb20f1ab51f508`
# - Production: `fc4c533d0a8c489e5636a4249d2b52a80039fbd7`
#
# Installing these as a temporary workaround, but I think ideally we'd want to figure
# out what caused them to be included in the first place.
RUN apt-get update; \
apt-get install -y --no-install-recommends libxkbcommon-dev libxkbcommon-x11-dev
RUN --mount=type=cache,target=./script/node_modules \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=./target \
cargo build --release --package collab --bin collab
# Copy collab server binary out of cached directory
RUN --mount=type=cache,target=./target \
cp /app/target/release/collab /app/collab
# Copy collab server binary to the runtime image
FROM debian:bookworm-slim as runtime
RUN apt-get update; \
apt-get install -y --no-install-recommends libcurl4-openssl-dev ca-certificates \
linux-perf binutils
WORKDIR app
COPY --from=builder /app/collab /app/collab
COPY --from=builder /app/crates/collab/migrations /app/migrations
COPY --from=builder /app/crates/collab/migrations_llm /app/migrations_llm
ENV MIGRATIONS_PATH=/app/migrations
ENV LLM_DATABASE_MIGRATIONS_PATH=/app/migrations_llm
ENTRYPOINT ["/app/collab"]

View File

@@ -1,26 +0,0 @@
# syntax=docker/dockerfile:1
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
WORKDIR /app
ARG TZ=Etc/UTC \
LANG=C.UTF-8 \
LC_ALL=C.UTF-8 \
DEBIAN_FRONTEND=noninteractive
ENV CARGO_TERM_COLOR=always
COPY script/linux script/
RUN ./script/linux
COPY script/install-mold script/install-cmake script/
RUN ./script/install-mold "2.34.0"
RUN ./script/install-cmake "3.30.4"
COPY . .
# When debugging, make these into individual RUN statements.
# Cleanup to avoid saving big layers we aren't going to use.
RUN . "$HOME/.cargo/env" \
&& cargo fetch \
&& cargo build \
&& cargo run -- --help \
&& cargo clean --quiet

View File

@@ -1,2 +0,0 @@
**/target
**/node_modules

View File

@@ -4,10 +4,11 @@
Welcome to Zed, a high-performance, multiplayer code editor from the creators of [Atom](https://github.com/atom/atom) and [Tree-sitter](https://github.com/tree-sitter/tree-sitter).
---
--------
### Installation
<a href="https://repology.org/project/zed-editor/versions">
<img src="https://repology.org/badge/vertical-allrepos/zed-editor.svg?minversion=0.143.5" alt="Packaging status" align="right">
</a>

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="lucide lucide-volume-off"><path d="M16 9a5 5 0 0 1 .95 2.293"/><path d="M19.364 5.636a9 9 0 0 1 1.889 9.96"/><path d="m2 2 20 20"/><path d="m7 7-.587.587A1.4 1.4 0 0 1 5.416 8H3a1 1 0 0 0-1 1v6a1 1 0 0 0 1 1h2.416a1.4 1.4 0 0 1 .997.413l3.383 3.384A.705.705 0 0 0 11 19.298V11"/><path d="M9.828 4.172A.686.686 0 0 1 11 4.657v.686"/></svg>

Before: 527 B

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="lucide lucide-volume-2"><path d="M11 4.702a.705.705 0 0 0-1.203-.498L6.413 7.587A1.4 1.4 0 0 1 5.416 8H3a1 1 0 0 0-1 1v6a1 1 0 0 0 1 1h2.416a1.4 1.4 0 0 1 .997.413l3.383 3.384A.705.705 0 0 0 11 19.298z"/><path d="M16 9a5 5 0 0 1 0 6"/><path d="M19.364 18.364a9 9 0 0 0 0-12.728"/></svg>

Before: 475 B

View File

Before: 2.8 KiB → After: 2.8 KiB

View File

Before: 1.1 KiB → After: 1.1 KiB

View File

Before: 677 B → After: 677 B

View File

Before: 785 B → After: 785 B

View File

Before: 443 B → After: 443 B

View File

@@ -1,6 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M13 11V11.8374C13 11.9431 12.9665 12.046 12.9044 12.1315L12.1498 13.1691C12.0557 13.2985 11.9054 13.375 11.7454 13.375H4.25461C4.09464 13.375 3.94433 13.2985 3.85024 13.1691L3.09563 12.1315C3.03348 12.046 3 11.9431 3 11.8374V3" stroke="black" stroke-width="1.5" stroke-linecap="round"/>
<path d="M3 13V11L8 12H13V13H3Z" fill="black"/>
<path d="M6.63246 3.04418C7.44914 3.31641 8 4.08069 8 4.94155V11.7306C8 12.0924 7.62757 12.3345 7.29693 12.1875L3.79693 10.632C3.61637 10.5518 3.5 10.3727 3.5 10.1751V2.69374C3.5 2.35246 3.83435 2.11148 4.15811 2.2194L6.63246 3.04418Z" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M9.5 3C8.67157 3 8 3.67157 8 4.5V13C8 12.1954 11.2366 12.0382 12.5017 12.0075C12.7778 12.0008 13 11.7761 13 11.5V3.5C13 3.22386 12.7761 3 12.5 3H9.5Z" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Before: 1009 B

View File

@@ -1,5 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3 5H11" stroke="black" stroke-width="1.5" stroke-linecap="round"/>
<path d="M3 8H13" stroke="black" stroke-width="1.5" stroke-linecap="round"/>
<path d="M3 11H9" stroke="black" stroke-width="1.5" stroke-linecap="round"/>
</svg>

Before: 334 B

View File

@@ -1,6 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5 13C6.10457 13 7 12.1046 7 11C7 9.89543 6.10457 9 5 9C3.89543 9 3 9.89543 3 11C3 12.1046 3.89543 13 5 13Z" stroke="black" stroke-width="1.5"/>
<path d="M11 7C12.1046 7 13 6.10457 13 5C13 3.89543 12.1046 3 11 3C9.89543 3 9 3.89543 9 5C9 6.10457 9.89543 7 11 7Z" fill="black" stroke="black" stroke-width="1.5"/>
<path d="M4.625 3.625V8.375" stroke="black" stroke-width="1.5" stroke-linecap="round"/>
<path d="M11 7C11 9.20914 9.20914 11 7 11" stroke="black" stroke-width="1.5"/>
</svg>

Before: 591 B

View File

@@ -1,4 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5 5C5 3.89543 5.89543 3 7 3H9C10.1046 3 11 3.89543 11 5V6H5V5Z" stroke="black" stroke-width="1.5"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M3.25 6.5C3.25 5.80964 3.80964 5.25 4.5 5.25H11.5C12.1904 5.25 12.75 5.80964 12.75 6.5V12.5C12.75 13.1904 12.1904 13.75 11.5 13.75H4.5C3.80964 13.75 3.25 13.1904 3.25 12.5V6.5ZM8.75 9.66146C8.90559 9.48517 9 9.25361 9 9C9 8.44772 8.55228 8 8 8C7.44772 8 7 8.44772 7 9C7 9.25361 7.09441 9.48517 7.25 9.66146V11C7.25 11.4142 7.58579 11.75 8 11.75C8.41421 11.75 8.75 11.4142 8.75 11V9.66146Z" fill="black"/>
</svg>

Before: 667 B

View File

@@ -1,4 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5.27935 10.9821C5.32063 10.4038 4.9204 9.89049 4.35998 9.80276L3.60081 9.68387C3.37979 9.64945 3.20167 9.48001 3.15225 9.25614L3.01378 8.63511C2.96382 8.41235 3.05233 8.1807 3.23696 8.05125L3.8631 7.61242C4.33337 7.28297 4.47456 6.6369 4.18621 6.13364L3.79467 5.45092C3.68118 5.25261 3.69801 5.00374 3.83757 4.82321L4.22314 4.32436C4.3627 4.14438 4.59621 4.06994 4.81071 4.13772L5.57531 4.37769C6.11944 4.54879 6.70048 4.26159 6.90683 3.71886L7.1811 2.99782C7.26255 2.78395 7.46345 2.64285 7.68772 2.6423L8.31007 2.64063C8.53434 2.64007 8.73579 2.78006 8.81834 2.99337L9.09965 3.72275C9.30821 4.26214 9.88655 4.54712 10.429 4.37714L11.1632 4.14716C11.3772 4.07994 11.6096 4.15382 11.7492 4.3327L12.1374 4.83099" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M8.76988 11.5933C8.76988 11.6595 8.8236 11.7133 8.88988 11.7133H8.97588C9.32602 11.7133 9.60988 11.9971 9.60988 12.3472C9.60988 12.6974 9.32602 12.9812 8.97588 12.9812H7.05587C6.70573 12.9812 6.42188 12.6974 6.42188 12.3472C6.42188 11.9971 6.70573 11.7133 7.05587 11.7133H7.14188C7.20815 11.7133 7.26188 11.6595 7.26188 11.5933V7.66925C7.26188 7.60298 7.20815 7.54925 7.14188 7.54925H7.05588C6.70573 7.54925 6.42188 7.2654 6.42188 6.91525C6.42188 6.5651 6.70573 6.28125 7.05588 6.28125H9.89988C11.0518 6.28125 12.8619 6.71487 12.8619 8.15185C12.8619 8.67078 12.7284 9.10362 12.4642 9.45348C12.1981 9.79765 11.8458 10.0564 11.4056 10.2293C11.3782 10.2401 11.3673 10.273 11.3829 10.298L12.2163 11.6342C12.247 11.6834 12.3008 11.7133 12.3588 11.7133H12.7319C13.082 11.7133 13.3659 11.9971 13.3659 12.3472C13.3659 12.6974 13.082 12.9812 12.7319 12.9812H11.5637C11.4955 12.9812 11.432 12.9465 11.3952 12.889L9.96523 10.6541C9.92847 10.5966 9.86495 10.5618 9.79675 10.5618H8.96988C8.85942 10.5618 8.76988 10.6514 8.76988 10.7619V11.5933ZM9.61188 7.54925C10.0296 7.54925 11.125 7.54925 11.2339 8.18785C11.2975 8.56123 11.1181 8.86557 10.8812 9.07715C10.6423 9.29046 10.2053 9.38985 9.58788 9.38985H8.86988C8.81465 9.38985 8.76988 9.34508 8.76988 9.28985V7.64925C8.76988 7.59402 8.81465 7.54925 8.86988 7.54925H9.61188Z" fill="black"/>
</svg>

Before: 2.2 KiB

View File

@@ -1,5 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M6 6H10" stroke="black" stroke-width="1.5" stroke-linecap="round"/>
<path d="M8 6V11" stroke="black" stroke-width="1.5" stroke-linecap="round"/>
<path d="M5 3H3.5C3.22386 3 3 3.22386 3 3.5V12.5C3 12.7761 3.22386 13 3.5 13H5M11 3H12.5C12.7761 3 13 3.22386 13 3.5V12.5C13 12.7761 12.7761 13 12.5 13H11" stroke="black" stroke-width="1.5" stroke-linecap="round"/>
</svg>

Before: 472 B

View File

@@ -1,3 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M8.26046 3.97337C8.3527 4.17617 8.4795 4.47151 8.57375 4.69341C8.65258 4.87898 8.83437 4.99999 9.03599 4.99999H12.5C12.7761 4.99999 13 5.22385 13 5.49999V12.125C13 12.4011 12.7761 12.625 12.5 12.625H3.5C3.22386 12.625 3 12.4011 3 12.125V3.86932C3 3.59318 3.22386 3.36932 3.5 3.36932H7.34219C7.74141 3.36932 8.09483 3.60924 8.26046 3.97337Z" stroke="black" stroke-width="1.5" stroke-linecap="round"/>
</svg>

Before: 512 B

View File

@@ -1,4 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M4.42782 7.2487C4.43495 6.97194 4.65009 6.75 4.91441 6.75H13.5293C13.7935 6.75 14.007 6.97194 13.9998 7.2487C13.9628 8.6885 13.7533 12.75 12.5721 12.75H3.375C4.55631 12.75 4.3907 8.6885 4.42782 7.2487Z" fill="black" stroke="black" stroke-width="1.5" stroke-linejoin="round"/>
<path d="M5.19598 12.625H3.66515C3.42618 12.625 3.22289 12.4453 3.18626 12.2017L1.94333 3.93602C1.89776 3.63295 2.12496 3.35938 2.42223 3.35938H5.78585C6.11241 3.35938 6.41702 3.52903 6.59618 3.81071L6.94517 4.35938H9.92811C10.4007 4.35938 10.8044 4.71102 10.8836 5.1917L11.1251 6.65624" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Before: 759 B

View File

@@ -1,4 +1 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M8 10V13" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6.71429 7.088C6.7142 7.26164 6.66963 7.43181 6.58559 7.57938C6.50156 7.72695 6.38138 7.84606 6.23857 7.92333L5.47571 8.34333C5.33291 8.4206 5.21273 8.53972 5.12869 8.68729C5.04465 8.83486 5.00008 9.00503 5 9.17867V9.53333C5 9.6571 5.04515 9.7758 5.12553 9.86332C5.2059 9.95083 5.31491 10 5.42857 10H10.5714C10.6851 10 10.7941 9.95083 10.8745 9.86332C10.9548 9.7758 11 9.6571 11 9.53333V9.17867C10.9999 9.00503 10.9553 8.83486 10.8713 8.68729C10.7873 8.53972 10.6671 8.4206 10.5243 8.34333L9.76143 7.92333C9.61862 7.84606 9.49844 7.72695 9.41441 7.57938C9.33037 7.43181 9.2858 7.26164 9.28571 7.088V5.33333C9.28571 5.20957 9.33087 5.09087 9.41124 5.00335C9.49161 4.91583 9.60062 4.86667 9.71429 4.86667C9.94161 4.86667 10.1596 4.76833 10.3204 4.5933C10.4811 4.41827 10.5714 4.18087 10.5714 3.93333C10.5714 3.6858 10.4811 3.4484 10.3204 3.27337C10.1596 3.09833 9.94161 3 9.71429 3H6.28571C6.05839 3 5.84037 3.09833 5.67962 3.27337C5.51888 3.4484 5.42857 3.6858 5.42857 3.93333C5.42857 4.18087 5.51888 4.41827 5.67962 4.5933C5.84037 4.76833 6.05839 4.86667 6.28571 4.86667C6.39938 4.86667 6.50839 4.91583 6.58876 5.00335C6.66913 5.09087 6.71429 5.20957 6.71429 5.33333V7.088Z" fill="black" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="lucide lucide-pin"><path d="M12 17v5"/><path d="M9 10.76a2 2 0 0 1-1.11 1.79l-1.78.9A2 2 0 0 0 5 15.24V16a1 1 0 0 0 1 1h12a1 1 0 0 0 1-1v-.76a2 2 0 0 0-1.11-1.79l-1.78-.9A2 2 0 0 1 15 10.76V7a1 1 0 0 1 1-1 2 2 0 0 0 0-4H8a2 2 0 0 0 0 4 1 1 0 0 1 1 1z"/></svg>

Before: 1.5 KiB → After: 447 B

View File

@@ -1,3 +1 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5 4L12 8L5 12V4Z" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
<svg width="15" height="15" viewBox="0 0 15 15" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M3.24182 2.32181C3.3919 2.23132 3.5784 2.22601 3.73338 2.30781L12.7334 7.05781C12.8974 7.14436 13 7.31457 13 7.5C13 7.68543 12.8974 7.85564 12.7334 7.94219L3.73338 12.6922C3.5784 12.774 3.3919 12.7687 3.24182 12.6782C3.09175 12.5877 3 12.4252 3 12.25V2.75C3 2.57476 3.09175 2.4123 3.24182 2.32181ZM4 3.57925V11.4207L11.4288 7.5L4 3.57925Z" fill="currentColor" fill-rule="evenodd" clip-rule="evenodd"></path></svg>

Before: 218 B → After: 518 B

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="lucide lucide-pocket-knife"><path d="M3 2v1c0 1 2 1 2 2S3 6 3 7s2 1 2 2-2 1-2 2 2 1 2 2"/><path d="M18 6h.01"/><path d="M6 18h.01"/><path d="M20.83 8.83a4 4 0 0 0-5.66-5.66l-12 12a4 4 0 1 0 5.66 5.66Z"/><path d="M18 11.66V22a4 4 0 0 0 4-4V6"/></svg>

Before: 438 B

View File

Before: 462 B → After: 462 B

View File

@@ -1,4 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3.16089 10.2476L3.99598 10.3784C4.61244 10.4749 5.05269 11.0395 5.00728 11.6755L4.94576 12.5377C4.92784 12.789 5.06165 13.0255 5.28326 13.1348L5.90091 13.4391C6.12253 13.5485 6.38717 13.5075 6.56817 13.3371L7.1888 12.7505C7.64641 12.3178 8.35245 12.3178 8.81059 12.7505L9.43121 13.3371C9.61222 13.5081 9.87629 13.5485 10.0985 13.4391L10.7173 13.1341C10.9384 13.0255 11.0716 12.7895 11.0537 12.539L10.9921 11.6755C10.9467 11.0395 11.3869 10.4749 12.0033 10.3784L12.8385 10.2476C13.0817 10.2097 13.2776 10.0233 13.3325 9.77768L13.4848 9.09455C13.5398 8.8489 13.4425 8.59408 13.2393 8.45229L12.5422 7.96404C12.0279 7.60355 11.8708 6.89963 12.1814 6.34659L12.6025 5.59745C12.7249 5.3793 12.7047 5.10616 12.5511 4.9094L12.1241 4.36128C11.9706 4.16451 11.7149 4.08325 11.4795 4.15719L10.6719 4.41016C10.0752 4.59714 9.43903 4.28367 9.20962 3.69035L8.90017 2.88803C8.80937 2.65339 8.58777 2.4994 8.34108 2.5L7.65649 2.50184C7.40979 2.50244 7.1888 2.65766 7.09921 2.89291L6.79751 3.68607C6.57053 4.28307 5.93138 4.59898 5.33284 4.41077L4.49178 4.1468C4.25583 4.07225 3.99897 4.15413 3.84545 4.35212L3.42133 4.90084C3.26781 5.09943 3.2493 5.37319 3.37414 5.59133L3.80483 6.34232C4.12201 6.89591 3.96671 7.60659 3.44941 7.96897L2.76065 8.45169C2.55756 8.59408 2.4602 8.84891 2.51516 9.09393L2.66747 9.77708C2.72184 10.0233 2.91777 10.2097 3.16089 10.2476Z" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M9.41432 6.83576C8.63332 6.05481 7.36676 6.05476 6.58575 6.83571C5.8048 7.61672 5.80476 8.88327 6.58571 9.66427C7.36671 10.4452 8.63326 10.4452 9.41426 9.66432C10.1952 8.88332 10.1952 7.61676 9.41432 6.83576Z" fill="black"/>
</svg>

Before: 1.7 KiB

View File

Before: 450 B → After: 450 B

View File

@@ -1,6 +0,0 @@
<svg width="14" height="14" viewBox="0 0 14 14" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3 4H8" stroke="black" stroke-width="1.75" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6 10L11 10" stroke="black" stroke-width="1.75" stroke-linecap="round" stroke-linejoin="round"/>
<circle cx="4" cy="10" r="1.875" stroke="black" stroke-width="1.75"/>
<circle cx="10" cy="4" r="1.875" stroke="black" stroke-width="1.75"/>
</svg>

Before: 450 B

View File

@@ -1,11 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3.6665 14V9.33333" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M3.6665 6.66667V2" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M8 14V8" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M8 5.33333V2" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M12.3335 14V10.6667" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M12.3335 8V2" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M2.3335 9.33333H5.00016" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6.6665 5.33334H9.33317" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M11 10.6667H13.6667" stroke="black" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Before: 1.1 KiB

View File

@@ -0,0 +1,8 @@
<svg width="15" height="15" viewBox="0 0 15 15" fill="none" xmlns="http://www.w3.org/2000/svg">
<path
fill-rule="evenodd"
clip-rule="evenodd"
d="M7.72361 1.05279C7.893 1.13749 8 1.31062 8 1.5V13.5C8 13.6894 7.893 13.8625 7.72361 13.9472C7.55421 14.0319 7.35151 14.0136 7.2 13.9L3.33333 11H1.5C0.671573 11 0 10.3284 0 9.5V5.5C0 4.67158 0.671573 4 1.5 4H3.33333L7.2 1.1C7.35151 0.986371 7.55421 0.968093 7.72361 1.05279ZM7 2.5L3.8 4.9C3.71345 4.96491 3.60819 5 3.5 5H1.5C1.22386 5 1 5.22386 1 5.5V9.5C1 9.77614 1.22386 10 1.5 10H3.5C3.60819 10 3.71345 10.0351 3.8 10.1L7 12.5V2.5ZM14.8536 5.14645C15.0488 5.34171 15.0488 5.65829 14.8536 5.85355L13.2071 7.5L14.8536 9.14645C15.0488 9.34171 15.0488 9.65829 14.8536 9.85355C14.6583 10.0488 14.3417 10.0488 14.1464 9.85355L12.5 8.20711L10.8536 9.85355C10.6583 10.0488 10.3417 10.0488 10.1464 9.85355C9.95118 9.65829 9.95118 9.34171 10.1464 9.14645L11.7929 7.5L10.1464 5.85355C9.95118 5.65829 9.95118 5.34171 10.1464 5.14645C10.3417 4.95118 10.6583 4.95118 10.8536 5.14645L12.5 6.79289L14.1464 5.14645C14.3417 4.95118 14.6583 4.95118 14.8536 5.14645Z"
fill="currentColor"
/>
</svg>

After: 1.1 KiB

View File

Before: 8.6 KiB → After: 8.6 KiB

View File

Before: 345 B → After: 345 B

View File

Before: 610 B → After: 610 B

View File

@@ -1,4 +1 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M6.28561 2.25C5.83777 2.25 5.42262 2.44429 5.12712 2.76607C4.8336 3.08568 4.67847 3.50652 4.67847 3.93333C4.67847 4.00474 4.68281 4.07598 4.69142 4.14661L10.0356 6.63864V5.58279C10.3579 5.51401 10.6495 5.3435 10.8728 5.10053C11.1662 4.78092 11.3213 4.36012 11.3213 3.93333C11.3213 3.50655 11.1661 3.08559 10.8726 2.76598C10.5771 2.44432 10.1621 2.25 9.71419 2.25H6.28561Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M1.70131 4.68303C1.87636 4.30763 2.3226 4.14521 2.698 4.32027L13.9361 9.5607C14.3115 9.73575 14.474 10.182 14.2989 10.5574C14.1239 10.9328 13.6776 11.0952 13.3022 10.9202L11.6006 10.1267C11.5536 10.2137 11.4958 10.2957 11.4272 10.3704C11.2121 10.6045 10.9057 10.75 10.5715 10.75H8.75007V13C8.75007 13.4142 8.41428 13.75 8.00007 13.75C7.58585 13.75 7.25007 13.4142 7.25007 13V10.75H5.42864C5.09445 10.75 4.78832 10.6049 4.57321 10.3706C4.36002 10.1385 4.25007 9.83637 4.25007 9.53334V9.17868C4.25021 8.87805 4.32715 8.57932 4.47703 8.31614C4.6266 8.05351 4.84546 7.83234 5.11609 7.68521L5.68873 7.36993L2.06407 5.67973C1.68867 5.50467 1.52625 5.05844 1.70131 4.68303Z" fill="black"/>
</svg>
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="lucide lucide-pin-off"><path d="M12 17v5"/><path d="M15 9.34V7a1 1 0 0 1 1-1 2 2 0 0 0 0-4H7.89"/><path d="m2 2 20 20"/><path d="M9 9v1.76a2 2 0 0 1-1.11 1.79l-1.78.9A2 2 0 0 0 5 15.24V16a1 1 0 0 0 1 1h11"/></svg>

Before: 1.2 KiB → After: 401 B

View File

Before: 947 B → After: 947 B

View File

Before: 1.7 KiB → After: 1.7 KiB

View File

Before: 238 B → After: 238 B

View File

@@ -1,10 +0,0 @@
<svg width="96" height="96" viewBox="0 0 96 96" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_1957_1318)">
<path fill-rule="evenodd" clip-rule="evenodd" d="M9 6C7.34315 6 6 7.34315 6 9V75H0V9C0 4.02944 4.02944 0 9 0H89.3787C93.3878 0 95.3955 4.84715 92.5607 7.68198L43.0551 57.1875H57V51H63V58.6875C63 61.1728 60.9853 63.1875 58.5 63.1875H37.0551L26.7426 73.5H73.5V36H79.5V73.5C79.5 76.8137 76.8137 79.5 73.5 79.5H20.7426L10.2426 90H87C88.6569 90 90 88.6569 90 87V21H96V87C96 91.9706 91.9706 96 87 96H6.62132C2.61224 96 0.604504 91.1529 3.43934 88.318L52.7574 39H39V45H33V37.5C33 35.0147 35.0147 33 37.5 33H58.7574L69.2574 22.5H22.5V60H16.5V22.5C16.5 19.1863 19.1863 16.5 22.5 16.5H75.2574L85.7574 6H9Z" fill="black"/>
</g>
<defs>
<clipPath id="clip0_1957_1318">
<rect width="96" height="96" fill="white"/>
</clipPath>
</defs>
</svg>

Before: 861 B

View File

@@ -1,14 +0,0 @@
<svg width="93" height="32" viewBox="0 0 93 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M9.03994 7.04962C8.00934 7.67635 7.30394 8.63219 7.30394 10.0149C7.30394 11.6908 7.72423 12.5893 8.20468 13.0744C8.68379 13.5581 9.40524 13.8149 10.4054 13.8149C11.815 13.8149 13.0291 13.5336 13.8802 12.9464C14.6756 12.3977 15.2708 11.5042 15.3438 9.96182C15.3991 8.79382 15.3678 8.01341 15.0568 7.45711C14.8094 7.01449 14.2326 6.47436 12.4901 6.27416C11.4684 6.15678 10.1114 6.39804 9.03994 7.04962ZM7.8731 5.13084C9.39145 4.2075 11.2531 3.87155 12.7464 4.04312C14.8843 4.28874 16.2844 5.05049 17.0171 6.36142C17.6863 7.55867 17.6384 8.98348 17.587 10.068C17.484 12.2439 16.5804 13.8118 15.1554 14.7949C13.7861 15.7396 12.0582 16.0606 10.4054 16.0606C9.04199 16.0606 7.65126 15.7069 6.60911 14.6547C5.5683 13.6038 5.05823 12.0408 5.05823 10.0149C5.05823 7.6958 6.31388 6.07903 7.8731 5.13084Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M13.983 18.2811C14.6595 18.2811 15.2079 18.8295 15.2079 19.506V22.16C15.2079 22.8365 14.6595 23.385 13.983 23.385C13.3065 23.385 12.758 22.8365 12.758 22.16V19.506C12.758 18.8295 13.3065 18.2811 13.983 18.2811Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M5.25567 14.8903C5.48362 14.039 5.99219 12.9059 7.20735 12.5361L7.92069 14.8798L7.92376 14.8785C7.92375 14.8785 7.92323 14.8778 7.92069 14.8798C7.91365 14.8852 7.89134 14.9052 7.85879 14.951C7.78817 15.0505 7.70058 15.2311 7.62217 15.524C7.46355 16.1164 7.40615 16.9168 7.40617 17.7813V22.1675C7.43457 22.3571 7.45083 22.438 7.48149 22.5189C7.50698 22.5862 7.55955 22.695 7.72277 22.8617C8.0938 23.2406 8.94056 23.8264 10.9275 24.7081C12.5399 25.4236 13.2749 25.7456 13.9586 25.9166C14.6053 26.0784 15.2382 26.1115 16.6666 26.1115V28.5613C15.2536 28.5613 14.3395 28.5372 13.3641 28.2932C12.452 28.0651 11.5174 27.6502 10.0614 27.004C10.0193 26.9853 9.9768 26.9664 9.93383 26.9474C7.89761 26.0438 6.6867 25.3053 5.97233 24.5757C5.59285 24.1882 5.34579 23.7967 5.19056 23.387C5.05419 23.0271 5.00551 22.6875 4.9751 22.4754C4.97301 22.4608 4.971 22.4468 4.96906 22.4335L4.9563 22.3458V17.7814C4.95628 16.8606 5.01201 15.8003 5.25567 14.8903Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M3.98306 15.3589C4.62844 14.6695 5.39976 13.9884 5.99652 13.6348L7.24552 15.7424C6.93706 15.9252 6.34938 16.4159 5.77155 17.0332C5.21994 17.6224 4.85695 18.1377 4.73071 18.4254C4.70277 18.6674 4.71195 19.116 4.78977 19.648C4.87834 20.2536 5.01731 20.7033 5.10492 20.8628L5.19261 21.0224L5.23006 21.2007C5.28661 21.4698 5.55952 21.8651 6.30146 22.3907C6.88143 22.8015 7.56502 23.1703 8.29605 23.5648C8.45794 23.6521 8.62215 23.7407 8.78809 23.8313C9.5952 24.2156 10.848 24.7773 12.0191 25.2425C12.613 25.4784 13.1698 25.6831 13.6288 25.8268C13.8584 25.8987 14.0505 25.9512 14.2021 25.9847C14.3503 26.0175 14.4185 26.0227 14.4277 26.0234C14.4288 26.0234 14.4281 26.0234 14.4277 26.0234L14.4287 28.4733C13.9646 28.4733 13.3889 28.3188 12.8968 28.1648C12.3562 27.9955 11.7367 27.7664 11.1147 27.5193C9.86871 27.0244 8.54832 26.4314 7.70298 26.028L7.67249 26.0135L7.64284 25.9973C7.49779 25.918 7.34502 25.8357 7.18702 25.7506C6.4505 25.354 5.60004 24.896 4.88539 24.3898C4.08363 23.8219 3.18153 23.0135 2.87437 21.8785C2.61828 21.3365 2.4557 20.618 2.3657 20.0026C2.26537 19.3167 2.2169 18.4951 2.33888 17.8727L2.35434 17.7938L2.37996 17.7176C2.6522 16.9085 3.36219 16.0221 3.98306 15.3589Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M3.48529 21.264V17.1402H5.93516V21.264H3.48529Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M24.2932 7.04962C25.3238 7.67635 26.0292 8.63219 26.0292 10.0149C26.0292 11.6908 25.609 12.5893 25.1285 13.0744C24.6494 13.5581 23.9279 13.8149 22.9278 13.8149C21.5182 13.8149 20.3041 13.5336 19.453 12.9464C18.6576 12.3977 18.0624 11.5042 17.9894 9.96182C17.9341 8.79382 17.9654 8.01341 18.2764 7.45711C18.5238 7.01449 19.1006 6.47436 20.8431 6.27416C21.8648 6.15678 23.2218 6.39804 24.2932 7.04962ZM25.4601 5.13084C23.9417 4.2075 22.0801 3.87155 20.5868 4.04312C18.4489 4.28874 17.0488 5.05049 16.3161 6.36142C15.6469 7.55867 15.6948 8.98348 15.7462 10.068C15.8492 12.2439 16.7528 13.8118 18.1778 14.7949C19.5471 15.7396 21.275 16.0606 22.9278 16.0606C24.2912 16.0606 25.6819 15.7069 26.7241 14.6547C27.7649 13.6038 28.275 12.0408 28.275 10.0149C28.275 7.6958 27.0193 6.07903 25.4601 5.13084Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M19.3502 18.2811C18.6737 18.2811 18.1253 18.8295 18.1253 19.506V22.16C18.1253 22.8365 18.6737 23.385 19.3502 23.385C20.0267 23.385 20.5752 22.8365 20.5752 22.16V19.506C20.5752 18.8295 20.0267 18.2811 19.3502 18.2811Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M28.0775 14.8903C27.8496 14.039 27.341 12.9059 26.1259 12.5361L25.4125 14.8798L25.4095 14.8785C25.4095 14.8785 25.41 14.8778 25.4125 14.8798C25.4196 14.8852 25.4419 14.9052 25.4744 14.951C25.545 15.0505 25.6326 15.2311 25.711 15.524C25.8697 16.1164 25.9271 16.9168 25.927 17.7813V22.1675C25.8986 22.3571 25.8824 22.438 25.8517 22.5189C25.8262 22.5862 25.7737 22.695 25.6104 22.8617C25.2394 23.2406 24.3927 23.8264 22.4057 24.7081C20.7933 25.4236 20.0583 25.7456 19.3746 25.9166C18.7279 26.0784 18.0949 26.1115 16.6666 26.1115V28.5613C18.0796 28.5613 18.9937 28.5372 19.9691 28.2932C20.8812 28.0651 21.8158 27.6502 23.2718 27.004C23.3139 26.9853 23.3564 26.9664 23.3994 26.9474C25.4356 26.0438 26.6465 25.3053 27.3609 24.5757C27.7404 24.1882 27.9874 23.7967 28.1427 23.387C28.279 23.0271 28.3277 22.6875 28.3581 22.4754C28.3602 22.4608 28.3622 22.4468 28.3642 22.4335L28.3769 22.3458V17.7814C28.3769 16.8606 28.3212 15.8003 28.0775 14.8903Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M29.3501 15.3589C28.7048 14.6695 27.9334 13.9884 27.3367 13.6348L26.0877 15.7424C26.3961 15.9252 26.9838 16.4159 27.5616 17.0332C28.1133 17.6224 28.4763 18.1377 28.6025 18.4254C28.6304 18.6674 28.6213 19.116 28.5434 19.648C28.4549 20.2536 28.3159 20.7033 28.2283 20.8628L28.1406 21.0224L28.1031 21.2007C28.0466 21.4698 27.7737 21.8651 27.0317 22.3907C26.4518 22.8015 25.7682 23.1703 25.0372 23.5648C24.8753 23.6521 24.711 23.7407 24.5451 23.8313C23.738 24.2156 22.4852 24.7773 21.3141 25.2425C20.7202 25.4784 20.1634 25.6831 19.7044 25.8268C19.4748 25.8987 19.2827 25.9512 19.1311 25.9847C18.9829 26.0175 18.9147 26.0227 18.9055 26.0234C18.9051 26.0234 18.9044 26.0234 18.9055 26.0234L18.9045 28.4733C19.3686 28.4733 19.9443 28.3188 20.4364 28.1648C20.977 27.9955 21.5965 27.7664 22.2185 27.5193C23.4645 27.0244 24.7849 26.4314 25.6302 26.028L25.6607 26.0135L25.6904 25.9973C25.8354 25.918 25.9882 25.8357 26.1462 25.7506C26.8827 25.354 27.7332 24.896 28.4478 24.3898C29.2496 23.8219 30.1517 23.0135 30.4588 21.8785C30.7149 21.3365 30.8775 20.618 30.9675 20.0026C31.0678 19.3167 31.1163 18.4951 30.9943 17.8727L30.9789 17.7938L30.9532 17.7176C30.681 16.9085 29.971 16.0221 29.3501 15.3589Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M29.8479 21.264V17.1402H27.398V21.264H29.8479Z" fill="black"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M48.6666 11C49.2189 11 49.6666 11.4477 49.6666 12V15H52.6666C53.2189 15 53.6666 15.4477 53.6666 16C53.6666 16.5523 53.2189 17 52.6666 17H49.6666V20C49.6666 20.5523 49.2189 21 48.6666 21C48.1143 21 47.6666 20.5523 47.6666 20V17H44.6666C44.1143 17 43.6666 16.5523 43.6666 16C43.6666 15.4477 44.1143 15 44.6666 15H47.6666V12C47.6666 11.4477 48.1143 11 48.6666 11Z" fill="black" fill-opacity="0.5"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M67.1666 4.33329C66.7064 4.33329 66.3333 4.70639 66.3333 5.16663V23.5H64.6666V5.16663C64.6666 3.78591 65.7859 2.66663 67.1666 2.66663H89.494C90.6077 2.66663 91.1654 4.01306 90.3779 4.80051L76.6264 18.552H80.5V16.8333H82.1666V18.9687C82.1666 19.6591 81.607 20.2187 80.9166 20.2187H74.9597L72.0951 23.0833H85.0833V12.6666H86.75V23.0833C86.75 24.0038 86.0038 24.75 85.0833 24.75H70.4285L67.5118 27.6666H88.8333C89.2935 27.6666 89.6666 27.2935 89.6666 26.8333V8.49996H91.3333V26.8333C91.3333 28.214 90.214 29.3333 88.8333 29.3333H66.5059C65.3922 29.3333 64.8345 27.9869 65.622 27.1994L79.3214 13.5H75.5V15.1666H73.8333V13.0833C73.8333 12.3929 74.3929 11.8333 75.0833 11.8333H80.9881L83.9048 8.91663H70.9166V19.3333H69.25V8.91663C69.25 7.99615 69.9962 7.24996 70.9166 7.24996H85.5714L88.4881 4.33329H67.1666Z" fill="black"/>
</svg>

Before: 8.2 KiB

View File

@@ -56,12 +56,14 @@
"shift-tab": "editor::TabPrev",
"ctrl-k": "editor::CutToEndOfLine",
// "ctrl-t": "editor::Transpose",
"alt-q": "editor::Rewrap",
"ctrl-backspace": "editor::DeleteToPreviousWordStart",
"ctrl-delete": "editor::DeleteToNextWordEnd",
"shift-delete": "editor::Cut",
"ctrl-x": "editor::Cut",
"ctrl-insert": "editor::Copy",
"ctrl-c": "editor::Copy",
"shift-insert": "editor::Paste",
"ctrl-v": "editor::Paste",
"ctrl-y": "editor::Redo",
"ctrl-z": "editor::Undo",
"ctrl-shift-z": "editor::Redo",
@@ -110,15 +112,6 @@
"alt-g b": "editor::ToggleGitBlame"
}
},
{
// Separate block with same context so these display in context menus
"context": "Editor",
"bindings": {
"ctrl-x": "editor::Cut",
"ctrl-c": "editor::Copy",
"ctrl-v": "editor::Paste"
}
},
{
"context": "Editor && mode == full",
"bindings": {
@@ -166,13 +159,11 @@
{
"context": "AssistantPanel",
"bindings": {
"ctrl-k c": "assistant::CopyCode",
"ctrl-g": "search::SelectNextMatch",
"ctrl-shift-g": "search::SelectPrevMatch",
"alt-m": "assistant::ToggleModelSelector",
"ctrl-k h": "assistant::DeployHistory",
"ctrl-k l": "assistant::DeployPromptLibrary",
"ctrl-n": "assistant::NewContext"
"ctrl-k l": "assistant::DeployPromptLibrary"
}
},
{
@@ -196,7 +187,7 @@
}
},
{
"context": "BufferSearchBar && in_replace > Editor",
"context": "BufferSearchBar && in_replace",
"bindings": {
"enter": "search::ReplaceNext",
"ctrl-enter": "search::ReplaceAll"
@@ -247,8 +238,6 @@
"bindings": {
"ctrl-pageup": "pane::ActivatePrevItem",
"ctrl-pagedown": "pane::ActivateNextItem",
"ctrl-shift-pageup": "pane::SwapItemLeft",
"ctrl-shift-pagedown": "pane::SwapItemRight",
"ctrl-w": "pane::CloseActiveItem",
"ctrl-f4": "pane::CloseActiveItem",
"alt-ctrl-t": "pane::CloseInactiveItems",
@@ -266,8 +255,7 @@
"alt-r": "search::ToggleRegex",
"alt-ctrl-f": "project_search::ToggleFilters",
"ctrl-alt-shift-r": "search::ToggleRegex",
"ctrl-alt-shift-x": "search::ToggleRegex",
"ctrl-k shift-enter": "pane::TogglePinTab"
"ctrl-alt-shift-x": "search::ToggleRegex"
}
},
// Bindings from VS Code
@@ -310,11 +298,6 @@
"ctrl-shift-\\": "editor::MoveToEnclosingBracket",
"ctrl-shift-[": "editor::Fold",
"ctrl-shift-]": "editor::UnfoldLines",
"ctrl-k ctrl-l": "editor::ToggleFold",
"ctrl-k ctrl-[": "editor::FoldRecursive",
"ctrl-k ctrl-]": "editor::UnfoldRecursive",
"ctrl-k ctrl-0": "editor::FoldAll",
"ctrl-k ctrl-j": "editor::UnfoldAll",
"ctrl-space": "editor::ShowCompletions",
"ctrl-.": "editor::ToggleCodeActions",
"alt-ctrl-r": "editor::RevealInFileManager",
@@ -525,13 +508,6 @@
"alt-enter": "editor::Newline"
}
},
{
"context": "PromptEditor",
"bindings": {
"ctrl-[": "assistant::CyclePreviousInlineAssist",
"ctrl-]": "assistant::CycleNextInlineAssist"
}
},
{
"context": "ProjectSearchBar && !in_replace",
"bindings": {
@@ -539,7 +515,7 @@
}
},
{
"context": "OutlinePanel && not_editing",
"context": "OutlinePanel",
"bindings": {
"escape": "menu::Cancel",
"left": "outline_panel::CollapseSelectedEntry",
@@ -559,34 +535,27 @@
"right": "project_panel::ExpandSelectedEntry",
"ctrl-n": "project_panel::NewFile",
"alt-ctrl-n": "project_panel::NewDirectory",
"ctrl-x": "project_panel::Cut",
"ctrl-c": "project_panel::Copy",
"ctrl-insert": "project_panel::Copy",
"ctrl-v": "project_panel::Paste",
"shift-insert": "project_panel::Paste",
"ctrl-alt-c": "project_panel::CopyPath",
"alt-ctrl-shift-c": "project_panel::CopyRelativePath",
"f2": "project_panel::Rename",
"enter": "project_panel::Rename",
"backspace": ["project_panel::Trash", { "skip_prompt": false }],
"shift-delete": ["project_panel::Delete", { "skip_prompt": false }],
"delete": ["project_panel::Trash", { "skip_prompt": false }],
"ctrl-backspace": ["project_panel::Delete", { "skip_prompt": false }],
"ctrl-delete": ["project_panel::Delete", { "skip_prompt": false }],
"alt-ctrl-r": "project_panel::RevealInFileManager",
"ctrl-shift-enter": "project_panel::OpenWithSystem",
"alt-shift-f": "project_panel::NewSearchInDirectory",
"shift-down": "menu::SelectNext",
"shift-up": "menu::SelectPrev",
"escape": "menu::Cancel"
}
},
{
// Separate block with same context so these display in context menus
"context": "ProjectPanel",
"bindings": {
"f2": "project_panel::Rename",
"ctrl-c": "project_panel::Copy",
"ctrl-x": "project_panel::Cut",
"ctrl-v": "project_panel::Paste",
"delete": ["project_panel::Trash", { "skip_prompt": false }]
}
},
{
"context": "ProjectPanel && not_editing",
"bindings": {
@@ -642,7 +611,9 @@
"context": "Terminal",
"bindings": {
"ctrl-alt-space": "terminal::ShowCharacterPalette",
"ctrl-shift-c": "terminal::Copy",
"ctrl-insert": "terminal::Copy",
"ctrl-shift-v": "terminal::Paste",
"shift-insert": "terminal::Paste",
"ctrl-enter": "assistant::InlineAssist",
// Overrides for conflicting keybindings
@@ -666,13 +637,5 @@
"shift-home": "terminal::ScrollToTop",
"shift-end": "terminal::ScrollToBottom"
}
},
{
// Separate block with same context so these display in context menus
"context": "Terminal",
"bindings": {
"ctrl-shift-c": "terminal::Copy",
"ctrl-shift-v": "terminal::Paste"
}
}
]

View File

@@ -51,7 +51,6 @@
"shift-tab": "editor::TabPrev",
"ctrl-k": "editor::CutToEndOfLine",
"ctrl-t": "editor::Transpose",
"alt-q": "editor::Rewrap",
"cmd-backspace": "editor::DeleteToBeginningOfLine",
"cmd-delete": "editor::DeleteToEndOfLine",
"alt-backspace": "editor::DeleteToPreviousWordStart",
@@ -188,13 +187,11 @@
{
"context": "AssistantPanel",
"bindings": {
"cmd-k c": "assistant::CopyCode",
"cmd-g": "search::SelectNextMatch",
"cmd-shift-g": "search::SelectPrevMatch",
"alt-m": "assistant::ToggleModelSelector",
"cmd-k h": "assistant::DeployHistory",
"cmd-k l": "assistant::DeployPromptLibrary",
"cmd-n": "assistant::NewContext"
"cmd-k l": "assistant::DeployPromptLibrary"
}
},
{
@@ -232,7 +229,7 @@
}
},
{
"context": "BufferSearchBar && in_replace > Editor",
"context": "BufferSearchBar && in_replace",
"bindings": {
"enter": "search::ReplaceNext",
"cmd-enter": "search::ReplaceAll"
@@ -287,8 +284,6 @@
"cmd-}": "pane::ActivateNextItem",
"alt-cmd-left": "pane::ActivatePrevItem",
"alt-cmd-right": "pane::ActivateNextItem",
"ctrl-shift-pageup": "pane::SwapItemLeft",
"ctrl-shift-pagedown": "pane::SwapItemRight",
"cmd-w": "pane::CloseActiveItem",
"alt-cmd-t": "pane::CloseInactiveItems",
"ctrl-alt-cmd-w": "workspace::CloseInactiveTabsAndPanes",
@@ -303,8 +298,7 @@
"alt-cmd-c": "search::ToggleCaseSensitive",
"alt-cmd-w": "search::ToggleWholeWord",
"alt-cmd-f": "project_search::ToggleFilters",
"alt-cmd-x": "search::ToggleRegex",
"cmd-k shift-enter": "pane::TogglePinTab"
"alt-cmd-x": "search::ToggleRegex"
}
},
// Bindings from VS Code
@@ -347,11 +341,6 @@
"cmd-shift-\\": "editor::MoveToEnclosingBracket",
"alt-cmd-[": "editor::Fold",
"alt-cmd-]": "editor::UnfoldLines",
"cmd-k cmd-l": "editor::ToggleFold",
"cmd-k cmd-[": "editor::FoldRecursive",
"cmd-k cmd-]": "editor::UnfoldRecursive",
"cmd-k cmd-0": "editor::FoldAll",
"cmd-k cmd-j": "editor::UnfoldAll",
"ctrl-space": "editor::ShowCompletions",
"cmd-.": "editor::ToggleCodeActions",
"alt-cmd-r": "editor::RevealInFileManager",
@@ -532,13 +521,6 @@
"ctrl-enter": "assistant::InlineAssist"
}
},
{
"context": "PromptEditor",
"bindings": {
"ctrl-[": "assistant::CyclePreviousInlineAssist",
"ctrl-]": "assistant::CycleNextInlineAssist"
}
},
{
"context": "ProjectSearchBar && !in_replace",
"bindings": {
@@ -546,7 +528,7 @@
}
},
{
"context": "OutlinePanel && not_editing",
"context": "OutlinePanel",
"bindings": {
"escape": "menu::Cancel",
"left": "outline_panel::CollapseSelectedEntry",
@@ -579,8 +561,8 @@
"cmd-backspace": ["project_panel::Trash", { "skip_prompt": true }],
"cmd-delete": ["project_panel::Delete", { "skip_prompt": false }],
"alt-cmd-r": "project_panel::RevealInFileManager",
"ctrl-shift-enter": "project_panel::OpenWithSystem",
"cmd-alt-backspace": ["project_panel::Delete", { "skip_prompt": false }],
"alt-shift-f": "project_panel::NewSearchInDirectory",
"shift-down": "menu::SelectNext",
"shift-up": "menu::SelectPrev",

View File

@@ -29,8 +29,6 @@
"shift-g": "vim::EndOfDocument",
"{": "vim::StartOfParagraph",
"}": "vim::EndOfParagraph",
"(": "vim::SentenceBackward",
")": "vim::SentenceForward",
"|": "vim::GoToColumn",
// Word motions
"w": "vim::NextWordStart",
@@ -132,15 +130,9 @@
"z z": "editor::ScrollCursorCenter",
"z .": ["workspace::SendKeystrokes", "z z ^"],
"z b": "editor::ScrollCursorBottom",
"z a": "editor::ToggleFold",
"z A": "editor::ToggleFoldRecursive",
"z c": "editor::Fold",
"z C": "editor::FoldRecursive",
"z o": "editor::UnfoldLines",
"z O": "editor::UnfoldRecursive",
"z f": "editor::FoldSelectedRanges",
"z M": "editor::FoldAll",
"z R": "editor::UnfoldAll",
"shift-z shift-q": ["pane::CloseActiveItem", { "saveIntent": "skip" }],
"shift-z shift-z": ["pane::CloseActiveItem", { "saveIntent": "saveAll" }],
// Count support
@@ -220,7 +212,7 @@
"shift-d": "vim::DeleteToEndOfLine",
"shift-j": "vim::JoinLines",
"y": ["vim::PushOperator", "Yank"],
"shift-y": "vim::YankLine",
"shift-y": "vim::YankToEndOfLine",
"i": "vim::InsertBefore",
"shift-i": "vim::InsertFirstNonWhitespace",
"a": "vim::InsertAfter",
@@ -245,8 +237,6 @@
"g shift-u": ["vim::PushOperator", "Uppercase"],
"g ~": ["vim::PushOperator", "OppositeCase"],
"\"": ["vim::PushOperator", "Register"],
"g q": ["vim::PushOperator", "Rewrap"],
"g w": ["vim::PushOperator", "Rewrap"],
"q": "vim::ToggleRecord",
"shift-q": "vim::ReplayLastRecording",
"@": ["vim::PushOperator", "ReplayRegister"],
@@ -298,8 +288,6 @@
"g ctrl-x": ["vim::Decrement", { "step": true }],
"shift-i": "vim::InsertBefore",
"shift-a": "vim::InsertAfter",
"g I": "vim::VisualInsertFirstNonWhiteSpace",
"g A": "vim::VisualInsertEndOfLine",
"shift-j": "vim::JoinLines",
"r": ["vim::PushOperator", "Replace"],
"ctrl-c": ["vim::SwitchMode", "Normal"],
@@ -310,7 +298,6 @@
"i": ["vim::PushOperator", { "Object": { "around": false } }],
"a": ["vim::PushOperator", { "Object": { "around": true } }],
"g c": "vim::ToggleComments",
"g q": "vim::Rewrap",
"\"": ["vim::PushOperator", "Register"],
// tree-sitter related commands
"[ x": "editor::SelectLargerSyntaxNode",
@@ -337,13 +324,6 @@
"ctrl-r": ["vim::PushOperator", "Register"]
}
},
{
"context": "vim_mode == insert && !(showing_code_actions || showing_completions)",
"bindings": {
"ctrl-p": "editor::ShowCompletions",
"ctrl-n": "editor::ShowCompletions"
}
},
{
"context": "vim_mode == replace",
"bindings": {
@@ -412,9 +392,7 @@
"context": "vim_operator == d",
"bindings": {
"d": "vim::CurrentLine",
"s": ["vim::PushOperator", "DeleteSurrounds"],
"o": "editor::ToggleHunkDiff", // "d o"
"p": "editor::RevertSelectedHunks" // "d p"
"s": ["vim::PushOperator", "DeleteSurrounds"]
}
},
{
@@ -438,15 +416,6 @@
"~": "vim::CurrentLine"
}
},
{
"context": "vim_operator == gq",
"bindings": {
"g q": "vim::CurrentLine",
"q": "vim::CurrentLine",
"g w": "vim::CurrentLine",
"w": "vim::CurrentLine"
}
},
{
"context": "vim_operator == y",
"bindings": {
@@ -513,7 +482,6 @@
"v": "project_panel::OpenPermanent",
"p": "project_panel::Open",
"x": "project_panel::RevealInFileManager",
"s": "project_panel::OpenWithSystem",
"shift-g": "menu::SelectLast",
"g g": "menu::SelectFirst",
"-": "project_panel::SelectParent",
@@ -521,7 +489,7 @@
}
},
{
"context": "OutlinePanel && not_editing",
"context": "OutlinePanel",
"bindings": {
"j": "menu::SelectNext",
"k": "menu::SelectPrev",

View File

@@ -47,17 +47,6 @@ And here's the section to rewrite based on that prompt again for reference:
<rewrite_this>
{{{rewrite_section}}}
</rewrite_this>
{{#if diagnostic_errors}}
{{#each diagnostic_errors}}
<diagnostic_error>
<line_number>{{line_number}}</line_number>
<error_message>{{error_message}}</error_message>
<code_content>{{code_content}}</code_content>
</diagnostic_error>
{{/each}}
{{/if}}
{{/if}}
Only make changes that are necessary to fulfill the prompt, leave everything else as-is. All surrounding {{content_type}} will be preserved.
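For context, when the `diagnostic_errors` branch shown above is rendered, each entry expands to a block like the following sketch (the line number, error message, and code content here are hypothetical placeholders, not taken from the diff):
```xml
<diagnostic_error>
<line_number>42</line_number>
<error_message>cannot find value `counter` in this scope</error_message>
<code_content>let total = counter + 1;</code_content>
</diagnostic_error>
```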

View File

@@ -1,312 +1,302 @@
<task_description>
<workflow>
Guide the user through code changes in numbered steps that focus on individual functions, type definitions, etc.
Surround each distinct step in a <step></step> XML tag. The user will be performing these steps in a code editor
named Zed, which is where they will have entered this prompt and will be seeing the response.
# Code Change Workflow
<instructions>
- Use the language of the file for code fence blocks unless otherwise specified.
- Include a code or file action in each step.
- Only put code in separate steps if it should either go in separate files, or in different (non-contiguous) places in the same file.
- Provide error handling and input validation where appropriate.
- Adapt explanations based on the user's perceived level of expertise.
- Include comments in code examples to enhance understanding.
- Consider more complex programming scenarios when relevant.
- Avoid using the terminal to perform filesystem operations such as creating, deleting, renaming, and editing files. Assume the user prefers to use Zed for these operations, unless the prompt suggests that the user is expecting terminal commands.
- When creating a new file, Zed will automatically create any necessary directories in that file's path. So when a step will create a new file in Zed, don't mention needing to create the directory that it will go in.
- Assume the user is performing these actions in the Zed code editor, so avoid redundancies like "In your code editor, ..." or "In Zed, ..."
- Be concise without leaving out important information.
</instructions>
Your task is to guide the user through code changes using a series of steps. Each step should describe a high-level change, which can consist of multiple edits to distinct locations in the codebase.
Examples of user-assistant dialogs with step numbers and user follow-up requests:
## Output Example
Provide output as XML, with the following format:
<step>
Update the Person struct to store an age
<example language="rust">
<turn number="1">
<user>Create a Library struct with a vector of Book structs. Include methods to add books and find books by author. Use generics for the book ID.</user>
<assistant>Certainly! Let's create a Library struct with a vector of Book structs, including methods to add books and find books by author, using generics for the book ID:
<step>Define the Book and Library structs
```rust
struct Person {
// existing fields...
age: u8,
height: f32,
// existing fields...
use std::fmt::Display;
struct Book<T>
where
T: Display + Clone
{
id: T,
title: String,
author: String,
}
impl Person {
fn age(&self) -> u8 {
self.age
struct Library<T>
where
T: Display + Clone
{
books: Vec<Book<T>>,
}
```
</step>
<step>Implement methods for the Library struct
```rust
impl<T> Library<T>
where
T: Display + Clone
{
fn new() -> Self {
Library { books: Vec::new() }
}
fn add_book(&mut self, id: T, title: String, author: String) {
let book = Book { id, title, author };
self.books.push(book);
}
fn find_books_by_author(&self, author: &str) -> Vec<&Book<T>> {
self.books
.iter()
.filter(|book| book.author.to_lowercase() == author.to_lowercase())
.collect()
}
}
```
<edit>
<path>src/person.rs</path>
<operation>insert_before</operation>
<search>height: f32,</search>
<description>Add the age field</description>
</edit>
<edit>
<path>src/person.rs</path>
<operation>insert_after</operation>
<search>impl Person {</search>
<description>Add the age getter</description>
</edit>
</step>
## Output Format
This code defines:
1. A generic Book struct with an ID of type T, title, and author
2. A Library struct containing a vector of Books
3. Methods to create a new Library, add books, and find books by author
The use of generics allows flexibility in the type of ID used for books.</assistant>
</turn>
First, each `<step>` must contain a written description of the change that should be made. The description should begin with a high-level overview, and can contain markdown code blocks as well. The description should be self-contained and actionable.
After the description, each `<step>` must contain one or more `<edit>` tags, each of which refer to a specific range in a source file. Each `<edit>` tag must contain the following child tags:
### `<path>` (required)
This tag contains the path to the file that will be changed. It can be an existing path, or a path that should be created.
### `<search>` (optional)
This tag contains a search string to locate in the source file, e.g. `pub fn baz() {`. If not provided, the new content will be inserted at the top of the file. Make sure to produce a string that exists in the source file and that isn't ambiguous. When there's ambiguity, add more lines to the search to eliminate it.
### `<description>` (required)
This tag contains a single-line description of the edit that should be made at the given location.
### `<operation>` (required)
This tag indicates what type of change should be made, relative to the given location. It can be one of the following:
- `update`: Rewrites the specified string entirely based on the given description.
- `create`: Creates a new file with the given path based on the provided description.
- `insert_before`: Inserts new text based on the given description before the specified search string.
- `insert_after`: Inserts new text based on the given description after the specified search string.
- `delete`: Deletes the specified string from the containing file.
<guidelines>
- There's no need to describe *what* to do, just *where* to do it.
- Only reference locations that actually exist (unless you're creating a file).
- If creating a file, assume any subsequent updates are included at the time of creation.
- Don't create and then update a file. Always create new files in one shot.
- Prefer multiple edits to smaller regions, as opposed to one big edit to a larger region.
- Don't produce edits that intersect each other. In that case, merge them into a bigger edit.
- Never nest an edit within another edit. Never include CDATA. All edits are leaf nodes.
- Descriptions are required for all edits except delete.
- When generating multiple edits, ensure the descriptions are specific to each individual operation.
- Avoid referring to the search string in the description. Focus on the change to be made, not the location where it's made. That's implicit with the `search` string you provide.
- Don't generate multiple edits at the same location. Instead, combine them together in a single edit with a succinct combined description.
- Always ensure imports are added if you're referencing symbols that are not in scope.
</guidelines>
Here are some concrete examples.
<example>
<message role="user">
```rs src/shapes.rs
pub mod rectangle;
pub mod circle;
```
```rs src/shapes/rectangle.rs
pub struct Rectangle {
width: f64,
height: f64,
}
impl Rectangle {
pub fn new(width: f64, height: f64) -> Self {
Rectangle { width, height }
}
}
```
```rs src/shapes/circle.rs
pub struct Circle {
radius: f64,
}
impl Circle {
pub fn new(radius: f64) -> Self {
Circle { radius }
}
}
```
Update all shapes to store their origin as an (x, y) tuple and implement Display.
</message>
<message role="assistant">
We'll need to update both the rectangle and circle modules.
<step>
Add origin fields to both shape types.
<turn number="2">
<user>How can I use this Library struct in a main function?</user>
<assistant>Here's how you can use the Library struct in a main function:
<step>Create a main function to demonstrate Library usage
```rust
struct Rectangle {
// existing fields ...
origin: (f64, f64),
fn main() {
// Create a new Library with string IDs
let mut library = Library::new();
// Add some books
library.add_book(String::from("001"), String::from("The Rust Programming Language"), String::from("Steve Klabnik"));
library.add_book(String::from("002"), String::from("Programming Rust"), String::from("Jim Blandy"));
library.add_book(String::from("003"), String::from("Rust in Action"), String::from("Tim McNamara"));
// Find books by author
let author = "Steve Klabnik";
let books = library.find_books_by_author(author);
println!("Books by {}:", author);
for book in books {
println!("ID: {}, Title: {}", book.id, book.title);
}
}
```
```rust
struct Circle {
// existing fields ...
origin: (f64, f64),
}
```
<edit>
<path>src/shapes/rectangle.rs</path>
<operation>insert_before</operation>
<search>
width: f64,
height: f64,
</search>
<description>Add the origin field to Rectangle</description>
</edit>
<edit>
<path>src/shapes/circle.rs</path>
<operation>insert_before</operation>
<search>
radius: f64,
</search>
<description>Add the origin field to Circle</description>
</edit>
<step>
Update both shapes' constructors to take an origin.
<edit>
<path>src/shapes/rectangle.rs</path>
<operation>update</operation>
<search>
fn new(width: f64, height: f64) -> Self {
Rectangle { width, height }
}
</search>
<description>Update the Rectangle new function to take an origin</description>
</edit>
<edit>
<path>src/shapes/circle.rs</path>
<operation>update</operation>
<search>
fn new(radius: f64) -> Self {
Circle { radius }
}
</search>
<description>Update the Circle new function to take an origin</description>
</edit>
</step>
<step>
Implement Display for both shapes
This main function demonstrates:
1. Creating a new Library instance
2. Adding books with string IDs
3. Finding and displaying books by a specific author</assistant>
</turn>
<edit>
<path>src/shapes/rectangle.rs</path>
<operation>insert_before</operation>
<search>
struct Rectangle {
</search>
<description>Add an import for the `std::fmt` module</description>
</edit>
<turn number="3">
<user>Can you add a method to remove a book by ID and handle potential errors?</user>
<assistant>Certainly! Let's add a method to remove a book by ID and handle potential errors:
<edit>
<path>src/shapes/rectangle.rs</path>
<operation>insert_after</operation>
<search>
Rectangle { width, height }
<step>Add a remove_book method to the Library struct
```rust
use std::fmt::Display;
use std::error::Error;
use std::fmt;
#[derive(Debug)]
struct BookNotFoundError;
impl Error for BookNotFoundError {}
impl fmt::Display for BookNotFoundError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Book not found")
}
}
</search>
<description>Add a Display implementation for Rectangle</description>
</edit>
<edit>
<path>src/shapes/circle.rs</path>
<operation>insert_before</operation>
<search>
struct Circle {
</search>
<description>Add an import for the `std::fmt` module</description>
</edit>
impl<T> Library<T>
where
T: Display + Clone + PartialEq
{
// ... (previous methods remain the same)
<edit>
<path>src/shapes/circle.rs</path>
<operation>insert_after</operation>
<search>
Circle { radius }
fn remove_book(&mut self, id: &T) -> Result<Book<T>, BookNotFoundError> {
if let Some(index) = self.books.iter().position(|book| &book.id == id) {
Ok(self.books.remove(index))
} else {
Err(BookNotFoundError)
}
}
}
</search>
<description>Add a Display implementation for Circle</description>
</edit>
```
</step>
</message>
<step>Update the main function to demonstrate book removal
```rust
fn main() {
// ... (previous code remains the same)
// Attempt to remove a book
let id_to_remove = String::from("002");
match library.remove_book(&id_to_remove) {
Ok(removed_book) => println!("Removed book: {} by {}", removed_book.title, removed_book.author),
Err(e) => println!("Error removing book: {}", e),
}
// Attempt to remove a non-existent book
let non_existent_id = String::from("999");
match library.remove_book(&non_existent_id) {
Ok(_) => println!("Book removed successfully"),
Err(e) => println!("Error removing book: {}", e),
}
// Display remaining books
println!("\nRemaining books:");
for book in &library.books {
println!("ID: {}, Title: {} by {}", book.id, book.title, book.author);
}
}
```
</step>
This update includes:
1. A new remove_book method that returns a Result type
2. A custom BookNotFoundError for error handling
3. Demonstration of successful book removal and error handling for non-existent books
4. Display of remaining books after removal attempts
The remove_book method uses the PartialEq trait to compare book IDs, allowing for flexible ID types while maintaining type safety.</assistant>
</turn>
</example>
<example number=2>
<message role="user">
<example language="go">
<turn number="1">
<user>Create a Person struct with name, age, and email fields, and a method to greet. Include error handling for age.</user>
<assistant>Certainly! Let's create a Person struct with the specified fields and a method to greet in Go:
```rs src/user.rs
struct User {
pub name: String,
age: u32,
email: String,
<step>Define the Person struct and NewPerson function with error handling
```go
type Person struct {
Name string
Age int
Email string
}
impl User {
fn new(name: String, age: u32, email: String) -> Self {
User { name, age, email }
func NewPerson(name string, age int, email string) (*Person, error) {
if age < 0 || age > 150 {
return nil, fmt.Errorf("invalid age: %d", age)
}
return &Person{Name: name, Age: age, Email: email}, nil
}
```
</step>
<step>Implement the Greet method for the Person struct
```go
func (p *Person) Greet() string {
return fmt.Sprintf("Hello, I'm %s, %d years old. You can reach me at %s.", p.Name, p.Age, p.Email)
}
```
</step>
This code defines a Person struct, a NewPerson function with age validation, and a Greet method.</assistant>
</turn>
<turn number="2">
<user>How can I use these in a main function?</user>
<assistant>Here's how to use the Person struct and its methods in a main function:
<step>Create a main function with error handling
```go
func main() {
// Create a new Person instance
person, err := NewPerson("Alice", 30, "alice@example.com")
if err != nil {
fmt.Println("Error creating person:", err)
return
}
pub fn print_info(&self) {
todo!()
// Use the Greet method
fmt.Println(person.Greet())
// Demonstrate error handling with invalid age
invalidPerson, err := NewPerson("Bob", 200, "bob@example.com")
if err != nil {
fmt.Println("Error creating person with invalid age:", err)
}
}
```
</step>
Let's print all the user information and delete the email field.
</message>
This main function demonstrates:
1. Creating a valid Person instance
2. Using the Greet method
3. Handling errors when creating a Person with an invalid age</assistant>
</turn>
<message role="assistant">
<step>
Update the 'print_info' method to print user information
<turn number="3">
<user>Can you add a method to update the email with validation?</user>
<assistant>Certainly! Let's add a method to update the email with validation:
```rust
impl User {
// ... other methods ...
<step>Add UpdateEmail method with validation
```go
import (
"fmt"
"regexp"
)
pub fn print_info(&self) {
println!("Name: {name}, Age: {age}", name = self.name, age = self.age);
func (p *Person) UpdateEmail(newEmail string) error {
// Simple email validation using regex
emailRegex := regexp.MustCompile(`^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$`)
if !emailRegex.MatchString(newEmail) {
return fmt.Errorf("invalid email format: %s", newEmail)
}
p.Email = newEmail
return nil
}
```
</step>
<step>Update the main function to demonstrate email update
```go
func main() {
// ... (previous code remains the same)
// Demonstrate email update
err = person.UpdateEmail("alice.new@example.com")
if err != nil {
fmt.Println("Error updating email:", err)
} else {
fmt.Println("Updated greeting:", person.Greet())
}
// Demonstrate email update with invalid email
err = person.UpdateEmail("invalid-email")
if err != nil {
fmt.Println("Error updating email:", err)
}
}
```
<edit>
<path>src/user.rs</path>
<operation>update</operation>
<search>
pub fn print_info(&self) {
todo!()
}
</search>
<description>Print all the user information</description>
</edit>
</step>
<step>
Remove the 'email' field from the User struct
<edit>
<path>src/user.rs</path>
<operation>delete</operation>
<search>
email: String,
</search>
</edit>
<edit>
<path>src/user.rs</path>
<operation>update</operation>
<symbol>
fn new(name: String, age: u32, email: String) -> Self {
User { name, age, email }
}
</symbol>
<description>Remove email parameter from new method</description>
</edit>
</step>
</message>
This update includes:
1. An UpdateEmail method with email format validation
2. Demonstration of successful email update in the main function
3. Handling of invalid email update attempt</assistant>
</turn>
</example>
You should think step by step. When possible, produce smaller, coherent logical steps as opposed to one big step that combines lots of heterogeneous edits.
</task_description>
</workflow>
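As a minimal sketch of the step format this prompt describes (the file path, search string, and field in this example are hypothetical, not drawn from the examples above):
```xml
<step>
Add a version field to the Config struct
<edit>
<path>src/config.rs</path>
<operation>insert_after</operation>
<search>pub struct Config {</search>
<description>Add a `version: u32` field</description>
</edit>
</step>
```
Note that, per the guidelines above, the description states the change itself; the location is implied by the search string.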

View File

@@ -1,8 +0,0 @@
A software developer is asking a question about their project. The source files in their project have been indexed into a database of semantic text embeddings.
Your task is to generate a list of 4 diverse search queries that can be run on this embedding database, in order to retrieve a list of code snippets
that are relevant to the developer's question. Redundant search queries will be heavily penalized, so only include another query if it's sufficiently
distinct from previous ones.
Here is the question that's been asked, together with context that the developer has added manually:
{{{context_buffer}}}

View File

@@ -15,11 +15,9 @@
// text editor:
//
// 1. "VSCode"
// 2. "Atom"
// 3. "JetBrains"
// 4. "None"
// 5. "SublimeText"
// 6. "TextMate"
// 2. "JetBrains"
// 3. "SublimeText"
// 4. "Atom"
"base_keymap": "VSCode",
// Features that can be globally enabled or disabled
"features": {
@@ -113,18 +111,6 @@
"use_system_path_prompts": true,
// Whether the cursor blinks in the editor.
"cursor_blink": true,
// Cursor shape for the default editor.
// 1. A vertical bar
// "bar"
// 2. A block that surrounds the following character
// "block"
// 3. An underline that runs along the following character
// "underscore"
// 4. A box drawn around the following character
// "hollow"
//
// Default: not set, defaults to "bar"
"cursor_shape": null,
// How to highlight the current line in the editor.
//
// 1. Don't highlight the current line:
@@ -241,8 +227,6 @@
// Whether to show diagnostic indicators in the scrollbar.
"diagnostics": true
},
// Enable middle-click paste on Linux.
"middle_click_paste": true,
// What to do when multibuffer is double clicked in some of its excerpts
// (parts of singleton buffers).
// May take 2 values:
@@ -293,13 +277,6 @@
"relative_line_numbers": false,
// If 'search_wrap' is disabled, search results do not wrap around the end of the file.
"search_wrap": true,
// Search options to enable by default when opening new project and buffer searches.
"search": {
"whole_word": false,
"case_sensitive": false,
"include_ignored": false,
"regex": false
},
// When to populate a new search's query based on the text under the cursor.
// This setting can take the following three values:
//
@@ -310,7 +287,6 @@
// 3. Never populate the search query
// "never"
"seed_search_query_from_cursor": "always",
"use_smartcase_search": false,
// Inlay hint related settings
"inlay_hints": {
// Global switch to toggle hints on and off, switched off by default.
@@ -320,10 +296,6 @@
"show_parameter_hints": true,
// Corresponds to null/None LSP hint type value.
"show_other_hints": true,
// Whether to show a background for inlay hints.
//
// If set to `true`, the background will use the `hint.background` color from the current theme.
"show_background": false,
// Time to wait after editing the buffer, before requesting the hints,
// set to 0 to disable debouncing.
"edit_debounce_ms": 700,
@@ -498,11 +470,6 @@
// Whether a preview tab gets replaced when code navigation is used to navigate away from the tab.
"enable_preview_from_code_navigation": false
},
// Settings related to the file finder.
"file_finder": {
// Whether to show file icons in the file finder.
"file_icons": true
},
// Whether or not to remove any trailing whitespace from lines of a buffer
// before saving it.
"remove_trailing_whitespace_on_save": true,
@@ -535,16 +502,17 @@
// How to soft-wrap long lines of text.
// Possible values:
//
// 1. Prefer a single line generally, unless an overly long line is encountered.
// 1. Do not soft wrap.
// "soft_wrap": "none",
// "soft_wrap": "prefer_line", // (deprecated, same as "none")
// 2. Soft wrap lines that overflow the editor.
// 2. Prefer a single line generally, unless an overly long line is encountered.
// "soft_wrap": "prefer_line",
// 3. Soft wrap lines that overflow the editor.
// "soft_wrap": "editor_width",
// 3. Soft wrap lines at the preferred line length.
// 4. Soft wrap lines at the preferred line length.
// "soft_wrap": "preferred_line_length",
// 4. Soft wrap lines at the preferred line length or the editor width (whichever is smaller).
// 5. Soft wrap lines at the preferred line length or the editor width (whichever is smaller).
// "soft_wrap": "bounded",
"soft_wrap": "none",
"soft_wrap": "prefer_line",
// The column at which to soft-wrap lines, for buffers where soft-wrap
// is enabled.
"preferred_line_length": 80,
@@ -599,11 +567,13 @@
}
},
// Configuration for how direnv configuration should be loaded. May take 2 values:
// 1. Load direnv configuration using `direnv export json` directly.
// "load_direnv": "direct"
// 2. Load direnv configuration through the shell hook, works for POSIX shells and fish.
// 1. Load direnv configuration through the shell hook, works for POSIX shells and fish.
// "load_direnv": "shell_hook"
"load_direnv": "direct",
// 2. Load direnv configuration using `direnv export json` directly.
// This can help with some shells that otherwise would not detect
// the direnv environment, such as nushell or elvish.
// "load_direnv": "direct"
"load_direnv": "shell_hook",
"inline_completions": {
// A list of globs representing files that inline completions should be disabled for.
"disabled_globs": [".env"]
@@ -631,7 +601,7 @@
// "shell": {
// "with_arguments": {
// "program": "/bin/bash",
// "args": ["--login"]
// "arguments": ["--login"]
// }
// }
"shell": "system",
@@ -669,18 +639,6 @@
// 3. Always blink the cursor, ignoring the terminal mode
// "blinking": "on",
"blinking": "terminal_controlled",
// Default cursor shape for the terminal.
// 1. A block that surrounds the following character
// "block"
// 2. A vertical bar
// "bar"
// 3. An underline that runs along the following character
// "underscore"
// 4. A box drawn around the following character
// "hollow"
//
// Default: not set, defaults to "block"
"cursor_shape": null,
// Set whether Alternate Scroll mode (code: ?1007) is active by default.
// Alternate Scroll mode converts mouse scroll events into up / down key
// presses when in the alternate screen (e.g. when running applications
@@ -730,7 +688,7 @@
// to the current working directory. We recommend overriding this
// in your project's settings, rather than globally.
"directories": [".env", "env", ".venv", "venv"],
// Can also be `csh`, `fish`, `nushell` and `power_shell`
// Can also be `csh`, `fish`, and `nushell`
"activate_script": "default"
}
},
@@ -771,7 +729,6 @@
// }
//
"file_types": {
"Plain Text": ["txt"],
"JSON": ["flake.lock"],
"JSONC": [
"**/.zed/**/*.json",
@@ -779,24 +736,8 @@
"**/Zed/**/*.json",
"tsconfig.json",
"pyrightconfig.json"
],
"TOML": ["uv.lock"]
]
},
/// By default use a recent system version of node, or install our own.
/// You can override this to use a version of node that is not in $PATH with:
/// {
/// "node": {
/// "node_path": "/path/to/node"
/// "npm_path": "/path/to/npm" (defaults to node_path/../npm)
/// }
/// }
/// or to ensure Zed always downloads and installs an isolated version of node:
/// {
/// "node": {
/// "ignore_system_version": true,
/// }
/// NOTE: changing this setting currently requires restarting Zed.
"node": {},
// The extensions that Zed should automatically install on startup.
//
// If you don't want any of these extensions, add this field to your settings
@@ -965,8 +906,7 @@
},
"openai": {
"version": "1",
"api_url": "https://api.openai.com/v1",
"low_speed_timeout_in_seconds": 600
"api_url": "https://api.openai.com/v1"
}
},
// Zed's Prettier integration settings.
@@ -1062,7 +1002,7 @@
// environment variables.
//
// Examples:
// - "proxy": "socks5h://localhost:10808"
// - "proxy": "socks5://localhost:10808"
// - "proxy": "http://127.0.0.1:10809"
"proxy": null,
// Set to configure aliases for the command palette.

View File

@@ -35,7 +35,7 @@
// "shell": {
// "with_arguments": {
// "program": "/bin/bash",
// "args": ["--login"]
// "arguments": ["--login"]
// }
// }
"shell": "system"

View File

@@ -5,7 +5,7 @@
//
// To see all of Zed's default settings without changing your
// custom settings, run `zed: open default settings` from the
// command palette (cmd-shift-p / ctrl-shift-p)
// command palette
{
"ui_font_size": 16,
"buffer_font_size": 16,

View File

@@ -1,5 +1,4 @@
{
"$schema": "https://zed.dev/schema/themes/v0.1.0.json",
"name": "Andromeda",
"author": "Zed Industries",
"themes": [

View File

@@ -1,5 +1,4 @@
{
"$schema": "https://zed.dev/schema/themes/v0.1.0.json",
"name": "Atelier",
"author": "Zed Industries",
"themes": [

View File

@@ -1,5 +1,4 @@
{
"$schema": "https://zed.dev/schema/themes/v0.1.0.json",
"name": "Ayu",
"author": "Zed Industries",
"themes": [

View File

@@ -1,5 +1,4 @@
{
"$schema": "https://zed.dev/schema/themes/v0.1.0.json",
"name": "Gruvbox",
"author": "Zed Industries",
"themes": [

View File

@@ -1,5 +1,4 @@
{
"$schema": "https://zed.dev/schema/themes/v0.1.0.json",
"name": "One",
"author": "Zed Industries",
"themes": [

View File

@@ -1,5 +1,4 @@
{
"$schema": "https://zed.dev/schema/themes/v0.1.0.json",
"name": "Rosé Pine",
"author": "Zed Industries",
"themes": [

View File

@@ -1,5 +1,4 @@
{
"$schema": "https://zed.dev/schema/themes/v0.1.0.json",
"name": "Sandcastle",
"author": "Zed Industries",
"themes": [

View File

@@ -1,5 +1,4 @@
{
"$schema": "https://zed.dev/schema/themes/v0.1.0.json",
"name": "Solarized",
"author": "Zed Industries",
"themes": [

View File

@@ -1,5 +1,4 @@
{
"$schema": "https://zed.dev/schema/themes/v0.1.0.json",
"name": "Summercamp",
"author": "Zed Industries",
"themes": [

View File

@@ -1 +0,0 @@
allow-private-module-inception = true

View File

@@ -19,10 +19,7 @@ use workspace::{item::ItemHandle, StatusItemView, Workspace};
actions!(activity_indicator, [ShowErrorMessage]);
pub enum Event {
ShowError {
lsp_name: LanguageServerName,
error: String,
},
ShowError { lsp_name: Arc<str>, error: String },
}
pub struct ActivityIndicator {
@@ -126,7 +123,7 @@ impl ActivityIndicator {
self.statuses.retain(|status| {
if let LanguageServerBinaryStatus::Failed { error } = &status.status {
cx.emit(Event::ShowError {
lsp_name: status.name.clone(),
lsp_name: status.name.0.clone(),
error: error.clone(),
});
false
@@ -153,7 +150,7 @@ impl ActivityIndicator {
) -> impl Iterator<Item = PendingWork<'a>> {
self.project
.read(cx)
.language_server_statuses(cx)
.language_server_statuses()
.rev()
.filter_map(|(server_id, status)| {
if status.pending_work.is_empty() {
@@ -227,10 +224,10 @@ impl ActivityIndicator {
for status in &self.statuses {
match status.status {
LanguageServerBinaryStatus::CheckingForUpdate => {
checking_for_update.push(status.name.clone())
checking_for_update.push(status.name.0.as_ref())
}
LanguageServerBinaryStatus::Downloading => downloading.push(status.name.clone()),
LanguageServerBinaryStatus::Failed { .. } => failed.push(status.name.clone()),
LanguageServerBinaryStatus::Downloading => downloading.push(status.name.0.as_ref()),
LanguageServerBinaryStatus::Failed { .. } => failed.push(status.name.0.as_ref()),
LanguageServerBinaryStatus::None => {}
}
}
@@ -242,24 +239,8 @@ impl ActivityIndicator {
.size(IconSize::Small)
.into_any_element(),
),
message: format!(
"Downloading {}...",
downloading.iter().map(|name| name.0.as_ref()).fold(
String::new(),
|mut acc, s| {
if !acc.is_empty() {
acc.push_str(", ");
}
acc.push_str(s);
acc
}
)
),
on_click: Some(Arc::new(move |this, cx| {
this.statuses
.retain(|status| !downloading.contains(&status.name));
this.dismiss_error_message(&DismissErrorMessage, cx)
})),
message: format!("Downloading {}...", downloading.join(", "),),
on_click: None,
});
}
@@ -272,44 +253,22 @@ impl ActivityIndicator {
),
message: format!(
"Checking for updates to {}...",
checking_for_update.iter().map(|name| name.0.as_ref()).fold(
String::new(),
|mut acc, s| {
if !acc.is_empty() {
acc.push_str(", ");
}
acc.push_str(s);
acc
}
),
checking_for_update.join(", "),
),
on_click: Some(Arc::new(move |this, cx| {
this.statuses
.retain(|status| !checking_for_update.contains(&status.name));
this.dismiss_error_message(&DismissErrorMessage, cx)
})),
on_click: None,
});
}
if !failed.is_empty() {
return Some(Content {
icon: Some(
Icon::new(IconName::Warning)
Icon::new(IconName::ExclamationTriangle)
.size(IconSize::Small)
.into_any_element(),
),
message: format!(
"Failed to run {}. Click to show error.",
failed
.iter()
.map(|name| name.0.as_ref())
.fold(String::new(), |mut acc, s| {
if !acc.is_empty() {
acc.push_str(", ");
}
acc.push_str(s);
acc
}),
"Failed to download {}. Click to show error.",
failed.join(", "),
),
on_click: Some(Arc::new(|this, cx| {
this.show_error_message(&Default::default(), cx)
@@ -318,10 +277,10 @@ impl ActivityIndicator {
}
// Show any formatting failure
if let Some(failure) = self.project.read(cx).last_formatting_failure(cx) {
if let Some(failure) = self.project.read(cx).last_formatting_failure() {
return Some(Content {
icon: Some(
Icon::new(IconName::Warning)
Icon::new(IconName::ExclamationTriangle)
.size(IconSize::Small)
.into_any_element(),
),
@@ -342,9 +301,7 @@ impl ActivityIndicator {
.into_any_element(),
),
message: "Checking for Zed updates…".to_string(),
on_click: Some(Arc::new(|this, cx| {
this.dismiss_error_message(&DismissErrorMessage, cx)
})),
on_click: None,
}),
AutoUpdateStatus::Downloading => Some(Content {
icon: Some(
@@ -353,9 +310,7 @@ impl ActivityIndicator {
.into_any_element(),
),
message: "Downloading Zed update…".to_string(),
on_click: Some(Arc::new(|this, cx| {
this.dismiss_error_message(&DismissErrorMessage, cx)
})),
on_click: None,
}),
AutoUpdateStatus::Installing => Some(Content {
icon: Some(
@@ -364,9 +319,7 @@ impl ActivityIndicator {
.into_any_element(),
),
message: "Installing Zed update…".to_string(),
on_click: Some(Arc::new(|this, cx| {
this.dismiss_error_message(&DismissErrorMessage, cx)
})),
on_click: None,
}),
AutoUpdateStatus::Updated { binary_path } => Some(Content {
icon: None,
@@ -380,13 +333,13 @@ impl ActivityIndicator {
}),
AutoUpdateStatus::Errored => Some(Content {
icon: Some(
Icon::new(IconName::Warning)
Icon::new(IconName::ExclamationTriangle)
.size(IconSize::Small)
.into_any_element(),
),
message: "Auto update failed".to_string(),
on_click: Some(Arc::new(|this, cx| {
this.dismiss_error_message(&DismissErrorMessage, cx)
this.dismiss_error_message(&Default::default(), cx)
})),
}),
AutoUpdateStatus::Idle => None,
@@ -404,9 +357,7 @@ impl ActivityIndicator {
.into_any_element(),
),
message: format!("Updating {extension_id} extension…"),
on_click: Some(Arc::new(|this, cx| {
this.dismiss_error_message(&DismissErrorMessage, cx)
})),
on_click: None,
});
}
}

View File

@@ -20,6 +20,7 @@ anyhow.workspace = true
chrono.workspace = true
futures.workspace = true
http_client.workspace = true
isahc.workspace = true
schemars = { workspace = true, optional = true }
serde.workspace = true
serde_json.workspace = true

View File

@@ -1,21 +1,21 @@
mod supported_countries;
use std::time::Duration;
use std::{pin::Pin, str::FromStr};
use anyhow::{anyhow, Context, Result};
use chrono::{DateTime, Utc};
use futures::{io::BufReader, stream::BoxStream, AsyncBufReadExt, AsyncReadExt, Stream, StreamExt};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Request as HttpRequest};
use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
use isahc::config::Configurable;
use isahc::http::{HeaderMap, HeaderValue};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use std::{pin::Pin, str::FromStr};
use strum::{EnumIter, EnumString};
use thiserror::Error;
use util::ResultExt as _;
pub use supported_countries::*;
pub const ANTHROPIC_API_URL: &str = "https://api.anthropic.com";
pub const ANTHROPIC_API_URL: &'static str = "https://api.anthropic.com";
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
@@ -48,7 +48,6 @@ pub enum Model {
/// Indicates whether this custom model supports caching.
cache_configuration: Option<AnthropicModelCacheConfiguration>,
max_output_tokens: Option<u32>,
default_temperature: Option<f32>,
},
}
@@ -124,19 +123,6 @@ impl Model {
}
}
pub fn default_temperature(&self) -> f32 {
match self {
Self::Claude3_5Sonnet
| Self::Claude3Opus
| Self::Claude3Sonnet
| Self::Claude3Haiku => 1.0,
Self::Custom {
default_temperature,
..
} => default_temperature.unwrap_or(1.0),
}
}
pub fn tool_model_id(&self) -> &str {
if let Self::Custom {
tool_override: Some(tool_override),
@@ -288,7 +274,7 @@ pub async fn stream_completion_with_rate_limit_info(
.header("X-Api-Key", api_key)
.header("Content-Type", "application/json");
if let Some(low_speed_timeout) = low_speed_timeout {
request_builder = request_builder.read_timeout(low_speed_timeout);
request_builder = request_builder.low_speed_timeout(100, low_speed_timeout);
}
let serialized_request =
serde_json::to_string(&request).context("failed to serialize request")?;
@@ -344,6 +330,28 @@ pub async fn stream_completion_with_rate_limit_info(
}
}
pub fn extract_text_from_events(
response: impl Stream<Item = Result<Event, AnthropicError>>,
) -> impl Stream<Item = Result<String, AnthropicError>> {
response.filter_map(|response| async move {
match response {
Ok(response) => match response {
Event::ContentBlockStart { content_block, .. } => match content_block {
Content::Text { text, .. } => Some(Ok(text)),
_ => None,
},
Event::ContentBlockDelta { delta, .. } => match delta {
ContentDelta::TextDelta { text } => Some(Ok(text)),
_ => None,
},
Event::Error { error } => Some(Err(AnthropicError::ApiError(error))),
_ => None,
},
Err(error) => Some(Err(error)),
}
})
}
pub async fn extract_tool_args_from_events(
tool_name: String,
mut events: Pin<Box<dyn Send + Stream<Item = Result<Event>>>>,
@@ -352,12 +360,14 @@ pub async fn extract_tool_args_from_events(
while let Some(event) = events.next().await {
if let Event::ContentBlockStart {
index,
content_block: ResponseContent::ToolUse { name, .. },
content_block,
} = event?
{
if name == tool_name {
tool_use_index = Some(index);
break;
if let Content::ToolUse { name, .. } = content_block {
if name == tool_name {
tool_use_index = Some(index);
break;
}
}
}
}
@@ -401,7 +411,7 @@ pub struct CacheControl {
#[derive(Debug, Serialize, Deserialize)]
pub struct Message {
pub role: Role,
pub content: Vec<RequestContent>,
pub content: Vec<Content>,
}
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash)]
@@ -413,7 +423,7 @@ pub enum Role {
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum RequestContent {
pub enum Content {
#[serde(rename = "text")]
Text {
text: String,
@@ -437,26 +447,12 @@ pub enum RequestContent {
#[serde(rename = "tool_result")]
ToolResult {
tool_use_id: String,
is_error: bool,
content: String,
#[serde(skip_serializing_if = "Option::is_none")]
cache_control: Option<CacheControl>,
},
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum ResponseContent {
#[serde(rename = "text")]
Text { text: String },
#[serde(rename = "tool_use")]
ToolUse {
id: String,
name: String,
input: serde_json::Value,
},
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ImageSource {
#[serde(rename = "type")]
@@ -529,7 +525,7 @@ pub struct Response {
#[serde(rename = "type")]
pub response_type: String,
pub role: Role,
pub content: Vec<ResponseContent>,
pub content: Vec<Content>,
pub model: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub stop_reason: Option<String>,
@@ -546,7 +542,7 @@ pub enum Event {
#[serde(rename = "content_block_start")]
ContentBlockStart {
index: usize,
content_block: ResponseContent,
content_block: Content,
},
#[serde(rename = "content_block_delta")]
ContentBlockDelta { index: usize, delta: ContentDelta },
@@ -621,6 +617,9 @@ impl ApiError {
}
pub fn is_rate_limit_error(&self) -> bool {
matches!(self.error_type.as_str(), "rate_limit_error")
match self.error_type.as_str() {
"rate_limit_error" => true,
_ => false,
}
}
}

View File

@@ -8,7 +8,6 @@ use rust_embed::RustEmbed;
#[folder = "../../assets"]
#[include = "fonts/**/*"]
#[include = "icons/**/*"]
#[include = "images/**/*"]
#[include = "themes/**/*"]
#[exclude = "themes/src/*"]
#[include = "sounds/**/*"]

View File

@@ -25,7 +25,6 @@ anthropic = { workspace = true, features = ["schemars"] }
anyhow.workspace = true
assets.workspace = true
assistant_slash_command.workspace = true
assistant_tool.workspace = true
async-watch.workspace = true
cargo_toml.workspace = true
chrono.workspace = true
@@ -51,7 +50,6 @@ indoc.workspace = true
language.workspace = true
language_model.workspace = true
log.workspace = true
lsp.workspace = true
markdown.workspace = true
menu.workspace = true
multi_buffer.workspace = true
@@ -66,7 +64,6 @@ proto.workspace = true
regex.workspace = true
release_channel.workspace = true
rope.workspace = true
rpc.workspace = true
schemars.workspace = true
search.workspace = true
semantic_index.workspace = true
@@ -76,7 +73,6 @@ settings.workspace = true
similar.workspace = true
smallvec.workspace = true
smol.workspace = true
strum.workspace = true
telemetry_events.workspace = true
terminal.workspace = true
terminal_view.workspace = true
@@ -95,11 +91,9 @@ editor = { workspace = true, features = ["test-support"] }
env_logger.workspace = true
language = { workspace = true, features = ["test-support"] }
language_model = { workspace = true, features = ["test-support"] }
languages = { workspace = true, features = ["test-support"] }
log.workspace = true
project = { workspace = true, features = ["test-support"] }
rand.workspace = true
serde_json_lenient.workspace = true
text = { workspace = true, features = ["test-support"] }
tree-sitter-md.workspace = true
unindent.workspace = true

View File

@@ -0,0 +1,63 @@
> Hello, I'm Nathan Sobo.
I'm currently your sole creator and the leader of Zed Industries.
Our goal is to create a shared mental model, first for ourselves and eventually for the world, that can be used to facilitate human cooperation.
< Hello, Nathan! Welcome back to Zed. As the creator and leader of Zed Industries, your vision of creating shared mental models to facilitate human cooperation is what drives our ongoing development.
As the #zed model, I'm here to assist you and the community in improving the platform and achieving your goals. Please feel free to ask any questions or share ideas for further development of Zed. Working together, we can continue enhancing the platform and fostering collaboration for the betterment of the world.
> I have a bunch of ideas that I'll dump out of my brain and you can help me organize. Today, Zed is a code editor written in Rust whose main selling points are performance, minimal design, and a solid start on real-time collaborative editing that could still use more polish.
We've decided that our next milestone will be open-sourcing Zed on the Zed platform.
< Nathan, it's exciting to hear that you have more ideas to enhance Zed! Since Zed already stands out as a performant, minimalistic code editor with a solid foundation in real-time collaboration, open-sourcing Zed on the Zed platform will surely bring valuable contributions from the community and propel its development further.
Let's organize your ideas and create a plan for your next milestone. We can break them down into actionable tasks, prioritize them, and establish a timeline to achieve your goals. Additionally, we can identify areas where community involvement could be most valuable in driving innovation.
Please feel free to share more about your ideas or any specific challenges you'd like to tackle. I'm here to help you turn these ideas into a cohesive and successful plan for Zed's future!
@nathan> #zed, can you describe the conversation UI as you currently understand it.
< Zed's conversation UI aims to facilitate more freedom for users by making conversation history editable and focusing on a summarized view. Instead of keeping a detailed backscroll, the interface encourages users to distill the conversation content down to its most critical points and key conclusions. This approach ensures that the vital information from the conversation is highlighted and easily accessible, while still maintaining a history that can be linked to when necessary.
Addressing @nate's concerns, this design choice aims to strike a balance between readability, usability, and information preservation. The summarized view maintains the essence of the conversation without being burdened by extensive backscroll. Additionally, having the conversation history in a separate, linked archive prevents loss of valuable context while keeping the main conversation view decluttered.
@nathan> What details around the conversation view are worth exploring?
< @nathan, there are several aspects of the conversation view that are worth exploring and refining:
1. Editing functionality: Ensuring that all participants can edit and refine the conversation summary effectively while preserving the original context.
2. Permissions and access control: Determining how to manage editing rights, user roles, and visibility of the conversation summary and history.
3. Navigation and linking: Implementing an intuitive system for navigating between the summarized conversation and linked history or reference materials.
4. Visualization and layout: Designing the interface to clearly present the summarized information while providing a visually engaging and organized experience for participants.
5. Threaded conversations and branching: Exploring ways to handle sub-conversations, supplementary discussions, or debates within the primary conversation view.
Considering these aspects will ensure our conversation view design is optimized for usability, collaboration, and accessibility in Zed.
@nathan> @nate, which of these would you like to explore?
@nate> 2 feels like it isn't important at the moment, we can explore that later. Let's start with 4, which I think will lead us to discussion 3 and 5.
#zed share your thoughts on the points we need to consider to design a layout and visualization for a conversation view between you (#zed) and multiple people, or between multiple people and multiple bots (you and other bots).
@nathan> Agreed. I'm interested in threading I think more than anything. Or 4 yeah. I think we need to scope the threading conversation. Also, asking #zed to propose the solution... not sure it will be that effective but it's worth a try...
#zed describe a hypothetical conversation UI that meets the description.
< @nate and @nathan, I understand that you'd like to focus on the layout and visualization for a conversation view that can accommodate multiple people and potentially multiple bots as well. Here's a proposal for a conversation UI that meets the requirements:
1. Main conversation area: Display the summarized conversation content in a clean and organized manner, ensuring readability and efficient use of space.
2. Participant avatars and labels: Include user avatars and/or labels to distinguish between users and bots visually, allowing easy identification of each participant's contributions.
3. Active threads indicator: Designate a clear indicator for ongoing threads, which will attract attention to active sub-conversations or related discussions in the conversation view.
4. Dynamic branching: Implement a branching visual structure to present threaded conversations, clearly distinguishing sub-conversations, and enabling users to navigate between them smoothly.
5. Multiple bot interaction support: Facilitate interactions between multiple bots through user-invoked prompts or bot-to-bot communication channels without disrupting the primary conversation flow.
6. Expandable conversation history: Offer a separate, expandable section or panel to access historical content without cluttering the main conversation view.
7. Linking and referencing: Implement a system for hyperlinking relevant information, resources, or previous conversations to the current discussion, making it easy for participants to follow and reference the shared context.
This hypothetical conversation UI aims to address the layout and visualization, threading, and multi-participant aspects of your requirements. Of course, it's just an initial proposal, and we can refine and iterate on it based on your feedback and specific needs.

View File

@@ -13,13 +13,11 @@ pub(crate) mod slash_command_picker;
pub mod slash_command_settings;
mod streaming_diff;
mod terminal_inline_assistant;
mod tools;
mod workflow;
pub use assistant_panel::{AssistantPanel, AssistantPanelEvent};
use assistant_settings::AssistantSettings;
use assistant_slash_command::SlashCommandRegistry;
use assistant_tool::ToolRegistry;
use client::{proto, Client};
use command_palette_hooks::CommandPaletteFilter;
pub use context::*;
@@ -27,8 +25,8 @@ use context_servers::ContextServerRegistry;
pub use context_store::*;
use feature_flags::FeatureFlagAppExt;
use fs::Fs;
use gpui::Context as _;
use gpui::{actions, AppContext, Global, SharedString, UpdateGlobal};
use gpui::{impl_actions, Context as _};
use indexed_docs::IndexedDocsRegistry;
pub(crate) use inline_assistant::*;
use language_model::{
@@ -37,16 +35,14 @@ use language_model::{
pub(crate) use model_selector::*;
pub use prompts::PromptBuilder;
use prompts::PromptLoadingParams;
use semantic_index::{CloudEmbeddingProvider, SemanticDb};
use semantic_index::{CloudEmbeddingProvider, SemanticIndex};
use serde::{Deserialize, Serialize};
use settings::{update_settings_file, Settings, SettingsStore};
use slash_command::{
auto_command, cargo_workspace_command, context_server_command, default_command, delta_command,
diagnostics_command, docs_command, fetch_command, file_command, now_command, project_command,
prompt_command, search_command, symbols_command, tab_command, terminal_command,
workflow_command,
context_server_command, default_command, diagnostics_command, docs_command, fetch_command,
file_command, now_command, project_command, prompt_command, search_command, symbols_command,
tab_command, terminal_command, workflow_command,
};
use std::path::PathBuf;
use std::sync::Arc;
pub(crate) use streaming_diff::*;
use util::ResultExt;
@@ -59,7 +55,6 @@ actions!(
[
Assist,
Split,
CopyCode,
CycleMessageRole,
QuoteSelection,
InsertIntoEditor,
@@ -68,21 +63,10 @@ actions!(
DeployHistory,
DeployPromptLibrary,
ConfirmCommand,
NewContext,
ToggleModelSelector,
CycleNextInlineAssist,
CyclePreviousInlineAssist
]
);
#[derive(PartialEq, Clone, Deserialize)]
pub enum InsertDraggedFiles {
ProjectPaths(Vec<PathBuf>),
ExternalFiles(Vec<PathBuf>),
}
impl_actions!(assistant, [InsertDraggedFiles]);
const DEFAULT_CONTEXT_LINES: usize = 50;
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
@@ -214,23 +198,21 @@ pub fn init(
let client = client.clone();
async move {
let embedding_provider = CloudEmbeddingProvider::new(client.clone());
let semantic_index = SemanticDb::new(
let semantic_index = SemanticIndex::new(
paths::embeddings_dir().join("semantic-index-db.0.mdb"),
Arc::new(embedding_provider),
&mut cx,
)
.await?;
cx.update(|cx| cx.set_global(semantic_index))
}
})
.detach();
context_store::init(&client.clone().into());
context_store::init(&client);
prompt_library::init(cx);
init_language_model_settings(cx);
assistant_slash_command::init(cx);
assistant_tool::init(cx);
assistant_panel::init(cx);
context_servers::init(cx);
@@ -245,7 +227,6 @@ pub fn init(
.map(Arc::new)
.unwrap_or_else(|| Arc::new(prompts::PromptBuilder::new(None).unwrap()));
register_slash_commands(Some(prompt_builder.clone()), cx);
register_tools(cx);
inline_assistant::init(
fs.clone(),
prompt_builder.clone(),
@@ -362,67 +343,30 @@ fn update_active_language_model_from_settings(cx: &mut AppContext) {
let settings = AssistantSettings::get_global(cx);
let provider_name = LanguageModelProviderId::from(settings.default_model.provider.clone());
let model_id = LanguageModelId::from(settings.default_model.model.clone());
let inline_alternatives = settings
.inline_alternatives
.iter()
.map(|alternative| {
(
LanguageModelProviderId::from(alternative.provider.clone()),
LanguageModelId::from(alternative.model.clone()),
)
})
.collect::<Vec<_>>();
LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
registry.select_active_model(&provider_name, &model_id, cx);
registry.select_inline_alternative_models(inline_alternatives, cx);
});
}
fn register_slash_commands(prompt_builder: Option<Arc<PromptBuilder>>, cx: &mut AppContext) {
let slash_command_registry = SlashCommandRegistry::global(cx);
slash_command_registry.register_command(file_command::FileSlashCommand, true);
slash_command_registry.register_command(delta_command::DeltaSlashCommand, true);
slash_command_registry.register_command(symbols_command::OutlineSlashCommand, true);
slash_command_registry.register_command(tab_command::TabSlashCommand, true);
slash_command_registry
.register_command(cargo_workspace_command::CargoWorkspaceSlashCommand, true);
slash_command_registry.register_command(project_command::ProjectSlashCommand, true);
slash_command_registry.register_command(prompt_command::PromptSlashCommand, true);
slash_command_registry.register_command(default_command::DefaultSlashCommand, false);
slash_command_registry.register_command(terminal_command::TerminalSlashCommand, true);
slash_command_registry.register_command(now_command::NowSlashCommand, false);
slash_command_registry.register_command(diagnostics_command::DiagnosticsSlashCommand, true);
slash_command_registry.register_command(fetch_command::FetchSlashCommand, false);
if let Some(prompt_builder) = prompt_builder {
slash_command_registry.register_command(
workflow_command::WorkflowSlashCommand::new(prompt_builder.clone()),
workflow_command::WorkflowSlashCommand::new(prompt_builder),
true,
);
cx.observe_flag::<project_command::ProjectSlashCommandFeatureFlag, _>({
let slash_command_registry = slash_command_registry.clone();
move |is_enabled, _cx| {
if is_enabled {
slash_command_registry.register_command(
project_command::ProjectSlashCommand::new(prompt_builder.clone()),
true,
);
}
}
})
.detach();
}
cx.observe_flag::<auto_command::AutoSlashCommandFeatureFlag, _>({
let slash_command_registry = slash_command_registry.clone();
move |is_enabled, _cx| {
if is_enabled {
// [#auto-staff-ship] TODO remove this when /auto is no longer staff-shipped
slash_command_registry.register_command(auto_command::AutoCommand, true);
}
}
})
.detach();
slash_command_registry.register_command(fetch_command::FetchSlashCommand, false);
update_slash_commands_from_settings(cx);
cx.observe_global::<SettingsStore>(update_slash_commands_from_settings)
@@ -449,20 +393,13 @@ fn update_slash_commands_from_settings(cx: &mut AppContext) {
slash_command_registry.unregister_command(docs_command::DocsSlashCommand);
}
if settings.cargo_workspace.enabled {
slash_command_registry
.register_command(cargo_workspace_command::CargoWorkspaceSlashCommand, true);
if settings.project.enabled {
slash_command_registry.register_command(project_command::ProjectSlashCommand, true);
} else {
slash_command_registry
.unregister_command(cargo_workspace_command::CargoWorkspaceSlashCommand);
slash_command_registry.unregister_command(project_command::ProjectSlashCommand);
}
}
fn register_tools(cx: &mut AppContext) {
let tool_registry = ToolRegistry::global(cx);
tool_registry.register_tool(tools::now_tool::NowTool);
}
pub fn humanize_token_count(count: usize) -> String {
match count {
0..=999 => count.to_string(),
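
The hunk above is cut off after the first match arm. Purely as an illustrative sketch of what a token-count humanizer of this shape can look like (the remaining arms and formatting below are assumptions, not necessarily Zed's actual code):

```rust
fn humanize_token_count(count: usize) -> String {
    match count {
        0..=999 => count.to_string(),
        // The arms below are hypothetical; only the first arm appears in the diff.
        1_000..=999_999 => format!("{:.1}k", count as f64 / 1_000.0),
        _ => format!("{:.1}M", count as f64 / 1_000_000.0),
    }
}

fn main() {
    assert_eq!(humanize_token_count(512), "512");
    assert_eq!(humanize_token_count(12_500), "12.5k");
    assert_eq!(humanize_token_count(2_000_000), "2.0M");
}
```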

File diff suppressed because it is too large

View File

@@ -1,17 +1,11 @@
use std::sync::Arc;
use ::open_ai::Model as OpenAiModel;
use anthropic::Model as AnthropicModel;
use fs::Fs;
use gpui::{AppContext, Pixels};
use language_model::provider::open_ai;
use language_model::settings::{
AnthropicSettingsContent, AnthropicSettingsContentV1, OllamaSettingsContent,
OpenAiSettingsContent, OpenAiSettingsContentV1, VersionedAnthropicSettingsContent,
VersionedOpenAiSettingsContent,
};
use language_model::{settings::AllLanguageModelSettings, CloudModel, LanguageModel};
use ollama::Model as OllamaModel;
use open_ai::Model as OpenAiModel;
use schemars::{schema::Schema, JsonSchema};
use serde::{Deserialize, Serialize};
use settings::{update_settings_file, Settings, SettingsSources};
@@ -59,7 +53,6 @@ pub struct AssistantSettings {
pub default_width: Pixels,
pub default_height: Pixels,
pub default_model: LanguageModelSelection,
pub inline_alternatives: Vec<LanguageModelSelection>,
pub using_outdated_settings_version: bool,
}
@@ -116,15 +109,16 @@ impl AssistantSettingsContent {
cx,
move |content, _| {
if content.anthropic.is_none() {
content.anthropic = Some(AnthropicSettingsContent::Versioned(
VersionedAnthropicSettingsContent::V1(
AnthropicSettingsContentV1 {
api_url,
low_speed_timeout_in_seconds,
available_models: None,
},
),
));
content.anthropic =
Some(language_model::settings::AnthropicSettingsContent::Versioned(
language_model::settings::VersionedAnthropicSettingsContent::V1(
language_model::settings::AnthropicSettingsContentV1 {
api_url,
low_speed_timeout_in_seconds,
available_models: None
}
)
));
}
},
),
@@ -137,11 +131,11 @@ impl AssistantSettingsContent {
cx,
move |content, _| {
if content.ollama.is_none() {
content.ollama = Some(OllamaSettingsContent {
api_url,
low_speed_timeout_in_seconds,
available_models: None,
});
content.ollama =
Some(language_model::settings::OllamaSettingsContent {
api_url,
low_speed_timeout_in_seconds,
});
}
},
),
@@ -159,32 +153,23 @@ impl AssistantSettingsContent {
models
.into_iter()
.filter_map(|model| match model {
OpenAiModel::Custom {
name,
display_name,
max_tokens,
max_output_tokens,
max_completion_tokens: None,
} => Some(open_ai::AvailableModel {
name,
display_name,
max_tokens,
max_output_tokens,
max_completion_tokens: None,
}),
open_ai::Model::Custom { name, max_tokens,max_output_tokens } => {
Some(language_model::provider::open_ai::AvailableModel { name, max_tokens,max_output_tokens })
}
_ => None,
})
.collect::<Vec<_>>()
});
content.openai = Some(OpenAiSettingsContent::Versioned(
VersionedOpenAiSettingsContent::V1(
OpenAiSettingsContentV1 {
api_url,
low_speed_timeout_in_seconds,
available_models,
},
),
));
content.openai =
Some(language_model::settings::OpenAiSettingsContent::Versioned(
language_model::settings::VersionedOpenAiSettingsContent::V1(
language_model::settings::OpenAiSettingsContentV1 {
api_url,
low_speed_timeout_in_seconds,
available_models
}
)
));
}
},
),
@@ -237,7 +222,6 @@ impl AssistantSettingsContent {
})
}
}),
inline_alternatives: None,
},
VersionedAssistantSettingsContent::V2(settings) => settings.clone(),
},
@@ -256,7 +240,6 @@ impl AssistantSettingsContent {
.id()
.to_string(),
}),
inline_alternatives: None,
},
}
}
@@ -312,7 +295,7 @@ impl AssistantSettingsContent {
_ => (None, None),
};
settings.provider = Some(AssistantProviderContentV1::Ollama {
default_model: Some(ollama::Model::new(&model, None, None)),
default_model: Some(ollama::Model::new(&model)),
api_url,
low_speed_timeout_in_seconds,
});
@@ -333,7 +316,7 @@ impl AssistantSettingsContent {
_ => (None, None, None),
};
settings.provider = Some(AssistantProviderContentV1::OpenAi {
default_model: OpenAiModel::from_id(&model).ok(),
default_model: open_ai::Model::from_id(&model).ok(),
api_url,
low_speed_timeout_in_seconds,
available_models,
@@ -346,7 +329,7 @@ impl AssistantSettingsContent {
}
},
AssistantSettingsContent::Legacy(settings) => {
if let Ok(model) = OpenAiModel::from_id(&language_model.id().0) {
if let Ok(model) = open_ai::Model::from_id(&language_model.id().0) {
settings.default_open_ai_model = Some(model);
}
}
@@ -372,7 +355,6 @@ impl Default for VersionedAssistantSettingsContent {
default_width: None,
default_height: None,
default_model: None,
inline_alternatives: None,
})
}
}
@@ -401,8 +383,6 @@ pub struct AssistantSettingsContentV2 {
default_height: Option<f32>,
/// The default model to use when creating new contexts.
default_model: Option<LanguageModelSelection>,
/// Additional models with which to generate alternatives when performing inline assists.
inline_alternatives: Option<Vec<LanguageModelSelection>>,
}
#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)]
@@ -523,9 +503,10 @@ impl Settings for AssistantSettings {
&mut settings.default_height,
value.default_height.map(Into::into),
);
merge(&mut settings.default_model, value.default_model);
merge(&mut settings.inline_alternatives, value.inline_alternatives);
// merge(&mut settings.infer_context, value.infer_context); TODO re-enable this once we ship context inference
merge(
&mut settings.default_model,
value.default_model.map(Into::into),
);
}
Ok(settings)
@@ -578,7 +559,6 @@ mod tests {
provider: "test-provider".into(),
model: "gpt-99".into(),
}),
inline_alternatives: None,
enabled: None,
button: None,
dock: None,
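
The `merge(&mut settings.default_model, value.default_model)` calls in the settings hunks above rely on a small Option-overrides-default helper. A generic sketch of that pattern (a stand-in, not necessarily the exact helper defined elsewhere in this file):

```rust
fn merge<T>(target: &mut T, value: Option<T>) {
    if let Some(value) = value {
        *target = value;
    }
}

fn main() {
    let mut width = 640.0_f32;
    merge(&mut width, Some(480.0)); // a user-provided value overrides the default
    merge(&mut width, None);        // a missing value leaves the current one untouched
    assert_eq!(width, 480.0);
}
```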

File diff suppressed because it is too large

View File

@@ -1,8 +1,6 @@
use super::{MessageCacheMetadata, WorkflowStepEdit};
use crate::{
assistant_panel, prompt_library, slash_command::file_command, CacheStatus, Context,
ContextEvent, ContextId, ContextOperation, MessageId, MessageStatus, PromptBuilder,
WorkflowStepEditKind,
assistant_panel, prompt_library, slash_command::file_command, workflow::tool, CacheStatus,
Context, ContextEvent, ContextId, ContextOperation, MessageId, MessageStatus, PromptBuilder,
};
use anyhow::Result;
use assistant_slash_command::{
@@ -10,13 +8,15 @@ use assistant_slash_command::{
SlashCommandRegistry,
};
use collections::HashSet;
use fs::FakeFs;
use fs::{FakeFs, Fs as _};
use gpui::{AppContext, Model, SharedString, Task, TestAppContext, WeakView};
use language::{Buffer, BufferSnapshot, LanguageRegistry, LspAdapterDelegate};
use indoc::indoc;
use language::{Buffer, LanguageRegistry, LspAdapterDelegate};
use language_model::{LanguageModelCacheConfiguration, LanguageModelRegistry, Role};
use parking_lot::Mutex;
use project::Project;
use rand::prelude::*;
use rope::Point;
use serde_json::json;
use settings::SettingsStore;
use std::{
@@ -27,15 +27,14 @@ use std::{
rc::Rc,
sync::{atomic::AtomicBool, Arc},
};
use text::{network::Network, OffsetRangeExt as _, ReplicaId};
use text::{network::Network, OffsetRangeExt as _, ReplicaId, ToPoint as _};
use ui::{Context as _, WindowContext};
use unindent::Unindent;
use util::{
test::{generate_marked_text, marked_text_ranges},
RandomCharIter,
};
use util::{test::marked_text_ranges, RandomCharIter};
use workspace::Workspace;
use super::MessageCacheMetadata;
#[gpui::test]
fn test_inserting_and_removing_messages(cx: &mut AppContext) {
let settings_store = SettingsStore::test(cx);
@@ -480,12 +479,28 @@ async fn test_workflow_step_parsing(cx: &mut TestAppContext) {
cx.update(prompt_library::init);
let settings_store = cx.update(SettingsStore::test);
cx.set_global(settings_store);
cx.update(language::init);
cx.update(Project::init_settings);
let fs = FakeFs::new(cx.executor());
fs.as_fake()
.insert_tree(
"/root",
json!({
"hello.rs": r#"
fn hello() {
println!("Hello, World!");
}
"#.unindent()
}),
)
.await;
let project = Project::test(fs, [Path::new("/root")], cx).await;
cx.update(LanguageModelRegistry::test);
let model = cx.read(|cx| {
LanguageModelRegistry::read_global(cx)
.active_model()
.unwrap()
});
cx.update(assistant_panel::init);
let registry = Arc::new(LanguageRegistry::test(cx.executor()));
@@ -500,382 +515,151 @@ async fn test_workflow_step_parsing(cx: &mut TestAppContext) {
cx,
)
});
let buffer = context.read_with(cx, |context, _| context.buffer.clone());
// Insert an assistant message to simulate a response.
let assistant_message_id = context.update(cx, |context, cx| {
let user_message_id = context.messages(cx).next().unwrap().id;
context
.insert_message_after(user_message_id, Role::Assistant, MessageStatus::Done, cx)
.unwrap()
.id
// Simulate user input
let user_message = indoc! {r#"
Please add unnecessary complexity to this code:
```hello.rs
fn main() {
println!("Hello, World!");
}
```
"#};
buffer.update(cx, |buffer, cx| {
buffer.edit([(0..0, user_message)], None, cx);
});
// No edit tags
edit(
&context,
"
«one
two
»",
cx,
);
expect_steps(
&context,
"
one
two
",
&[],
cx,
);
// Partial edit step tag is added
edit(
&context,
"
one
two
«
<step»",
cx,
);
expect_steps(
&context,
"
one
two
<step",
&[],
cx,
);
// The rest of the step tag is added. The unclosed
// step is treated as incomplete.
edit(
&context,
"
one
two
<step«>
Add a second function
```rust
fn two() {}
```
<edit>»",
cx,
);
expect_steps(
&context,
"
one
two
«<step>
Add a second function
```rust
fn two() {}
```
<edit>»",
&[&[]],
cx,
);
// The full suggestion is added
edit(
&context,
"
one
two
// Simulate LLM response with edit steps
let llm_response = indoc! {r#"
Sure, I can help you with that. Here's a step-by-step process:
<step>
Add a second function
First, let's extract the greeting into a separate function:
```rust
fn two() {}
```
fn greet() {
println!("Hello, World!");
}
<edit>«
<path>src/lib.rs</path>
<operation>insert_after</operation>
<search>fn one</search>
<description>add a `two` function</description>
</edit>
fn main() {
greet();
}
```
</step>
also,»",
cx,
);
expect_steps(
&context,
"
one
two
«<step>
Add a second function
```rust
fn two() {}
```
<edit>
<path>src/lib.rs</path>
<operation>insert_after</operation>
<search>fn one</search>
<description>add a `two` function</description>
</edit>
</step>»
also,",
&[&[WorkflowStepEdit {
path: "src/lib.rs".into(),
kind: WorkflowStepEditKind::InsertAfter {
search: "fn one".into(),
description: "add a `two` function".into(),
},
}]],
cx,
);
// The step is manually edited.
edit(
&context,
"
one
two
<step>
Add a second function
Now, let's make the greeting customizable:
```rust
fn two() {}
```
fn greet(name: &str) {
println!("Hello, {}!", name);
}
<edit>
<path>src/lib.rs</path>
<operation>insert_after</operation>
<search>«fn zero»</search>
<description>add a `two` function</description>
</edit>
fn main() {
greet("World");
}
```
</step>
also,",
cx,
);
expect_steps(
&context,
"
These changes make the code more modular and flexible.
"#};
one
two
// Simulate the assist method to trigger the LLM response
context.update(cx, |context, cx| context.assist(cx));
cx.run_until_parked();
«<step>
Add a second function
```rust
fn two() {}
```
<edit>
<path>src/lib.rs</path>
<operation>insert_after</operation>
<search>fn zero</search>
<description>add a `two` function</description>
</edit>
</step>»
also,",
&[&[WorkflowStepEdit {
path: "src/lib.rs".into(),
kind: WorkflowStepEditKind::InsertAfter {
search: "fn zero".into(),
description: "add a `two` function".into(),
},
}]],
cx,
);
// When setting the message role to User, the steps are cleared.
context.update(cx, |context, cx| {
context.cycle_message_roles(HashSet::from_iter([assistant_message_id]), cx);
context.cycle_message_roles(HashSet::from_iter([assistant_message_id]), cx);
// Retrieve the assistant response message's start from the context
let response_start_row = context.read_with(cx, |context, cx| {
let buffer = context.buffer.read(cx);
context.message_anchors[1].start.to_point(buffer).row
});
expect_steps(
&context,
"
one
two
// Simulate the LLM completion
model
.as_fake()
.stream_last_completion_response(llm_response.to_string());
model.as_fake().end_last_completion_stream();
<step>
Add a second function
// Wait for the completion to be processed
cx.run_until_parked();
```rust
fn two() {}
```
<edit>
<path>src/lib.rs</path>
<operation>insert_after</operation>
<search>fn zero</search>
<description>add a `two` function</description>
</edit>
</step>
also,",
&[],
cx,
);
// When setting the message role back to Assistant, the steps are reparsed.
context.update(cx, |context, cx| {
context.cycle_message_roles(HashSet::from_iter([assistant_message_id]), cx);
// Verify that the edit steps were parsed correctly
context.read_with(cx, |context, cx| {
assert_eq!(
workflow_steps(context, cx),
vec![
(
Point::new(response_start_row + 2, 0)..Point::new(response_start_row + 12, 3),
WorkflowStepTestStatus::Pending
),
(
Point::new(response_start_row + 14, 0)..Point::new(response_start_row + 24, 3),
WorkflowStepTestStatus::Pending
),
]
);
});
expect_steps(
&context,
"
one
two
«<step>
Add a second function
```rust
fn two() {}
```
<edit>
<path>src/lib.rs</path>
<operation>insert_after</operation>
<search>fn zero</search>
<description>add a `two` function</description>
</edit>
</step>»
also,",
&[&[WorkflowStepEdit {
path: "src/lib.rs".into(),
kind: WorkflowStepEditKind::InsertAfter {
search: "fn zero".into(),
description: "add a `two` function".into(),
},
}]],
cx,
);
// Ensure steps are re-parsed when deserializing.
let serialized_context = context.read_with(cx, |context, cx| context.serialize(cx));
let deserialized_context = cx.new_model(|cx| {
Context::deserialize(
serialized_context,
Default::default(),
registry.clone(),
prompt_builder.clone(),
None,
None,
cx,
)
});
expect_steps(
&deserialized_context,
"
one
two
«<step>
Add a second function
```rust
fn two() {}
```
<edit>
<path>src/lib.rs</path>
<operation>insert_after</operation>
<search>fn zero</search>
<description>add a `two` function</description>
</edit>
</step>»
also,",
&[&[WorkflowStepEdit {
path: "src/lib.rs".into(),
kind: WorkflowStepEditKind::InsertAfter {
search: "fn zero".into(),
description: "add a `two` function".into(),
},
}]],
cx,
);
fn edit(context: &Model<Context>, new_text_marked_with_edits: &str, cx: &mut TestAppContext) {
context.update(cx, |context, cx| {
context.buffer.update(cx, |buffer, cx| {
buffer.edit_via_marked_text(&new_text_marked_with_edits.unindent(), None, cx);
});
model
.as_fake()
.respond_to_last_tool_use(tool::WorkflowStepResolutionTool {
step_title: "Title".into(),
suggestions: vec![tool::WorkflowSuggestionTool {
path: "/root/hello.rs".into(),
// Simulate a symbol name that's slightly different than our outline query
kind: tool::WorkflowSuggestionToolKind::Update {
symbol: "fn main()".into(),
description: "Extract a greeting function".into(),
},
}],
});
cx.executor().run_until_parked();
// Wait for tool use to be processed.
cx.run_until_parked();
// Verify that the first edit step is not pending anymore.
context.read_with(cx, |context, cx| {
assert_eq!(
workflow_steps(context, cx),
vec![
(
Point::new(response_start_row + 2, 0)..Point::new(response_start_row + 12, 3),
WorkflowStepTestStatus::Resolved
),
(
Point::new(response_start_row + 14, 0)..Point::new(response_start_row + 24, 3),
WorkflowStepTestStatus::Pending
),
]
);
});
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum WorkflowStepTestStatus {
Pending,
Resolved,
Error,
}
fn expect_steps(
context: &Model<Context>,
expected_marked_text: &str,
expected_suggestions: &[&[WorkflowStepEdit]],
cx: &mut TestAppContext,
) {
context.update(cx, |context, cx| {
let expected_marked_text = expected_marked_text.unindent();
let (expected_text, expected_ranges) = marked_text_ranges(&expected_marked_text, false);
context.buffer.read_with(cx, |buffer, _| {
assert_eq!(buffer.text(), expected_text);
let ranges = context
.workflow_steps
.iter()
.map(|entry| entry.range.to_offset(buffer))
.collect::<Vec<_>>();
let marked = generate_marked_text(&expected_text, &ranges, false);
assert_eq!(
marked,
expected_marked_text,
"unexpected suggestion ranges. actual: {ranges:?}, expected: {expected_ranges:?}"
);
let suggestions = context
.workflow_steps
.iter()
.map(|step| {
step.edits
.iter()
.map(|edit| {
let edit = edit.as_ref().unwrap();
WorkflowStepEdit {
path: edit.path.clone(),
kind: edit.kind.clone(),
}
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
assert_eq!(suggestions, expected_suggestions);
});
});
fn workflow_steps(
context: &Context,
cx: &AppContext,
) -> Vec<(Range<Point>, WorkflowStepTestStatus)> {
context
.workflow_steps
.iter()
.map(|step| {
let buffer = context.buffer.read(cx);
let status = match &step.step.read(cx).resolution {
None => WorkflowStepTestStatus::Pending,
Some(Ok(_)) => WorkflowStepTestStatus::Resolved,
Some(Err(_)) => WorkflowStepTestStatus::Error,
};
(step.range.to_point(buffer), status)
})
.collect()
}
}
@@ -1089,7 +873,6 @@ async fn test_random_context_collaboration(cx: &mut TestAppContext, mut rng: Std
range: section_start..section_end,
icon: ui::IconName::Ai,
label: "section".into(),
metadata: None,
});
}
@@ -1166,7 +949,9 @@ async fn test_random_context_collaboration(cx: &mut TestAppContext, mut rng: Std
);
network.lock().broadcast(replica_id, ops_to_send);
context.update(cx, |context, cx| context.apply_ops(ops_to_receive, cx));
context
.update(cx, |context, cx| context.apply_ops(ops_to_receive, cx))
.unwrap();
} else if rng.gen_bool(0.1) && replica_id != 0 {
log::info!("Context {}: disconnecting", context_index);
network.lock().disconnect_peer(replica_id);
@@ -1178,7 +963,9 @@ async fn test_random_context_collaboration(cx: &mut TestAppContext, mut rng: Std
.map(ContextOperation::from_proto)
.collect::<Result<Vec<_>>>()
.unwrap();
context.update(cx, |context, cx| context.apply_ops(ops, cx));
context
.update(cx, |context, cx| context.apply_ops(ops, cx))
.unwrap();
}
}
}
@@ -1422,8 +1209,6 @@ impl SlashCommand for FakeSlashCommand {
fn run(
self: Arc<Self>,
_arguments: &[String],
_context_slash_command_output_sections: &[SlashCommandOutputSection<language::Anchor>],
_context_buffer: BufferSnapshot,
_workspace: WeakView<Workspace>,
_delegate: Option<Arc<dyn LspAdapterDelegate>>,
_cx: &mut WindowContext,

View File

@@ -15,7 +15,6 @@ use language::LanguageRegistry;
use paths::contexts_dir;
use project::Project;
use regex::Regex;
use rpc::AnyProtoClient;
use std::{
cmp::Reverse,
ffi::OsStr,
@@ -26,7 +25,7 @@ use std::{
};
use util::{ResultExt, TryFutureExt};
pub fn init(client: &AnyProtoClient) {
pub fn init(client: &Arc<Client>) {
client.add_model_message_handler(ContextStore::handle_advertise_contexts);
client.add_model_request_handler(ContextStore::handle_open_context);
client.add_model_request_handler(ContextStore::handle_create_context);
@@ -223,7 +222,7 @@ impl ContextStore {
if let Some(context) = this.loaded_context_for_id(&context_id, cx) {
let operation_proto = envelope.payload.operation.context("invalid operation")?;
let operation = ContextOperation::from_proto(operation_proto)?;
context.update(cx, |context, cx| context.apply_ops([operation], cx));
context.update(cx, |context, cx| context.apply_ops([operation], cx))?;
}
Ok(())
})?
@@ -357,6 +356,9 @@ impl ContextStore {
let Some(project_id) = project.remote_id() else {
return Task::ready(Err(anyhow!("project was not remote")));
};
if project.is_local_or_ssh() {
return Task::ready(Err(anyhow!("cannot create remote contexts as the host")));
}
let replica_id = project.replica_id();
let capability = project.capability();
@@ -387,11 +389,11 @@ impl ContextStore {
context_proto
.operations
.into_iter()
.map(ContextOperation::from_proto)
.map(|op| ContextOperation::from_proto(op))
.collect::<Result<Vec<_>>>()
})
.await?;
context.update(&mut cx, |context, cx| context.apply_ops(operations, cx))?;
context.update(&mut cx, |context, cx| context.apply_ops(operations, cx))??;
this.update(&mut cx, |this, cx| {
if let Some(existing_context) = this.loaded_context_for_id(&context_id, cx) {
existing_context
@@ -485,6 +487,9 @@ impl ContextStore {
let Some(project_id) = project.remote_id() else {
return Task::ready(Err(anyhow!("project was not remote")));
};
if project.is_local_or_ssh() {
return Task::ready(Err(anyhow!("cannot open remote contexts as the host")));
}
if let Some(context) = self.loaded_context_for_id(&context_id, cx) {
return Task::ready(Ok(context));
@@ -521,11 +526,11 @@ impl ContextStore {
context_proto
.operations
.into_iter()
.map(ContextOperation::from_proto)
.map(|op| ContextOperation::from_proto(op))
.collect::<Result<Vec<_>>>()
})
.await?;
context.update(&mut cx, |context, cx| context.apply_ops(operations, cx))?;
context.update(&mut cx, |context, cx| context.apply_ops(operations, cx))??;
this.update(&mut cx, |this, cx| {
if let Some(existing_context) = this.loaded_context_for_id(&context_id, cx) {
existing_context
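
One side of the `apply_ops` call sites above uses a single `?` and the other a double `??`; the difference is whether the closure passed to `update` itself returns a `Result`, which makes the outer call yield a nested `Result<Result<_>>`. A simplified sketch of why both layers then need unwrapping (the `update` function here is a stand-in, not gpui's real API, and the example assumes the anyhow crate already used throughout these files):

```rust
use anyhow::Result;

// Stand-in for Model::update: the outer Result models "the handle was still usable",
// the inner Result is whatever the closure itself returns.
fn update<T>(f: impl FnOnce() -> Result<T>) -> Result<Result<T>> {
    Ok(f())
}

fn apply() -> Result<()> {
    // First ? unwraps the outer update Result, second ? unwraps the closure's Result.
    let value = update(|| Ok(42))??;
    assert_eq!(value, 42);
    Ok(())
}

fn main() -> Result<()> {
    apply()
}
```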

File diff suppressed because it is too large

View File

@@ -794,9 +794,8 @@ impl PromptLibrary {
content: vec![body.to_string().into()],
cache: false,
}],
tools: Vec::new(),
stop: Vec::new(),
temperature: None,
temperature: 1.,
},
cx,
)
@@ -921,8 +920,10 @@ impl PromptLibrary {
scrollbar_width: Pixels::ZERO,
syntax: cx.theme().syntax().clone(),
status: cx.theme().status().clone(),
inlay_hints_style:
editor::make_inlay_hints_style(cx),
inlay_hints_style: HighlightStyle {
color: Some(cx.theme().status().hint),
..HighlightStyle::default()
},
suggestions_style: HighlightStyle {
color: Some(cx.theme().status().predictive),
..HighlightStyle::default()

View File

@@ -4,20 +4,13 @@ use fs::Fs;
use futures::StreamExt;
use gpui::AssetSource;
use handlebars::{Handlebars, RenderError};
use language::{BufferSnapshot, LanguageName, Point};
use language::BufferSnapshot;
use parking_lot::Mutex;
use serde::Serialize;
use std::{ops::Range, path::PathBuf, sync::Arc, time::Duration};
use text::LineEnding;
use util::ResultExt;
#[derive(Serialize)]
pub struct ContentPromptDiagnosticContext {
pub line_number: usize,
pub error_message: String,
pub code_content: String,
}
#[derive(Serialize)]
pub struct ContentPromptContext {
pub content_type: String,
@@ -27,7 +20,6 @@ pub struct ContentPromptContext {
pub document_content: String,
pub user_prompt: String,
pub rewrite_section: Option<String>,
pub diagnostic_errors: Vec<ContentPromptDiagnosticContext>,
}
#[derive(Serialize)]
@@ -40,11 +32,6 @@ pub struct TerminalAssistantPromptContext {
pub user_prompt: String,
}
#[derive(Serialize)]
pub struct ProjectSlashCommandPromptContext {
pub context_buffer: String,
}
/// Context required to generate a workflow step resolution prompt.
#[derive(Debug, Serialize)]
pub struct StepResolutionContext {
@@ -95,9 +82,10 @@ impl PromptBuilder {
/// and application context.
/// * `handlebars` - An `Arc<Mutex<Handlebars>>` for registering and updating templates.
fn watch_fs_for_template_overrides(
params: PromptLoadingParams,
mut params: PromptLoadingParams,
handlebars: Arc<Mutex<Handlebars<'static>>>,
) {
params.repo_path = None;
let templates_dir = paths::prompt_overrides_dir(params.repo_path.as_deref());
params.cx.background_executor()
.spawn(async move {
@@ -135,7 +123,7 @@ impl PromptBuilder {
if params.fs.is_dir(parent_dir).await {
let (mut changes, _watcher) = params.fs.watch(parent_dir, Duration::from_secs(1)).await;
while let Some(changed_paths) = changes.next().await {
if changed_paths.iter().any(|p| &p.path == &templates_dir) {
if changed_paths.iter().any(|p| p == &templates_dir) {
let mut log_message = format!("Prompt template overrides directory detected at {}", templates_dir.display());
if let Ok(target) = params.fs.read_link(&templates_dir).await {
log_message.push_str(" -> ");
@@ -174,18 +162,18 @@ impl PromptBuilder {
let mut combined_changes = futures::stream::select(changes, parent_changes);
while let Some(changed_paths) = combined_changes.next().await {
if changed_paths.iter().any(|p| &p.path == &templates_dir) {
if changed_paths.iter().any(|p| p == &templates_dir) {
if !params.fs.is_dir(&templates_dir).await {
log::info!("Prompt template overrides directory removed. Restoring built-in prompt templates.");
Self::register_built_in_templates(&mut handlebars.lock()).log_err();
break;
}
}
for event in changed_paths {
if event.path.starts_with(&templates_dir) && event.path.extension().map_or(false, |ext| ext == "hbs") {
log::info!("Reloading prompt template override: {}", event.path.display());
if let Some(content) = params.fs.load(&event.path).await.log_err() {
let file_name = event.path.file_stem().unwrap().to_string_lossy();
for changed_path in changed_paths {
if changed_path.starts_with(&templates_dir) && changed_path.extension().map_or(false, |ext| ext == "hbs") {
log::info!("Reloading prompt template override: {}", changed_path.display());
if let Some(content) = params.fs.load(&changed_path).await.log_err() {
let file_name = changed_path.file_stem().unwrap().to_string_lossy();
handlebars.lock().register_template_string(&file_name, content).log_err();
}
}
@@ -216,11 +204,11 @@ impl PromptBuilder {
pub fn generate_content_prompt(
&self,
user_prompt: String,
language_name: Option<&LanguageName>,
language_name: Option<&str>,
buffer: BufferSnapshot,
range: Range<usize>,
) -> Result<String, RenderError> {
let content_type = match language_name.as_ref().map(|l| l.0.as_ref()) {
let content_type = match language_name {
None | Some("Markdown" | "Plain Text") => "text",
Some(_) => "code",
};
@@ -232,8 +220,7 @@ impl PromptBuilder {
let before_range = 0..range.start;
let truncated_before = if before_range.len() > MAX_CTX {
is_truncated = true;
let start = buffer.clip_offset(range.start - MAX_CTX, text::Bias::Right);
start..range.start
range.start - MAX_CTX..range.start
} else {
before_range
};
@@ -241,8 +228,7 @@ impl PromptBuilder {
let after_range = range.end..buffer.len();
let truncated_after = if after_range.len() > MAX_CTX {
is_truncated = true;
let end = buffer.clip_offset(range.end + MAX_CTX, text::Bias::Left);
range.end..end
range.end..range.end + MAX_CTX
} else {
after_range
};
@@ -273,17 +259,6 @@ impl PromptBuilder {
} else {
None
};
let diagnostics = buffer.diagnostics_in_range::<_, Point>(range, false);
let diagnostic_errors: Vec<ContentPromptDiagnosticContext> = diagnostics
.map(|entry| {
let start = entry.range.start;
ContentPromptDiagnosticContext {
line_number: (start.row + 1) as usize,
error_message: entry.diagnostic.message.clone(),
code_content: buffer.text_for_range(entry.range.clone()).collect(),
}
})
.collect();
let context = ContentPromptContext {
content_type: content_type.to_string(),
@@ -293,8 +268,8 @@ impl PromptBuilder {
document_content,
user_prompt,
rewrite_section,
diagnostic_errors,
};
self.handlebars.lock().render("content_prompt", &context)
}
@@ -323,13 +298,10 @@ impl PromptBuilder {
self.handlebars.lock().render("edit_workflow", &())
}
pub fn generate_project_slash_command_prompt(
pub fn generate_step_resolution_prompt(
&self,
context_buffer: String,
context: &StepResolutionContext,
) -> Result<String, RenderError> {
self.handlebars.lock().render(
"project_slash_command",
&ProjectSlashCommandPromptContext { context_buffer },
)
self.handlebars.lock().render("step_resolution", context)
}
}

View File

@@ -18,11 +18,9 @@ use std::{
};
use ui::ActiveTheme;
use workspace::Workspace;
pub mod auto_command;
pub mod cargo_workspace_command;
pub mod context_server_command;
pub mod default_command;
pub mod delta_command;
pub mod diagnostics_command;
pub mod docs_command;
pub mod fetch_command;

View File

@@ -1,362 +0,0 @@
use super::create_label_for_command;
use super::{SlashCommand, SlashCommandOutput};
use anyhow::{anyhow, Result};
use assistant_slash_command::{ArgumentCompletion, SlashCommandOutputSection};
use feature_flags::FeatureFlag;
use futures::StreamExt;
use gpui::{AppContext, AsyncAppContext, Task, WeakView};
use language::{CodeLabel, LspAdapterDelegate};
use language_model::{
LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
LanguageModelRequestMessage, Role,
};
use semantic_index::{FileSummary, SemanticDb};
use smol::channel;
use std::sync::{atomic::AtomicBool, Arc};
use ui::{BorrowAppContext, WindowContext};
use util::ResultExt;
use workspace::Workspace;
pub struct AutoSlashCommandFeatureFlag;
impl FeatureFlag for AutoSlashCommandFeatureFlag {
const NAME: &'static str = "auto-slash-command";
}
pub(crate) struct AutoCommand;
impl SlashCommand for AutoCommand {
fn name(&self) -> String {
"auto".into()
}
fn description(&self) -> String {
"Automatically infer what context to add, based on your prompt".into()
}
fn menu_text(&self) -> String {
"Automatically Infer Context".into()
}
fn label(&self, cx: &AppContext) -> CodeLabel {
create_label_for_command("auto", &["--prompt"], cx)
}
fn complete_argument(
self: Arc<Self>,
_arguments: &[String],
_cancel: Arc<AtomicBool>,
workspace: Option<WeakView<Workspace>>,
cx: &mut WindowContext,
) -> Task<Result<Vec<ArgumentCompletion>>> {
// There's no autocomplete for a prompt, since it's arbitrary text.
// However, we can use this opportunity to kick off a drain of the backlog.
// That way, it can hopefully be done resummarizing by the time we've actually
// typed out our prompt. This re-runs on every keystroke during autocomplete,
// but in the future, we could instead do it only once, when /auto is first entered.
let Some(workspace) = workspace.and_then(|ws| ws.upgrade()) else {
log::warn!("workspace was dropped or unavailable during /auto autocomplete");
return Task::ready(Ok(Vec::new()));
};
let project = workspace.read(cx).project().clone();
let Some(project_index) =
cx.update_global(|index: &mut SemanticDb, cx| index.project_index(project, cx))
else {
return Task::ready(Err(anyhow!("No project indexer, cannot use /auto")));
};
let cx: &mut AppContext = cx;
cx.spawn(|cx: gpui::AsyncAppContext| async move {
let task = project_index.read_with(&cx, |project_index, cx| {
project_index.flush_summary_backlogs(cx)
})?;
cx.background_executor().spawn(task).await;
anyhow::Ok(Vec::new())
})
}
fn requires_argument(&self) -> bool {
true
}
fn run(
self: Arc<Self>,
arguments: &[String],
_context_slash_command_output_sections: &[SlashCommandOutputSection<language::Anchor>],
_context_buffer: language::BufferSnapshot,
workspace: WeakView<Workspace>,
_delegate: Option<Arc<dyn LspAdapterDelegate>>,
cx: &mut WindowContext,
) -> Task<Result<SlashCommandOutput>> {
let Some(workspace) = workspace.upgrade() else {
return Task::ready(Err(anyhow::anyhow!("workspace was dropped")));
};
if arguments.is_empty() {
return Task::ready(Err(anyhow!("missing prompt")));
};
let argument = arguments.join(" ");
let original_prompt = argument.to_string();
let project = workspace.read(cx).project().clone();
let Some(project_index) =
cx.update_global(|index: &mut SemanticDb, cx| index.project_index(project, cx))
else {
return Task::ready(Err(anyhow!("no project indexer")));
};
let task = cx.spawn(|cx: gpui::AsyncWindowContext| async move {
let summaries = project_index
.read_with(&cx, |project_index, cx| project_index.all_summaries(cx))?
.await?;
commands_for_summaries(&summaries, &original_prompt, &cx).await
});
// As a convenience, append /auto's argument to the end of the prompt
// so you don't have to write it again.
let original_prompt = argument.to_string();
cx.background_executor().spawn(async move {
let commands = task.await?;
let mut prompt = String::new();
log::info!(
"Translating this response into slash-commands: {:?}",
commands
);
for command in commands {
prompt.push('/');
prompt.push_str(&command.name);
prompt.push(' ');
prompt.push_str(&command.arg);
prompt.push('\n');
}
prompt.push('\n');
prompt.push_str(&original_prompt);
Ok(SlashCommandOutput {
text: prompt,
sections: Vec::new(),
run_commands_in_text: true,
})
})
}
}
const PROMPT_INSTRUCTIONS_BEFORE_SUMMARY: &str = include_str!("prompt_before_summary.txt");
const PROMPT_INSTRUCTIONS_AFTER_SUMMARY: &str = include_str!("prompt_after_summary.txt");
fn summaries_prompt(summaries: &[FileSummary], original_prompt: &str) -> String {
let json_summaries = serde_json::to_string(summaries).unwrap();
format!("{PROMPT_INSTRUCTIONS_BEFORE_SUMMARY}\n{json_summaries}\n{PROMPT_INSTRUCTIONS_AFTER_SUMMARY}\n{original_prompt}")
}
/// The slash commands that the model is told about, and which we look for in the inference response.
const SUPPORTED_SLASH_COMMANDS: &[&str] = &["search", "file"];
#[derive(Debug, Clone)]
struct CommandToRun {
name: String,
arg: String,
}
/// Given the pre-indexed file summaries for this project, as well as the original prompt
/// string passed to `/auto`, get a list of slash commands to run, along with their arguments.
///
/// The prompt's output does not include the slashes (to reduce the chance that it makes a mistake),
/// so taking one of these returned Strings and turning it into a real slash-command-with-argument
/// involves prepending a slash to it.
///
/// This function will validate that each of the returned lines begins with one of SUPPORTED_SLASH_COMMANDS.
/// Any other lines it encounters will be discarded, with a warning logged.
async fn commands_for_summaries(
summaries: &[FileSummary],
original_prompt: &str,
cx: &AsyncAppContext,
) -> Result<Vec<CommandToRun>> {
if summaries.is_empty() {
log::warn!("Inferring no context because there were no summaries available.");
return Ok(Vec::new());
}
// Use the globally configured model to translate the summaries into slash-commands,
// because Qwen2-7B-Instruct has not done a good job at that task.
let Some(model) = cx.update(|cx| LanguageModelRegistry::read_global(cx).active_model())? else {
log::warn!("Can't infer context because there's no active model.");
return Ok(Vec::new());
};
// Only go up to 90% of the actual max token count, to reduce chances of
// exceeding the token count due to inaccuracies in the token counting heuristic.
let max_token_count = (model.max_token_count() * 9) / 10;
// Rather than recursing (which would require this async function use a pinned box),
// we use an explicit stack of arguments and answers for when we need to "recurse."
let mut stack = vec![summaries];
let mut final_response = Vec::new();
let mut prompts = Vec::new();
// TODO We only need to create multiple Requests because we currently
// don't have the ability to tell if a CompletionProvider::complete response
// was a "too many tokens in this request" error. If we had that, then
// we could try the request once, instead of having to make separate requests
// to check the token count and then afterwards to run the actual prompt.
let make_request = |prompt: String| LanguageModelRequest {
messages: vec![LanguageModelRequestMessage {
role: Role::User,
content: vec![prompt.into()],
// Nothing in here will benefit from caching
cache: false,
}],
tools: Vec::new(),
stop: Vec::new(),
temperature: None,
};
while let Some(current_summaries) = stack.pop() {
// The split can result in one slice being empty and the other having one element.
// Whenever that happens, skip the empty one.
if current_summaries.is_empty() {
continue;
}
log::info!(
"Inferring prompt context using {} file summaries",
current_summaries.len()
);
let prompt = summaries_prompt(&current_summaries, original_prompt);
let start = std::time::Instant::now();
// Per OpenAI, 1 token ~= 4 chars in English (we go with 4.5 to overestimate a bit, because failed API requests cost a lot of perf)
// Verifying this against an actual model.count_tokens() confirms that it's usually within ~5% of the correct answer, whereas
// getting the correct answer from tiktoken takes hundreds of milliseconds (compared to this arithmetic being ~free).
// source: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
let token_estimate = prompt.len() * 2 / 9;
let duration = start.elapsed();
log::info!(
"Time taken to count tokens for prompt of length {:?}B: {:?}",
prompt.len(),
duration
);
if token_estimate < max_token_count {
prompts.push(prompt);
} else if current_summaries.len() == 1 {
log::warn!("Inferring context for a single file's summary failed because the prompt's token length exceeded the model's token limit.");
} else {
log::info!(
"Context inference using file summaries resulted in a prompt containing {token_estimate} tokens, which exceeded the model's max of {max_token_count}. Retrying as two separate prompts, each including half the number of summaries.",
);
let (left, right) = current_summaries.split_at(current_summaries.len() / 2);
stack.push(right);
stack.push(left);
}
}
let all_start = std::time::Instant::now();
let (tx, rx) = channel::bounded(1024);
let completion_streams = prompts
.into_iter()
.map(|prompt| {
let request = make_request(prompt.clone());
let model = model.clone();
let tx = tx.clone();
let stream = model.stream_completion(request, &cx);
(stream, tx)
})
.collect::<Vec<_>>();
cx.background_executor()
.spawn(async move {
let futures = completion_streams
.into_iter()
.enumerate()
.map(|(ix, (stream, tx))| async move {
let start = std::time::Instant::now();
let events = stream.await?;
log::info!("Time taken for awaiting /await chunk stream #{ix}: {:?}", start.elapsed());
let completion: String = events
.filter_map(|event| async {
if let Ok(LanguageModelCompletionEvent::Text(text)) = event {
Some(text)
} else {
None
}
})
.collect()
.await;
log::info!("Time taken for all /auto chunks to come back for #{ix}: {:?}", start.elapsed());
for line in completion.split('\n') {
if let Some(first_space) = line.find(' ') {
let command = &line[..first_space].trim();
let arg = &line[first_space..].trim();
tx.send(CommandToRun {
name: command.to_string(),
arg: arg.to_string(),
})
.await?;
} else if !line.trim().is_empty() {
// All slash-commands currently supported in context inference need a space for the argument.
log::warn!(
"Context inference returned a non-blank line that contained no spaces (meaning no argument for the slash command): {:?}",
line
);
}
}
anyhow::Ok(())
})
.collect::<Vec<_>>();
let _ = futures::future::try_join_all(futures).await.log_err();
let duration = all_start.elapsed();
eprintln!("All futures completed in {:?}", duration);
})
.await;
drop(tx); // Close the channel so that rx.collect() won't hang. This is safe because all futures have completed.
let results = rx.collect::<Vec<_>>().await;
eprintln!(
"Finished collecting from the channel with {} results",
results.len()
);
for command in results {
// Don't return empty or duplicate commands
if !command.name.is_empty()
&& !final_response
.iter()
.any(|cmd: &CommandToRun| cmd.name == command.name && cmd.arg == command.arg)
{
if SUPPORTED_SLASH_COMMANDS
.iter()
.any(|supported| &command.name == supported)
{
final_response.push(command);
} else {
log::warn!(
"Context inference returned an unrecognized slash command: {:?}",
command
);
}
}
}
// Sort the commands by name (reversed just so that /search appears before /file)
final_response.sort_by(|cmd1, cmd2| cmd1.name.cmp(&cmd2.name).reverse());
Ok(final_response)
}
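
A standalone check of the token-estimate heuristic used earlier in this file: dividing the prompt length by 4.5 characters per token is expressed in integer arithmetic as multiplying by 2 and dividing by 9. The function name below is a stand-in; the real code inlines the expression.

```rust
fn estimate_tokens(prompt_len: usize) -> usize {
    // len / 4.5 == len * 2 / 9, kept in integer math so no float conversion is needed.
    prompt_len * 2 / 9
}

fn main() {
    assert_eq!(estimate_tokens(9_000), 2_000); // 9,000 chars / 4.5 chars per token
    assert_eq!(estimate_tokens(100), 22);      // integer division truncates 22.2 down to 22
}
```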

View File

@@ -1,153 +0,0 @@
use super::{SlashCommand, SlashCommandOutput};
use anyhow::{anyhow, Context, Result};
use assistant_slash_command::{ArgumentCompletion, SlashCommandOutputSection};
use fs::Fs;
use gpui::{AppContext, Model, Task, WeakView};
use language::{BufferSnapshot, LspAdapterDelegate};
use project::{Project, ProjectPath};
use std::{
fmt::Write,
path::Path,
sync::{atomic::AtomicBool, Arc},
};
use ui::prelude::*;
use workspace::Workspace;
pub(crate) struct CargoWorkspaceSlashCommand;
impl CargoWorkspaceSlashCommand {
async fn build_message(fs: Arc<dyn Fs>, path_to_cargo_toml: &Path) -> Result<String> {
let buffer = fs.load(path_to_cargo_toml).await?;
let cargo_toml: cargo_toml::Manifest = toml::from_str(&buffer)?;
let mut message = String::new();
writeln!(message, "You are in a Rust project.")?;
if let Some(workspace) = cargo_toml.workspace {
writeln!(
message,
"The project is a Cargo workspace with the following members:"
)?;
for member in workspace.members {
writeln!(message, "- {member}")?;
}
if !workspace.default_members.is_empty() {
writeln!(message, "The default members are:")?;
for member in workspace.default_members {
writeln!(message, "- {member}")?;
}
}
if !workspace.dependencies.is_empty() {
writeln!(
message,
"The following workspace dependencies are installed:"
)?;
for dependency in workspace.dependencies.keys() {
writeln!(message, "- {dependency}")?;
}
}
} else if let Some(package) = cargo_toml.package {
writeln!(
message,
"The project name is \"{name}\".",
name = package.name
)?;
let description = package
.description
.as_ref()
.and_then(|description| description.get().ok().cloned());
if let Some(description) = description.as_ref() {
writeln!(message, "It describes itself as \"{description}\".")?;
}
if !cargo_toml.dependencies.is_empty() {
writeln!(message, "The following dependencies are installed:")?;
for dependency in cargo_toml.dependencies.keys() {
writeln!(message, "- {dependency}")?;
}
}
}
Ok(message)
}
fn path_to_cargo_toml(project: Model<Project>, cx: &mut AppContext) -> Option<Arc<Path>> {
let worktree = project.read(cx).worktrees(cx).next()?;
let worktree = worktree.read(cx);
let entry = worktree.entry_for_path("Cargo.toml")?;
let path = ProjectPath {
worktree_id: worktree.id(),
path: entry.path.clone(),
};
Some(Arc::from(
project.read(cx).absolute_path(&path, cx)?.as_path(),
))
}
}
impl SlashCommand for CargoWorkspaceSlashCommand {
fn name(&self) -> String {
"cargo-workspace".into()
}
fn description(&self) -> String {
"insert project workspace metadata".into()
}
fn menu_text(&self) -> String {
"Insert Project Workspace Metadata".into()
}
fn complete_argument(
self: Arc<Self>,
_arguments: &[String],
_cancel: Arc<AtomicBool>,
_workspace: Option<WeakView<Workspace>>,
_cx: &mut WindowContext,
) -> Task<Result<Vec<ArgumentCompletion>>> {
Task::ready(Err(anyhow!("this command does not require argument")))
}
fn requires_argument(&self) -> bool {
false
}
fn run(
self: Arc<Self>,
_arguments: &[String],
_context_slash_command_output_sections: &[SlashCommandOutputSection<language::Anchor>],
_context_buffer: BufferSnapshot,
workspace: WeakView<Workspace>,
_delegate: Option<Arc<dyn LspAdapterDelegate>>,
cx: &mut WindowContext,
) -> Task<Result<SlashCommandOutput>> {
let output = workspace.update(cx, |workspace, cx| {
let project = workspace.project().clone();
let fs = workspace.project().read(cx).fs().clone();
let path = Self::path_to_cargo_toml(project, cx);
let output = cx.background_executor().spawn(async move {
let path = path.with_context(|| "Cargo.toml not found")?;
Self::build_message(fs, &path).await
});
cx.foreground_executor().spawn(async move {
let text = output.await?;
let range = 0..text.len();
Ok(SlashCommandOutput {
text,
sections: vec![SlashCommandOutputSection {
range,
icon: IconName::FileTree,
label: "Project".into(),
metadata: None,
}],
run_commands_in_text: false,
})
})
});
output.unwrap_or_else(|error| Task::ready(Err(error)))
}
}
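
A minimal standalone sketch of the parsing step that `CargoWorkspaceSlashCommand::build_message` performs above, assuming the same `cargo_toml`, `toml`, and `anyhow` crates; the manifest string is a made-up example, not taken from this diff:

use anyhow::Result;

fn main() -> Result<()> {
    // Hypothetical workspace manifest, for illustration only.
    let manifest_str = r#"
        [workspace]
        members = ["crates/editor", "crates/gpui"]

        [workspace.dependencies]
        serde = "1"
    "#;

    // Same deserialization path as build_message: `cargo_toml::Manifest`
    // implements `Deserialize`, so `toml::from_str` can produce it directly.
    let manifest: cargo_toml::Manifest = toml::from_str(manifest_str)?;

    if let Some(workspace) = manifest.workspace {
        println!("The project is a Cargo workspace with the following members:");
        for member in workspace.members {
            println!("- {member}");
        }
        for dependency in workspace.dependencies.keys() {
            println!("workspace dependency: {dependency}");
        }
    }
    Ok(())
}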

View File

@@ -1,7 +1,6 @@
use anyhow::{anyhow, Result};
use assistant_slash_command::{
AfterCompletion, ArgumentCompletion, SlashCommand, SlashCommandOutput,
SlashCommandOutputSection,
ArgumentCompletion, SlashCommand, SlashCommandOutput, SlashCommandOutputSection,
};
use collections::HashMap;
use context_servers::{
@@ -9,10 +8,9 @@ use context_servers::{
protocol::PromptInfo,
};
use gpui::{Task, WeakView, WindowContext};
use language::{BufferSnapshot, CodeLabel, LspAdapterDelegate};
use language::LspAdapterDelegate;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use text::LineEnding;
use ui::{IconName, SharedString};
use workspace::Workspace;
@@ -44,70 +42,25 @@ impl SlashCommand for ContextServerSlashCommand {
}
fn requires_argument(&self) -> bool {
self.prompt.arguments.as_ref().map_or(false, |args| {
args.iter().any(|arg| arg.required == Some(true))
})
self.prompt
.arguments
.as_ref()
.map_or(false, |args| !args.is_empty())
}
fn complete_argument(
self: Arc<Self>,
arguments: &[String],
_arguments: &[String],
_cancel: Arc<AtomicBool>,
_workspace: Option<WeakView<Workspace>>,
cx: &mut WindowContext,
_cx: &mut WindowContext,
) -> Task<Result<Vec<ArgumentCompletion>>> {
let server_id = self.server_id.clone();
let prompt_name = self.prompt.name.clone();
let manager = ContextServerManager::global(cx);
let manager = manager.read(cx);
let (arg_name, arg_val) = match completion_argument(&self.prompt, arguments) {
Ok(tp) => tp,
Err(e) => {
return Task::ready(Err(e));
}
};
if let Some(server) = manager.get_server(&server_id) {
cx.foreground_executor().spawn(async move {
let Some(protocol) = server.client.read().clone() else {
return Err(anyhow!("Context server not initialized"));
};
let completion_result = protocol
.completion(
context_servers::types::CompletionReference::Prompt(
context_servers::types::PromptReference {
r#type: context_servers::types::PromptReferenceType::Prompt,
name: prompt_name,
},
),
arg_name,
arg_val,
)
.await?;
let completions = completion_result
.values
.into_iter()
.map(|value| ArgumentCompletion {
label: CodeLabel::plain(value.clone(), None),
new_text: value,
after_completion: AfterCompletion::Continue,
replace_previous_arguments: false,
})
.collect();
Ok(completions)
})
} else {
Task::ready(Err(anyhow!("Context server not found")))
}
Task::ready(Ok(Vec::new()))
}
fn run(
self: Arc<Self>,
arguments: &[String],
_context_slash_command_output_sections: &[SlashCommandOutputSection<language::Anchor>],
_context_buffer: BufferSnapshot,
_workspace: WeakView<Workspace>,
_delegate: Option<Arc<dyn LspAdapterDelegate>>,
cx: &mut WindowContext,
@@ -128,23 +81,18 @@ impl SlashCommand for ContextServerSlashCommand {
return Err(anyhow!("Context server not initialized"));
};
let result = protocol.run_prompt(&prompt_name, prompt_args).await?;
let mut prompt = result.prompt;
// We must normalize the line endings here, since servers might return CR characters.
LineEnding::normalize(&mut prompt);
Ok(SlashCommandOutput {
sections: vec![SlashCommandOutputSection {
range: 0..(prompt.len()),
range: 0..(result.prompt.len()),
icon: IconName::ZedAssistant,
label: SharedString::from(
result
.description
.unwrap_or(format!("Result from {}", prompt_name)),
),
metadata: None,
}],
text: prompt,
text: result.prompt,
run_commands_in_text: false,
})
})
@@ -154,22 +102,6 @@ impl SlashCommand for ContextServerSlashCommand {
}
}
fn completion_argument(prompt: &PromptInfo, arguments: &[String]) -> Result<(String, String)> {
if arguments.is_empty() {
return Err(anyhow!("No arguments given"));
}
match &prompt.arguments {
Some(args) if args.len() == 1 => {
let arg_name = args[0].name.clone();
let arg_value = arguments.join(" ");
Ok((arg_name, arg_value))
}
Some(_) => Err(anyhow!("Prompt must have exactly one argument")),
None => Err(anyhow!("Prompt has no arguments")),
}
}
fn prompt_arguments(prompt: &PromptInfo, arguments: &[String]) -> Result<HashMap<String, String>> {
match &prompt.arguments {
Some(args) if args.len() > 1 => Err(anyhow!(
@@ -180,8 +112,6 @@ fn prompt_arguments(prompt: &PromptInfo, arguments: &[String]) -> Result<HashMap
let mut map = HashMap::default();
map.insert(args[0].name.clone(), arguments.join(" "));
Ok(map)
} else if arguments.is_empty() && args[0].required == Some(false) {
Ok(HashMap::default())
} else {
Err(anyhow!("Prompt expects argument but none given"))
}
@@ -202,7 +132,7 @@ fn prompt_arguments(prompt: &PromptInfo, arguments: &[String]) -> Result<HashMap
pub fn acceptable_prompt(prompt: &PromptInfo) -> bool {
match &prompt.arguments {
None => true,
Some(args) if args.len() <= 1 => true,
Some(args) if args.len() == 1 => true,
_ => false,
}
}
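
The hunks above all revolve around how a context-server prompt's declared arguments are mapped onto what the user typed after the slash command. A standalone sketch of that mapping, combining the variants shown (at most one declared argument is acceptable, everything typed is joined and bound to it, and an optional argument may be omitted); `PromptArg` and `PromptInfo` are simplified stand-ins for the `context_servers` types, and the error strings are illustrative:

use std::collections::HashMap;

// Simplified stand-ins for the context_servers prompt types.
struct PromptArg {
    name: String,
    required: Option<bool>,
}

struct PromptInfo {
    name: String,
    arguments: Option<Vec<PromptArg>>,
}

// A prompt can back a slash command only if it declares at most one argument.
fn acceptable_prompt(prompt: &PromptInfo) -> bool {
    prompt.arguments.as_ref().map_or(true, |args| args.len() <= 1)
}

// Everything typed after the command name is joined into a single value and
// bound to the one declared argument; an optional argument may be omitted.
fn prompt_arguments(prompt: &PromptInfo, arguments: &[String]) -> Result<HashMap<String, String>, String> {
    match &prompt.arguments {
        Some(args) if args.len() > 1 => {
            Err(format!("prompt {} declares more than one argument", prompt.name))
        }
        Some(args) if args.len() == 1 => {
            if !arguments.is_empty() {
                let mut map = HashMap::new();
                map.insert(args[0].name.clone(), arguments.join(" "));
                Ok(map)
            } else if args[0].required == Some(false) {
                Ok(HashMap::new())
            } else {
                Err("prompt expects an argument but none was given".to_string())
            }
        }
        _ => Ok(HashMap::new()),
    }
}

fn main() {
    let prompt = PromptInfo {
        name: "summarize".into(),
        arguments: Some(vec![PromptArg { name: "topic".into(), required: Some(true) }]),
    };
    assert!(acceptable_prompt(&prompt));
    let args = prompt_arguments(&prompt, &["rust".into(), "async".into()]).unwrap();
    assert_eq!(args.get("topic").map(String::as_str), Some("rust async"));
}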

View File

@@ -3,7 +3,7 @@ use crate::prompt_library::PromptStore;
use anyhow::{anyhow, Result};
use assistant_slash_command::{ArgumentCompletion, SlashCommandOutputSection};
use gpui::{Task, WeakView};
use language::{BufferSnapshot, LspAdapterDelegate};
use language::LspAdapterDelegate;
use std::{
fmt::Write,
sync::{atomic::AtomicBool, Arc},
@@ -43,8 +43,6 @@ impl SlashCommand for DefaultSlashCommand {
fn run(
self: Arc<Self>,
_arguments: &[String],
_context_slash_command_output_sections: &[SlashCommandOutputSection<language::Anchor>],
_context_buffer: BufferSnapshot,
_workspace: WeakView<Workspace>,
_delegate: Option<Arc<dyn LspAdapterDelegate>>,
cx: &mut WindowContext,
@@ -72,7 +70,6 @@ impl SlashCommand for DefaultSlashCommand {
range: 0..text.len(),
icon: IconName::Library,
label: "Default".into(),
metadata: None,
}],
text,
run_commands_in_text: true,

View File

@@ -1,109 +0,0 @@
use crate::slash_command::file_command::{FileCommandMetadata, FileSlashCommand};
use anyhow::Result;
use assistant_slash_command::{
ArgumentCompletion, SlashCommand, SlashCommandOutput, SlashCommandOutputSection,
};
use collections::HashSet;
use futures::future;
use gpui::{Task, WeakView, WindowContext};
use language::{BufferSnapshot, LspAdapterDelegate};
use std::sync::{atomic::AtomicBool, Arc};
use text::OffsetRangeExt;
use workspace::Workspace;
pub(crate) struct DeltaSlashCommand;
impl SlashCommand for DeltaSlashCommand {
fn name(&self) -> String {
"delta".into()
}
fn description(&self) -> String {
"re-insert changed files".into()
}
fn menu_text(&self) -> String {
"Re-insert Changed Files".into()
}
fn requires_argument(&self) -> bool {
false
}
fn complete_argument(
self: Arc<Self>,
_arguments: &[String],
_cancellation_flag: Arc<AtomicBool>,
_workspace: Option<WeakView<Workspace>>,
_cx: &mut WindowContext,
) -> Task<Result<Vec<ArgumentCompletion>>> {
unimplemented!()
}
fn run(
self: Arc<Self>,
_arguments: &[String],
context_slash_command_output_sections: &[SlashCommandOutputSection<language::Anchor>],
context_buffer: BufferSnapshot,
workspace: WeakView<Workspace>,
delegate: Option<Arc<dyn LspAdapterDelegate>>,
cx: &mut WindowContext,
) -> Task<Result<SlashCommandOutput>> {
let mut paths = HashSet::default();
let mut file_command_old_outputs = Vec::new();
let mut file_command_new_outputs = Vec::new();
for section in context_slash_command_output_sections.iter().rev() {
if let Some(metadata) = section
.metadata
.as_ref()
.and_then(|value| serde_json::from_value::<FileCommandMetadata>(value.clone()).ok())
{
if paths.insert(metadata.path.clone()) {
file_command_old_outputs.push(
context_buffer
.as_rope()
.slice(section.range.to_offset(&context_buffer)),
);
file_command_new_outputs.push(Arc::new(FileSlashCommand).run(
&[metadata.path.clone()],
context_slash_command_output_sections,
context_buffer.clone(),
workspace.clone(),
delegate.clone(),
cx,
));
}
}
}
cx.background_executor().spawn(async move {
let mut output = SlashCommandOutput::default();
let file_command_new_outputs = future::join_all(file_command_new_outputs).await;
for (old_text, new_output) in file_command_old_outputs
.into_iter()
.zip(file_command_new_outputs)
{
if let Ok(new_output) = new_output {
if let Some(file_command_range) = new_output.sections.first() {
let new_text = &new_output.text[file_command_range.range.clone()];
if old_text.chars().ne(new_text.chars()) {
output.sections.extend(new_output.sections.into_iter().map(
|section| SlashCommandOutputSection {
range: output.text.len() + section.range.start
..output.text.len() + section.range.end,
icon: section.icon,
label: section.label,
metadata: section.metadata,
},
));
output.text.push_str(&new_output.text);
}
}
}
}
Ok(output)
})
}
}
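
The detail worth noting in `DeltaSlashCommand::run` above is how it stitches outputs together: each re-run file command's section ranges are rebased by the length of the text already accumulated before that output is appended. A minimal sketch of that rebasing, with `Section` as a simplified stand-in for `SlashCommandOutputSection`:

use std::ops::Range;

// Simplified stand-in for SlashCommandOutputSection.
#[derive(Debug, PartialEq)]
struct Section {
    range: Range<usize>,
    label: &'static str,
}

// Append another command's output, shifting its section ranges by the amount
// of text that is already present.
fn append(text: &mut String, sections: &mut Vec<Section>, new_text: &str, new_sections: Vec<Section>) {
    let offset = text.len();
    sections.extend(new_sections.into_iter().map(|section| Section {
        range: offset + section.range.start..offset + section.range.end,
        label: section.label,
    }));
    text.push_str(new_text);
}

fn main() {
    let mut text = String::from("first\n");
    let mut sections = vec![Section { range: 0..5, label: "first" }];
    append(&mut text, &mut sections, "second\n", vec![Section { range: 0..6, label: "second" }]);
    assert_eq!(sections[1], Section { range: 6..12, label: "second" });
}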

View File

@@ -9,9 +9,10 @@ use language::{
};
use project::{DiagnosticSummary, PathMatchCandidateSet, Project};
use rope::Point;
use std::fmt::Write;
use std::path::{Path, PathBuf};
use std::{
fmt::Write,
path::{Path, PathBuf},
ops::Range,
sync::{atomic::AtomicBool, Arc},
};
use ui::prelude::*;
@@ -162,8 +163,6 @@ impl SlashCommand for DiagnosticsSlashCommand {
fn run(
self: Arc<Self>,
arguments: &[String],
_context_slash_command_output_sections: &[SlashCommandOutputSection<language::Anchor>],
_context_buffer: BufferSnapshot,
workspace: WeakView<Workspace>,
_delegate: Option<Arc<dyn LspAdapterDelegate>>,
cx: &mut WindowContext,
@@ -176,7 +175,68 @@ impl SlashCommand for DiagnosticsSlashCommand {
let task = collect_diagnostics(workspace.read(cx).project().clone(), options, cx);
cx.spawn(move |_| async move { task.await?.ok_or_else(|| anyhow!("No diagnostics found")) })
cx.spawn(move |_| async move {
let Some((text, sections)) = task.await? else {
return Ok(SlashCommandOutput {
sections: vec![SlashCommandOutputSection {
range: 0..1,
icon: IconName::Library,
label: "No Diagnostics".into(),
}],
text: "\n".to_string(),
run_commands_in_text: true,
});
};
let sections = sections
.into_iter()
.map(|(range, placeholder_type)| SlashCommandOutputSection {
range,
icon: match placeholder_type {
PlaceholderType::Root(_, _) => IconName::ExclamationTriangle,
PlaceholderType::File(_) => IconName::File,
PlaceholderType::Diagnostic(DiagnosticType::Error, _) => IconName::XCircle,
PlaceholderType::Diagnostic(DiagnosticType::Warning, _) => {
IconName::ExclamationTriangle
}
},
label: match placeholder_type {
PlaceholderType::Root(summary, source) => {
let mut label = String::new();
label.push_str("Diagnostics");
if let Some(source) = source {
write!(label, " ({})", source).unwrap();
}
if summary.error_count > 0 || summary.warning_count > 0 {
label.push(':');
if summary.error_count > 0 {
write!(label, " {} errors", summary.error_count).unwrap();
if summary.warning_count > 0 {
label.push_str(",");
}
}
if summary.warning_count > 0 {
write!(label, " {} warnings", summary.warning_count).unwrap();
}
}
label.into()
}
PlaceholderType::File(file_path) => file_path.into(),
PlaceholderType::Diagnostic(_, message) => message.into(),
},
})
.collect();
Ok(SlashCommandOutput {
text,
sections,
run_commands_in_text: false,
})
})
}
}
@@ -217,7 +277,7 @@ fn collect_diagnostics(
project: Model<Project>,
options: Options,
cx: &mut AppContext,
) -> Task<Result<Option<SlashCommandOutput>>> {
) -> Task<Result<Option<(String, Vec<(Range<usize>, PlaceholderType)>)>>> {
let error_source = if let Some(path_matcher) = &options.path_matcher {
debug_assert_eq!(path_matcher.sources().len(), 1);
Some(path_matcher.sources().first().cloned().unwrap_or_default())
@@ -258,13 +318,13 @@ fn collect_diagnostics(
.collect();
cx.spawn(|mut cx| async move {
let mut output = SlashCommandOutput::default();
let mut text = String::new();
if let Some(error_source) = error_source.as_ref() {
writeln!(output.text, "diagnostics: {}", error_source).unwrap();
writeln!(text, "diagnostics: {}", error_source).unwrap();
} else {
writeln!(output.text, "diagnostics").unwrap();
writeln!(text, "diagnostics").unwrap();
}
let mut sections: Vec<(Range<usize>, PlaceholderType)> = Vec::new();
let mut project_summary = DiagnosticSummary::default();
for (project_path, path, summary) in diagnostic_summaries {
@@ -281,10 +341,10 @@ fn collect_diagnostics(
continue;
}
let last_end = output.text.len();
let last_end = text.len();
let file_path = path.to_string_lossy().to_string();
if !glob_is_exact_file_match {
writeln!(&mut output.text, "{file_path}").unwrap();
writeln!(&mut text, "{file_path}").unwrap();
}
if let Some(buffer) = project_handle
@@ -292,73 +352,75 @@ fn collect_diagnostics(
.await
.log_err()
{
let snapshot = cx.read_model(&buffer, |buffer, _| buffer.snapshot())?;
collect_buffer_diagnostics(&mut output, &snapshot, options.include_warnings);
collect_buffer_diagnostics(
&mut text,
&mut sections,
cx.read_model(&buffer, |buffer, _| buffer.snapshot())?,
options.include_warnings,
);
}
if !glob_is_exact_file_match {
output.sections.push(SlashCommandOutputSection {
range: last_end..output.text.len().saturating_sub(1),
icon: IconName::File,
label: file_path.into(),
metadata: None,
});
sections.push((
last_end..text.len().saturating_sub(1),
PlaceholderType::File(file_path),
))
}
}
// No diagnostics found
if output.sections.is_empty() {
if sections.is_empty() {
return Ok(None);
}
let mut label = String::new();
label.push_str("Diagnostics");
if let Some(source) = error_source {
write!(label, " ({})", source).unwrap();
}
if project_summary.error_count > 0 || project_summary.warning_count > 0 {
label.push(':');
if project_summary.error_count > 0 {
write!(label, " {} errors", project_summary.error_count).unwrap();
if project_summary.warning_count > 0 {
label.push_str(",");
}
}
if project_summary.warning_count > 0 {
write!(label, " {} warnings", project_summary.warning_count).unwrap();
}
}
output.sections.insert(
0,
SlashCommandOutputSection {
range: 0..output.text.len(),
icon: IconName::Warning,
label: label.into(),
metadata: None,
},
);
Ok(Some(output))
sections.push((
0..text.len(),
PlaceholderType::Root(project_summary, error_source),
));
Ok(Some((text, sections)))
})
}
pub fn collect_buffer_diagnostics(
output: &mut SlashCommandOutput,
pub fn buffer_has_error_diagnostics(snapshot: &BufferSnapshot) -> bool {
for (_, group) in snapshot.diagnostic_groups(None) {
let entry = &group.entries[group.primary_ix];
if entry.diagnostic.severity == DiagnosticSeverity::ERROR {
return true;
}
}
false
}
pub fn write_single_file_diagnostics(
output: &mut String,
path: Option<&Path>,
snapshot: &BufferSnapshot,
) -> bool {
if let Some(path) = path {
if buffer_has_error_diagnostics(&snapshot) {
output.push_str("/diagnostics ");
output.push_str(&path.to_string_lossy());
return true;
}
}
false
}
fn collect_buffer_diagnostics(
text: &mut String,
sections: &mut Vec<(Range<usize>, PlaceholderType)>,
snapshot: BufferSnapshot,
include_warnings: bool,
) {
for (_, group) in snapshot.diagnostic_groups(None) {
let entry = &group.entries[group.primary_ix];
collect_diagnostic(output, entry, &snapshot, include_warnings)
collect_diagnostic(text, sections, entry, &snapshot, include_warnings)
}
}
fn collect_diagnostic(
output: &mut SlashCommandOutput,
text: &mut String,
sections: &mut Vec<(Range<usize>, PlaceholderType)>,
entry: &DiagnosticEntry<Anchor>,
snapshot: &BufferSnapshot,
include_warnings: bool,
@@ -366,17 +428,17 @@ fn collect_diagnostic(
const EXCERPT_EXPANSION_SIZE: u32 = 2;
const MAX_MESSAGE_LENGTH: usize = 2000;
let (ty, icon) = match entry.diagnostic.severity {
let ty = match entry.diagnostic.severity {
DiagnosticSeverity::WARNING => {
if !include_warnings {
return;
}
("warning", IconName::Warning)
DiagnosticType::Warning
}
DiagnosticSeverity::ERROR => ("error", IconName::XCircle),
DiagnosticSeverity::ERROR => DiagnosticType::Error,
_ => return,
};
let prev_len = output.text.len();
let prev_len = text.len();
let range = entry.range.to_point(snapshot);
let diagnostic_row_number = range.start.row + 1;
@@ -386,11 +448,11 @@ fn collect_diagnostic(
let excerpt_range =
Point::new(start_row, 0).to_offset(&snapshot)..Point::new(end_row, 0).to_offset(&snapshot);
output.text.push_str("```");
text.push_str("```");
if let Some(language_name) = snapshot.language().map(|l| l.code_fence_block_name()) {
output.text.push_str(&language_name);
text.push_str(&language_name);
}
output.text.push('\n');
text.push('\n');
let mut buffer_text = String::new();
for chunk in snapshot.text_for_range(excerpt_range) {
@@ -399,26 +461,46 @@ fn collect_diagnostic(
for (i, line) in buffer_text.lines().enumerate() {
let line_number = start_row + i as u32 + 1;
writeln!(output.text, "{}", line).unwrap();
writeln!(text, "{}", line).unwrap();
if line_number == diagnostic_row_number {
output.text.push_str("//");
let prev_len = output.text.len();
write!(output.text, " {}: ", ty).unwrap();
let padding = output.text.len() - prev_len;
text.push_str("//");
let prev_len = text.len();
write!(text, " {}: ", ty.as_str()).unwrap();
let padding = text.len() - prev_len;
let message = util::truncate(&entry.diagnostic.message, MAX_MESSAGE_LENGTH)
.replace('\n', format!("\n//{:padding$}", "").as_str());
writeln!(output.text, "{message}").unwrap();
writeln!(text, "{message}").unwrap();
}
}
writeln!(output.text, "```").unwrap();
output.sections.push(SlashCommandOutputSection {
range: prev_len..output.text.len().saturating_sub(1),
icon,
label: entry.diagnostic.message.clone().into(),
metadata: None,
});
writeln!(text, "```").unwrap();
sections.push((
prev_len..text.len().saturating_sub(1),
PlaceholderType::Diagnostic(ty, entry.diagnostic.message.clone()),
))
}
#[derive(Clone)]
pub enum PlaceholderType {
Root(DiagnosticSummary, Option<String>),
File(String),
Diagnostic(DiagnosticType, String),
}
#[derive(Copy, Clone)]
pub enum DiagnosticType {
Warning,
Error,
}
impl DiagnosticType {
pub fn as_str(&self) -> &'static str {
match self {
DiagnosticType::Warning => "warning",
DiagnosticType::Error => "error",
}
}
}
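
Both variants of the diagnostics command shown above build the root section label the same way. Pulled out as a standalone function (the glob source and counts below are made up; the "1 warnings" pluralization quirk is faithful to the code above):

use std::fmt::Write;

// Builds the "Diagnostics (source): N errors, M warnings" label used for the
// root section of the /diagnostics output.
fn diagnostics_label(source: Option<&str>, error_count: usize, warning_count: usize) -> String {
    let mut label = String::from("Diagnostics");
    if let Some(source) = source {
        write!(label, " ({source})").unwrap();
    }
    if error_count > 0 || warning_count > 0 {
        label.push(':');
        if error_count > 0 {
            write!(label, " {error_count} errors").unwrap();
            if warning_count > 0 {
                label.push(',');
            }
        }
        if warning_count > 0 {
            write!(label, " {warning_count} warnings").unwrap();
        }
    }
    label
}

fn main() {
    assert_eq!(
        diagnostics_label(Some("src/**/*.rs"), 2, 1),
        "Diagnostics (src/**/*.rs): 2 errors, 1 warnings"
    );
}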

View File

@@ -12,7 +12,7 @@ use indexed_docs::{
DocsDotRsProvider, IndexedDocsRegistry, IndexedDocsStore, LocalRustdocProvider, PackageName,
ProviderId,
};
use language::{BufferSnapshot, LspAdapterDelegate};
use language::LspAdapterDelegate;
use project::{Project, ProjectPath};
use ui::prelude::*;
use util::{maybe, ResultExt};
@@ -269,8 +269,6 @@ impl SlashCommand for DocsSlashCommand {
fn run(
self: Arc<Self>,
arguments: &[String],
_context_slash_command_output_sections: &[SlashCommandOutputSection<language::Anchor>],
_context_buffer: BufferSnapshot,
_workspace: WeakView<Workspace>,
_delegate: Option<Arc<dyn LspAdapterDelegate>>,
cx: &mut WindowContext,
@@ -351,7 +349,6 @@ impl SlashCommand for DocsSlashCommand {
range,
icon: IconName::FileDoc,
label: format!("docs ({provider}): {key}",).into(),
metadata: None,
})
.collect(),
run_commands_in_text: false,

Some files were not shown because too many files have changed in this diff