Compare commits

...

14 Commits

Author SHA1 Message Date
Joseph T Lyons
738c61ed80 v0.150.x stable 2024-08-28 11:43:35 -04:00
Zed Bot
857fb0cb7b Bump to 0.150.4 for @osiewicz 2024-08-27 04:06:04 -07:00
Max Brunsfeld
8a42a3762a Fix git repository state corruption when work dir's metadata is updated (#16926)
Fixes https://github.com/zed-industries/zed/issues/13176

Release Notes:

- Fixed an issue where git state would stop updating if the root
directory of a git repository was updated in certain ways
2024-08-27 12:55:28 +02:00
Piotr Osiewicz
99d9477d91 gpui: Fix performance of app menu opening with large # of windows (#16939)
This is officially my weirdest performance fix to date; With large # of
windows opening app menu could take a lot of time (we're talking few
seconds with 9 windows, a minute with 10 windows). The fix is to make
one method pub(crate).. What?

<img width="981" alt="image"
src="https://github.com/user-attachments/assets/83b26154-0acd-43ef-84b3-4b85cde36120">

We were spending most of the time on clear_pending_keystrokes, which -
funnily enough - called itself recursively. It turned out we have two
methods; `AppContext::clear_pending_keystrokes` and
`WindowContext::clear_pending_keystrokes`. The former calls the latter,
but - due to the fact that `WindowContext::clear_pending_keystrokes` is
private and `WindowContext` derefs to `AppContext` - `AppContext` one
ended up actually calling itself! The fix is plain and simple - marking
the `WindowContext` one as pub(crate), so that it gets picked up as a method
to call over `AppContext::clear_pending_keystrokes`.

Closes #16895



Release Notes:

- Fixed app menu performance slowdowns when there are multiple windows
open.
2024-08-27 12:54:55 +02:00
Kirill Bulatov
f0d5f16e83 Deduplicate /tab all buffers inserted (#16681)
Closes https://github.com/zed-industries/zed/issues/16678

Release Notes:

- Fixed `/tab all` inserting duplicate buffers
([#16678](https://github.com/zed-industries/zed/issues/16678))
2024-08-23 00:28:20 +03:00
Zed Bot
9dab1e7b72 Bump to 0.150.3 for @SomeoneToIgnore 2024-08-22 13:23:03 -07:00
Zed Bot
1d1ad3f4e1 Bump to 0.150.2 for @SomeoneToIgnore 2024-08-22 11:45:09 -07:00
gcp-cherry-pick-bot[bot]
06d3837c54 Pass through Anthropic cache configuration when using Zed provider (cherry-pick #16685) (#16688)
Cherry-picked Pass through Anthropic cache configuration when using Zed
provider (#16685)

This PR makes it so the model's cache configuration gets passed through
from the base model when using the Zed provider.

Release Notes:

- Fixed caching for Anthropic models when using the Zed provider.

Co-authored-by: Marshall Bowers <elliott.codes@gmail.com>
2024-08-22 14:37:53 -04:00
Zed Bot
70d825868a Bump to 0.150.1 for @SomeoneToIgnore 2024-08-22 09:11:51 -07:00
Kirill Bulatov
c3de6d858a Force Vue and Svelte language servers to be the first in the list for their languages (#16654)
Follow-up of https://github.com/zed-industries/zed/pull/15624

Fixes https://github.com/zed-industries/zed/issues/13769
Fixes https://github.com/zed-industries/zed/issues/16469

This way, those are considered "primary" and serve all LSP requests like
go to definition. Before, Tailwind language server was first and
returned nothing for all LSP requests.

- Fixed Vue and Svelte languages integrations not handling LSP requests
properly ([#13769](https://github.com/zed-industries/zed/issues/13769))
([#16469](https://github.com/zed-industries/zed/issues/16469))
2024-08-22 15:41:00 +03:00
gcp-cherry-pick-bot[bot]
94b0cdfca4 Fix a panic when diagnostics contain multiple links (cherry-pick #16601) (#16604)
Cherry-picked Fix a panic when diagnostics contain multiple links
(#16601)

Follow up from #14518

Release Notes:

- Fixed a panic when diagnostics contain multiple links

Co-authored-by: Conrad Irwin <conrad.irwin@gmail.com>
2024-08-21 11:33:46 -06:00
Stanislav Alekseev
46825f2068 elixir: Make two more files required by lexical executable (#16382)
I still haven't fixed building dev extensions with rust managed by nix,
so I'd appreciate testing this for me

Release Notes:

- N/A
2024-08-19 19:01:28 -04:00
Bennet Bo Fenner
8b7f4316c7 assistant: Set default provider to zed.dev (#16454)
Do NOT merge until tomorrow

Release Notes:

- N/A

---------

Co-authored-by: Thorsten <thorsten@zed.dev>
2024-08-19 19:01:15 -04:00
Joseph T Lyons
ebc521cb07 v0.150.x preview 2024-08-19 18:40:07 -04:00
15 changed files with 284 additions and 61 deletions

2
Cargo.lock generated
View File

@@ -13839,7 +13839,7 @@ dependencies = [
[[package]]
name = "zed"
version = "0.150.0"
version = "0.150.4"
dependencies = [
"activity_indicator",
"anyhow",

View File

@@ -395,9 +395,9 @@
// The default model to use when creating new contexts.
"default_model": {
// The provider to use.
"provider": "openai",
"provider": "zed.dev",
// The model to use.
"model": "gpt-4o"
"model": "claude-3-5-sonnet"
}
},
// The settings for slash commands.
@@ -836,6 +836,7 @@
"language_servers": ["starpls", "!buck2-lsp", "..."]
},
"Svelte": {
"language_servers": ["svelte-language-server", "..."],
"prettier": {
"allowed": true,
"plugins": ["prettier-plugin-svelte"]
@@ -859,6 +860,7 @@
}
},
"Vue.js": {
"language_servers": ["vue-language-server", "..."],
"prettier": {
"allowed": true
}

View File

@@ -543,8 +543,8 @@ mod tests {
assert_eq!(
AssistantSettings::get_global(cx).default_model,
LanguageModelSelection {
provider: "openai".into(),
model: "gpt-4o".into(),
provider: "zed.dev".into(),
model: "claude-3-5-sonnet".into(),
}
);
});

View File

@@ -197,6 +197,7 @@ fn tab_items_for_queries(
}
let mut timestamps_by_entity_id = HashMap::default();
let mut visited_buffers = HashSet::default();
let mut open_buffers = Vec::new();
for pane in workspace.panes() {
@@ -211,9 +212,11 @@ fn tab_items_for_queries(
if let Some(timestamp) =
timestamps_by_entity_id.get(&editor.entity_id())
{
let snapshot = buffer.read(cx).snapshot();
let full_path = snapshot.resolve_file_path(cx, true);
open_buffers.push((full_path, snapshot, *timestamp));
if visited_buffers.insert(buffer.read(cx).remote_id()) {
let snapshot = buffer.read(cx).snapshot();
let full_path = snapshot.resolve_file_path(cx, true);
open_buffers.push((full_path, snapshot, *timestamp));
}
}
}
}

View File

@@ -9090,6 +9090,43 @@ async fn go_to_prev_overlapping_diagnostic(
"});
}
#[gpui::test]
async fn test_diagnostics_with_links(cx: &mut TestAppContext) {
init_test(cx, |_| {});
let mut cx = EditorTestContext::new(cx).await;
cx.set_state(indoc! {"
fn func(abˇc def: i32) -> u32 {
}
"});
let project = cx.update_editor(|editor, _| editor.project.clone().unwrap());
cx.update(|cx| {
project.update(cx, |project, cx| {
project.update_diagnostics(
LanguageServerId(0),
lsp::PublishDiagnosticsParams {
uri: lsp::Url::from_file_path("/root/file").unwrap(),
version: None,
diagnostics: vec![lsp::Diagnostic {
range: lsp::Range::new(lsp::Position::new(0, 8), lsp::Position::new(0, 12)),
severity: Some(lsp::DiagnosticSeverity::ERROR),
message: "we've had problems with <https://link.one>, and <https://link.two> is broken".to_string(),
..Default::default()
}],
},
&[],
cx,
)
})
}).unwrap();
cx.run_until_parked();
cx.update_editor(|editor, cx| hover_popover::hover(editor, &Default::default(), cx));
cx.run_until_parked();
cx.update_editor(|editor, _| assert!(editor.hover_state.diagnostic_popover.is_some()))
}
#[gpui::test]
async fn go_to_hunk(executor: BackgroundExecutor, cx: &mut gpui::TestAppContext) {
init_test(cx, |_| {});

View File

@@ -826,6 +826,35 @@ impl FakeFs {
state.next_mtime = next_mtime;
}
pub async fn touch_path(&self, path: impl AsRef<Path>) {
let mut state = self.state.lock();
let path = path.as_ref();
let new_mtime = state.next_mtime;
let new_inode = state.next_inode;
state.next_inode += 1;
state.next_mtime += Duration::from_nanos(1);
state
.write_path(path, move |entry| {
match entry {
btree_map::Entry::Vacant(e) => {
e.insert(Arc::new(Mutex::new(FakeFsEntry::File {
inode: new_inode,
mtime: new_mtime,
content: Vec::new(),
})));
}
btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut().lock() {
FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
FakeFsEntry::Symlink { .. } => {}
},
}
Ok(())
})
.unwrap();
state.emit_event([path.to_path_buf()]);
}
pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
self.write_file_internal(path, content).unwrap()
}

View File

@@ -3389,7 +3389,7 @@ impl<'a> WindowContext<'a> {
self.window.pending_input.is_some()
}
fn clear_pending_keystrokes(&mut self) {
pub(crate) fn clear_pending_keystrokes(&mut self) {
self.window.pending_input.take();
}

View File

@@ -159,6 +159,24 @@ pub struct CachedLspAdapter {
cached_binary: futures::lock::Mutex<Option<LanguageServerBinary>>,
}
impl Debug for CachedLspAdapter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("CachedLspAdapter")
.field("name", &self.name)
.field(
"disk_based_diagnostic_sources",
&self.disk_based_diagnostic_sources,
)
.field(
"disk_based_diagnostics_progress_token",
&self.disk_based_diagnostics_progress_token,
)
.field("language_ids", &self.language_ids)
.field("reinstall_attempt_count", &self.reinstall_attempt_count)
.finish_non_exhaustive()
}
}
impl CachedLspAdapter {
pub fn new(adapter: Arc<dyn LspAdapter>) -> Arc<Self> {
let name = adapter.name();

View File

@@ -444,6 +444,21 @@ impl LanguageModel for CloudLanguageModel {
self.model.max_token_count()
}
fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
match &self.model {
CloudModel::Anthropic(model) => {
model
.cache_configuration()
.map(|cache| LanguageModelCacheConfiguration {
max_cache_anchors: cache.max_cache_anchors,
should_speculate: cache.should_speculate,
min_total_token: cache.min_total_token,
})
}
CloudModel::OpenAi(_) | CloudModel::Google(_) | CloudModel::Zed(_) => None,
}
}
fn count_tokens(
&self,
request: LanguageModelRequest,

View File

@@ -96,8 +96,8 @@ pub fn parse_links_only(text: &str) -> Vec<(Range<usize>, MarkdownEvent)> {
start: 0,
end: text.len(),
};
for link in finder.links(&text[text_range.clone()]) {
let link_range = text_range.start + link.start()..text_range.start + link.end();
for link in finder.links(&text) {
let link_range = link.start()..link.end();
if link_range.start > text_range.start {
events.push((text_range.start..link_range.start, MarkdownEvent::Text));
@@ -118,7 +118,9 @@ pub fn parse_links_only(text: &str) -> Vec<(Range<usize>, MarkdownEvent)> {
text_range.start = link_range.end;
}
events.push((text_range, MarkdownEvent::Text));
if text_range.end > text_range.start {
events.push((text_range, MarkdownEvent::Text));
}
events
}

View File

@@ -41,7 +41,8 @@ use settings::{Settings, SettingsLocation, SettingsStore};
use smol::channel::{self, Sender};
use std::{
any::Any,
cmp::{self, Ordering},
cmp::Ordering,
collections::hash_map,
convert::TryFrom,
ffi::OsStr,
fmt,
@@ -299,7 +300,7 @@ struct BackgroundScannerState {
/// as part of the current update. These entry ids may be re-used
/// if the same inode is discovered at a new path, or if the given
/// path is re-created after being deleted.
removed_entry_ids: HashMap<(u64, SystemTime), ProjectEntryId>,
removed_entries: HashMap<u64, Entry>,
changed_paths: Vec<Arc<Path>>,
prev_snapshot: Snapshot,
}
@@ -1001,7 +1002,7 @@ impl LocalWorktree {
scanned_dirs: Default::default(),
path_prefixes_to_scan: Default::default(),
paths_to_scan: Default::default(),
removed_entry_ids: Default::default(),
removed_entries: Default::default(),
changed_paths: Default::default(),
}),
phase: BackgroundScannerPhase::InitialScan,
@@ -2615,6 +2616,26 @@ impl LocalSnapshot {
}
}
#[cfg(test)]
fn check_git_invariants(&self) {
let dotgit_paths = self
.git_repositories
.iter()
.map(|repo| repo.1.git_dir_path.clone())
.collect::<HashSet<_>>();
let work_dir_paths = self
.repository_entries
.iter()
.map(|repo| repo.0.clone().0)
.collect::<HashSet<_>>();
assert_eq!(dotgit_paths.len(), work_dir_paths.len());
assert_eq!(self.repository_entries.iter().count(), work_dir_paths.len());
assert_eq!(self.git_repositories.iter().count(), work_dir_paths.len());
for (_, entry) in self.repository_entries.iter() {
self.git_repositories.get(&entry.work_directory).unwrap();
}
}
#[cfg(test)]
pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
let mut paths = Vec::new();
@@ -2683,8 +2704,17 @@ impl BackgroundScannerState {
fn reuse_entry_id(&mut self, entry: &mut Entry) {
if let Some(mtime) = entry.mtime {
if let Some(removed_entry_id) = self.removed_entry_ids.remove(&(entry.inode, mtime)) {
entry.id = removed_entry_id;
// If an entry with the same inode was removed from the worktree during this scan,
// then it *might* represent the same file or directory. But the OS might also have
// re-used the inode for a completely different file or directory.
//
// Conditionally reuse the old entry's id:
// * if the mtime is the same, the file was probably renamed.
// * if the path is the same, the file may just have been updated
if let Some(removed_entry) = self.removed_entries.remove(&entry.inode) {
if removed_entry.mtime == Some(mtime) || removed_entry.path == entry.path {
entry.id = removed_entry.id;
}
} else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
entry.id = existing_entry.id;
}
@@ -2776,30 +2806,47 @@ impl BackgroundScannerState {
}
self.snapshot.entries_by_path = new_entries;
let mut entries_by_id_edits = Vec::new();
let mut removed_ids = Vec::with_capacity(removed_entries.summary().count);
for entry in removed_entries.cursor::<()>() {
if let Some(mtime) = entry.mtime {
let removed_entry_id = self
.removed_entry_ids
.entry((entry.inode, mtime))
.or_insert(entry.id);
*removed_entry_id = cmp::max(*removed_entry_id, entry.id);
match self.removed_entries.entry(entry.inode) {
hash_map::Entry::Occupied(mut e) => {
let prev_removed_entry = e.get_mut();
if entry.id > prev_removed_entry.id {
*prev_removed_entry = entry.clone();
}
}
hash_map::Entry::Vacant(e) => {
e.insert(entry.clone());
}
}
entries_by_id_edits.push(Edit::Remove(entry.id));
}
self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
if path.file_name() == Some(&GITIGNORE) {
let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
if let Some((_, needs_update)) = self
.snapshot
.ignores_by_parent_abs_path
.get_mut(abs_parent_path.as_path())
{
*needs_update = true;
if entry.path.file_name() == Some(&GITIGNORE) {
let abs_parent_path = self.snapshot.abs_path.join(entry.path.parent().unwrap());
if let Some((_, needs_update)) = self
.snapshot
.ignores_by_parent_abs_path
.get_mut(abs_parent_path.as_path())
{
*needs_update = true;
}
}
if let Err(ix) = removed_ids.binary_search(&entry.id) {
removed_ids.insert(ix, entry.id);
}
}
self.snapshot.entries_by_id.edit(
removed_ids.iter().map(|&id| Edit::Remove(id)).collect(),
&(),
);
self.snapshot
.git_repositories
.retain(|id, _| removed_ids.binary_search(&id).is_err());
self.snapshot
.repository_entries
.retain(|repo_path, _| !repo_path.0.starts_with(path));
#[cfg(test)]
self.snapshot.check_invariants(false);
}
@@ -3699,11 +3746,14 @@ impl BackgroundScanner {
{
let mut state = self.state.lock();
state.snapshot.completed_scan_id = state.snapshot.scan_id;
for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
state.scanned_dirs.remove(&entry_id);
for (_, entry) in mem::take(&mut state.removed_entries) {
state.scanned_dirs.remove(&entry.id);
}
}
#[cfg(test)]
self.state.lock().snapshot.check_git_invariants();
self.send_status_update(false, None);
}
@@ -4116,7 +4166,6 @@ impl BackgroundScanner {
let is_dir = fs_entry.is_dir();
fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
fs_entry.is_external = is_external;
fs_entry.is_private = self.is_path_private(path);
@@ -4145,7 +4194,6 @@ impl BackgroundScanner {
self.remove_repo_path(path, &mut state.snapshot);
}
Err(err) => {
// TODO - create a special 'error' entry in the entries tree to mark this
log::error!("error reading file {abs_path:?} on event: {err:#}");
}
}
@@ -4175,9 +4223,6 @@ impl BackgroundScanner {
}
}
// TODO statuses
// Track when a .git is removed and iterate over the file system there
Some(())
}

View File

@@ -1212,6 +1212,76 @@ async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
);
}
#[gpui::test]
async fn test_bump_mtime_of_git_repo_workdir(cx: &mut TestAppContext) {
init_test(cx);
// Create a worktree with a git directory.
let fs = FakeFs::new(cx.background_executor.clone());
fs.insert_tree(
"/root",
json!({
".git": {},
"a.txt": "",
"b": {
"c.txt": "",
},
}),
)
.await;
let tree = Worktree::local(
"/root".as_ref(),
true,
fs.clone(),
Default::default(),
&mut cx.to_async(),
)
.await
.unwrap();
cx.executor().run_until_parked();
let (old_entry_ids, old_mtimes) = tree.read_with(cx, |tree, _| {
(
tree.entries(true, 0).map(|e| e.id).collect::<Vec<_>>(),
tree.entries(true, 0).map(|e| e.mtime).collect::<Vec<_>>(),
)
});
// Regression test: after the directory is scanned, touch the git repo's
// working directory, bumping its mtime. That directory keeps its project
// entry id after the directories are re-scanned.
fs.touch_path("/root").await;
cx.executor().run_until_parked();
let (new_entry_ids, new_mtimes) = tree.read_with(cx, |tree, _| {
(
tree.entries(true, 0).map(|e| e.id).collect::<Vec<_>>(),
tree.entries(true, 0).map(|e| e.mtime).collect::<Vec<_>>(),
)
});
assert_eq!(new_entry_ids, old_entry_ids);
assert_ne!(new_mtimes, old_mtimes);
// Regression test: changes to the git repository should still be
// detected.
fs.set_status_for_repo_via_git_operation(
&Path::new("/root/.git"),
&[(Path::new("b/c.txt"), GitFileStatus::Modified)],
);
cx.executor().run_until_parked();
let snapshot = tree.read_with(cx, |tree, _| tree.snapshot());
check_propagated_statuses(
&snapshot,
&[
(Path::new(""), Some(GitFileStatus::Modified)),
(Path::new("a.txt"), None),
(Path::new("b/c.txt"), Some(GitFileStatus::Modified)),
],
);
}
#[gpui::test]
async fn test_create_dir_all_on_create_entry(cx: &mut TestAppContext) {
init_test(cx);
@@ -2409,25 +2479,25 @@ async fn test_propagate_git_statuses(cx: &mut TestAppContext) {
(Path::new("f/no-status.txt"), None),
],
);
}
#[track_caller]
fn check_propagated_statuses(
snapshot: &Snapshot,
expected_statuses: &[(&Path, Option<GitFileStatus>)],
) {
let mut entries = expected_statuses
#[track_caller]
fn check_propagated_statuses(
snapshot: &Snapshot,
expected_statuses: &[(&Path, Option<GitFileStatus>)],
) {
let mut entries = expected_statuses
.iter()
.map(|(path, _)| snapshot.entry_for_path(path).unwrap().clone())
.collect::<Vec<_>>();
snapshot.propagate_git_statuses(&mut entries);
assert_eq!(
entries
.iter()
.map(|(path, _)| snapshot.entry_for_path(path).unwrap().clone())
.collect::<Vec<_>>();
snapshot.propagate_git_statuses(&mut entries);
assert_eq!(
entries
.iter()
.map(|e| (e.path.as_ref(), e.git_status))
.collect::<Vec<_>>(),
expected_statuses
);
}
.map(|e| (e.path.as_ref(), e.git_status))
.collect::<Vec<_>>(),
expected_statuses
);
}
#[track_caller]

View File

@@ -2,7 +2,7 @@
description = "The fast, collaborative code editor."
edition = "2021"
name = "zed"
version = "0.150.0"
version = "0.150.4"
publish = false
license = "GPL-3.0-or-later"
authors = ["Zed Team <hi@zed.dev>"]

View File

@@ -1 +1 @@
dev
stable

View File

@@ -95,6 +95,8 @@ impl Lexical {
.map_err(|e| format!("failed to download file: {e}"))?;
zed::make_file_executable(&binary_path)?;
zed::make_file_executable(&format!("{version_dir}/lexical/bin/debug_shell.sh"))?;
zed::make_file_executable(&format!("{version_dir}/lexical/priv/port_wrapper.sh"))?;
let entries =
fs::read_dir(".").map_err(|e| format!("failed to list working directory {e}"))?;