Compare commits

..

885 Commits

Author SHA1 Message Date
Mikayla Maki
ab97a75156 zed 0.72.3 2023-02-08 16:15:14 -08:00
Mikayla Maki
58b819e9a4 Merge pull request #2134 from zed-industries/fix-action-keystroke-bugs
Fix several action dispatching bugs
2023-02-08 16:14:21 -08:00
Joseph Lyons
f753a83eb1 v0.72.x stable 2023-02-08 14:55:09 -05:00
Antonio Scandurra
f877d26c3a Merge pull request #2145 from zed-industries/fix-focus-in-project-search
Focus results editor only when starting a new project search
2023-02-08 13:19:56 -05:00
Max Brunsfeld
37ad025ccd Merge pull request #2140 from zed-industries/feedback/929-project-search-crashes
Feedback/929 project search crashes
2023-02-07 10:12:34 -08:00
Max Brunsfeld
e8ebd6d7a2 zed 0.72.2 2023-02-07 09:25:30 -08:00
Kay Simmons
bcc38a60bf Merge pull request #2136 from zed-industries/fix-recent-projects-panic
Fix Recent Project Panic
2023-02-07 09:24:59 -08:00
Max Brunsfeld
8944ca9236 Merge pull request #2135 from zed-industries/handle-window-moved-crash
Fix crash when unplugging display containing a zed window
2023-02-07 09:24:49 -08:00
Joseph Lyons
6ae297832f zed 0.72.1 2023-02-06 14:24:41 -05:00
Joseph T. Lyons
a7f788746c Merge pull request #2125 from zed-industries/trim-leading-and-trailing-whitespace-in-feedback
Trim leading and trailing whitespace in feedback
2023-02-06 14:19:33 -05:00
Kay Simmons
7ff55087f3 Merge pull request #2124 from zed-industries/fix-display-uuid-panic
Make display uuid optional if the display is disconnected
2023-02-06 14:15:58 -05:00
Joseph Lyons
41102751ef Reduce length of feedback placeholder text 2023-02-03 08:35:55 -05:00
Joseph T. Lyons
a25580fb7f Merge pull request #2127 from zed-industries/fix-discourse-release
Fix discourse release
2023-02-02 14:41:59 -05:00
Joseph Lyons
bc7bccf5f5 v0.72.x preview 2023-02-01 13:44:50 -05:00
Mikayla Maki
a89cc22af4 Merge pull request #2113 from zed-industries/terminal-lost-cwd
Fix lost terminal working directories
2023-01-30 14:43:43 -08:00
Mikayla Maki
e682e2dd72 Changed SQLez migrations to be executed eagerly
Added fix for terminal working directories sometimes getting lost
co-authored-by: Kay <kay@zed.dev>
2023-01-30 14:38:48 -08:00
Joseph T. Lyons
65641b1d3e Merge pull request #2112 from zed-industries/fix-version-for-feedback-related-commands
Fix version for feedback-related commands
2023-01-30 14:43:33 -05:00
Joseph Lyons
248161aa63 Fix version for feedback-related commands
Co-Authored-By: Max Brunsfeld <maxbrunsfeld@gmail.com>
2023-01-30 14:13:25 -05:00
Mikayla Maki
d9278f7416 Merge pull request #2066 from zed-industries/remove-staff-mode
Small patches
2023-01-27 15:50:00 -08:00
Mikayla Maki
57781fd7aa Move StaffMode declaration out of paths 2023-01-27 15:45:33 -08:00
Mikayla Maki
2d889f59bf Rewrite license documentation to be more clear 2023-01-27 15:44:19 -08:00
Mikayla Maki
2802e3a1c6 Fixed failing tests 2023-01-27 15:44:17 -08:00
Mikayla Maki
ea39983f78 Removed old experiments settings and staff mode flag, added new StaffMode global that is set based on the webserver's staff bit 2023-01-27 15:43:12 -08:00
Mikayla Maki
ca2e0256e1 Renamed open recent action to match menu 2023-01-27 15:38:48 -08:00
Mikayla Maki
070b89243f Merge pull request #2107 from zed-industries/fix-ci
Add an install step to the CI build script
2023-01-27 15:38:12 -08:00
Mikayla Maki
e530406d62 Add an install step to the CI build script 2023-01-27 15:24:21 -08:00
Kay Simmons
ea0dd8972f Merge pull request #2090 from zed-industries/workspace-window-position-persistence
Workspace window position persistence
2023-01-27 15:24:01 -08:00
Kay Simmons
a1308d20ce Merge pull request #2105 from zed-industries/fix-focus-stealing-when-collaborating
Limit focus grabbing in followed pane
2023-01-27 15:23:43 -08:00
Kay Simmons
486b3f64d1 Merge pull request #2106 from zed-industries/fix-local-integration-test-failure
fix local failing test
2023-01-27 15:23:23 -08:00
Kay Simmons
0f93386071 Add run until parked to test_fs_operations to ensure both update chunks are completed before asserting the changes 2023-01-27 15:07:51 -08:00
Kay Simmons
77a4f907a0 removed invalid focus assertion 2023-01-27 13:43:36 -08:00
Kay Simmons
d6acea525d add test for is_child_focused 2023-01-27 13:00:26 -08:00
Kay Simmons
89a5506f43 Add function which checks if a child of a view is focused and use that to only focus item updates from the leader when the active item was focused 2023-01-27 12:39:32 -08:00
Antonio Scandurra
5431488a9a collab 0.5.4 2023-01-27 11:07:12 +01:00
Antonio Scandurra
ac7618da17 Merge pull request #2103 from zed-industries/connection-staleness
Fix connection staleness issues
2023-01-27 11:01:24 +01:00
Antonio Scandurra
647d9861b1 Abort collaboration process if any thread panics 2023-01-27 09:50:59 +01:00
Mikayla Maki
d7ac15fa71 Merge pull request #2101 from zed-industries/theme-licenses
Added build-licenses command to style tree
2023-01-26 18:29:20 -08:00
Mikayla Maki
3a1d533c01 Combine both license generations into one file 2023-01-26 18:25:28 -08:00
Mikayla Maki
c44acaefff Added build-licenses command to style tree 2023-01-26 17:33:54 -08:00
Kay Simmons
1593b1e13d window position restoration working 2023-01-26 16:35:00 -08:00
Max Brunsfeld
fabcdb909a Merge pull request #2100 from zed-industries/visible-worktrees-in-collab-ui
Omit hidden worktrees when showing projects in collaboration UI
2023-01-26 15:01:10 -08:00
Max Brunsfeld
f99e4043c4 Run CI for version branches but not all branches starting with 'v' 2023-01-26 14:57:24 -08:00
Max Brunsfeld
1b45911857 Omit hidden worktrees when showing projects in collaboration UI 2023-01-26 14:47:37 -08:00
Max Brunsfeld
4918ad5789 Merge pull request #2099 from zed-industries/empty-go-to-def-multibuffer
Avoid opening a definitions tab if there are no definitions found
2023-01-26 10:35:35 -08:00
Max Brunsfeld
9f86748aff Avoid opening a definitions tab if there are no definitions found 2023-01-26 10:30:01 -08:00
Petros Amoiridis
489be5e77b Merge pull request #2077 from zed-industries/2064-remove-contacts
Remove contact from contact list
2023-01-26 20:04:15 +02:00
Max Brunsfeld
b396e153d1 Merge pull request #2098 from zed-industries/help-menu-licenses
Add 'view dependency licenses' item to Help application menu
2023-01-26 09:56:44 -08:00
Max Brunsfeld
1c572fd86e Add 'view dependency licenses' item to Help application menu 2023-01-26 09:53:46 -08:00
Petros Amoiridis
73af155dd6 Refactor Database::remove_contact
Refactor it to avoid sending irrelevant messages to update the UI.

Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2023-01-26 19:01:51 +02:00
Antonio Scandurra
eca6115e4b Ensure proto::UpdateWorktree::removed_entries doesn't exceed chunk size
This was causing the database to panic because we were trying to remove too
many entries at once.
2023-01-26 17:26:31 +01:00
Antonio Scandurra
74aeec360d Cancel pending call when participant leaves room after a reconnection
Previously, if a user temporarily disconnected while there was a pending
call, we would fail to cancel such pending call when the caller left the
room. This was due to the caller reconnecting and having a different connection
id than the one originally used to initiate the call.
2023-01-26 16:44:55 +01:00
Petros Amoiridis
2f26fcd889 Merge branch 'main' into 2064-remove-contacts 2023-01-26 16:34:17 +02:00
Joseph T. Lyons
a4d9d6c750 Merge pull request #2095 from zed-industries/fix-crash-when-opening-feedback-while-in-call
Fix crash when opening feedback while in call
2023-01-25 21:16:12 -05:00
Max Brunsfeld
a2a3ebc42f Merge pull request #2096 from zed-industries/lazy-load-languages
Load languages lazily in the background
2023-01-25 18:09:45 -08:00
Max Brunsfeld
ddf4e1a316 Load languages lazily in the background 2023-01-25 17:47:46 -08:00
Kay Simmons
a369fb8033 better but still broken 2023-01-25 17:05:57 -08:00
Joseph Lyons
9ff34bcb6a Remove no-longer-needed method 2023-01-25 20:03:44 -05:00
Julia
10f130ee30 Merge pull request #2094 from zed-industries/project-lost-window-close-action-shortcut-accessibility
Add "Close Window" global action which does not need a focused workspace
2023-01-25 18:58:22 -05:00
Julia
3819a67185 Add "Close Window" global action which does not need a focused workspace 2023-01-25 18:51:25 -05:00
Joseph Lyons
6e7101ca6b Fix crash when opening feedback while in call 2023-01-25 17:48:01 -05:00
Julia
2df2d09e3c Merge pull request #2091 from zed-industries/style
Style
2023-01-25 15:22:52 -05:00
Joseph Lyons
4c3244b982 v0.72.x dev 2023-01-25 15:20:41 -05:00
Julia
a79b4e312b Style
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2023-01-25 15:09:57 -05:00
Kay Simmons
5eac797a93 mostly working now 2023-01-25 11:36:38 -08:00
Kay Simmons
a581d0c5b8 wip 2023-01-25 11:32:19 -08:00
Kay Simmons
15799f7af6 wip 2023-01-25 11:32:19 -08:00
Joseph T. Lyons
81ed961659 Merge pull request #2088 from zed-industries/add-cursor-position-to-feedback-editor
Add cursor position to feedback editor
2023-01-25 14:29:24 -05:00
Max Brunsfeld
9db55b3029 Merge pull request #2087 from zed-industries/buffer-language-registry
Assign the language registry to all buffers in the project
2023-01-25 11:25:40 -08:00
Joseph Lyons
328b779185 Clean up construction of FeedbackEditor 2023-01-25 14:20:58 -05:00
Joseph Lyons
7f3d937938 Count chars 2023-01-25 14:20:40 -05:00
Joseph Lyons
f68f9f37ab Add cursor position to feedback editor
Co-Authored-By: Mikayla Maki <mikayla.c.maki@gmail.com>
Co-Authored-By: Max Brunsfeld <maxbrunsfeld@gmail.com>
2023-01-25 14:20:23 -05:00
Julia
c22d13286d Merge pull request #2085 from zed-industries/cleanup-debug-printing
Clean up some debug printing
2023-01-25 14:18:43 -05:00
Joseph Lyons
44c7f162b6 Merge branch 'main' into add-cursor-position-to-feedback-editor 2023-01-25 14:01:45 -05:00
Max Brunsfeld
7003a475a7 Assign the language registry to all buffers in the project 2023-01-25 10:44:15 -08:00
Julia
3d8dbee76a Clean up some debug printing 2023-01-25 13:37:04 -05:00
Petros Amoiridis
160870c9de Improve user notification
The message is not really true. When one declines, the other person can notice that the contact request is no longer pending; they will know. Switching to "not alerted" is closer to what is really happening.
2023-01-25 19:46:51 +02:00
Mikayla Maki
ba6ffd8256 Merge pull request #2081 from zed-industries/fix-failing-ci
Fixes a broken conditional that is only caught on darwin systems
2023-01-25 09:45:30 -08:00
Mikayla Maki
ecb7d1072f Fixes a broken conditional that is only caught on darwin systems 2023-01-25 09:33:07 -08:00
Mikayla Maki
38b83a70aa Merge pull request #2078 from zed-industries/fix-cursor-style
Fix cursor style thrashing on overlapping windows
2023-01-25 09:15:55 -08:00
Mikayla Maki
1fc6276eab Remove debug wiring 2023-01-25 09:10:51 -08:00
Mikayla Maki
45e4e3354e Changed the presenter to only send 'set_cursor_style' on the topmost window
co-authored-by: Antonio <antonio@zed.dev>
2023-01-25 09:10:35 -08:00
Mikayla Maki
27a80a1c94 WIP 2023-01-25 09:10:35 -08:00
Mikayla Maki
426aeb7c5e WIP - adds platform APIs for checking the top most window 2023-01-25 09:10:35 -08:00
Petros Amoiridis
35524db136 Add a confirmation prompt
Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2023-01-25 18:55:08 +02:00
Petros Amoiridis
e928c1c61e Test removing a contact
Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2023-01-25 17:31:42 +02:00
Petros Amoiridis
5d4eb2b7ae Push responder and requester to remove_contacts
When we ask the server to remove a contact, we need to push the requester and responder ids to `remove_contacts` so that when the UI updates, the correct contacts will disappear from the list.

Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2023-01-25 13:10:29 +02:00
Petros Amoiridis
db978fcb6c Add an x mark icon to the list of contacts
We want to be able to remove contacts from our list. This was not possible. This change adds an icon and dispatches the RemoveContact action.

Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2023-01-25 13:10:29 +02:00
Joseph Lyons
3329b2bbd6 Remove gpui:: prefix from parameters 2023-01-24 19:46:04 -05:00
Joseph T. Lyons
a66a0cfd70 Merge pull request #2075 from zed-industries/add-upper-character-count-limit
Add upper character count limit
2023-01-24 19:44:19 -05:00
Julia
27ee994e17 Merge pull request #2074 from zed-industries/decode-openurl-to-pathbuf
Decode URL from `openURLs` to handle percent encoded paths
2023-01-24 19:08:17 -05:00
Julia
0414723a54 Decode URL from openURLs to handle percent encoded paths
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2023-01-24 18:48:15 -05:00
Joseph Lyons
588419492a Add upper character count limit 2023-01-24 17:38:20 -05:00
Max Brunsfeld
52296836fe Merge pull request #2069 from zed-industries/markdown-fenced-blocks
Support syntax highlighting in Markdown fenced code blocks
2023-01-24 14:19:36 -08:00
Max Brunsfeld
678ee26c5e Merge branch 'main' into markdown-fenced-blocks 2023-01-24 14:13:50 -08:00
Julia
29d67452e0 Merge pull request #2072 from zed-industries/os-file-associations
Insert macOS file association metadata during bundle process
2023-01-24 17:09:41 -05:00
Max Brunsfeld
51984f0d39 Fix feedback editor compile error due to LanguageRegistry API change 2023-01-24 14:09:24 -08:00
Julia
4d73d4b1b9 Insert macOS file association metadata during bundle process 2023-01-24 17:07:02 -05:00
Nathan Sobo
e8cea130a4 Merge pull request #2068 from zed-industries/doc-reparse
Document Buffer::reparse
2023-01-24 09:09:38 -07:00
Antonio Scandurra
dff08d3cfe Merge branch 'main' into markdown-fenced-blocks 2023-01-24 15:43:35 +01:00
Antonio Scandurra
c48e3f3d05 Reparse unknown injection ranges in buffer when adding a new language 2023-01-24 15:29:59 +01:00
Antonio Scandurra
f3509824e8 WIP: Start on SyntaxMapSnapshot::unknown_injection_languages 2023-01-24 12:55:49 +01:00
Antonio Scandurra
14c72cac58 Store syntax layers even if a language for the injection can't be found 2023-01-24 12:25:12 +01:00
Joseph T. Lyons
f95bda64ba Merge pull request #2009 from zed-industries/in-app-feedback
In app feedback
2023-01-24 01:05:05 -05:00
Nathan Sobo
96ffe84edb Document Buffer::reparse 2023-01-23 21:51:10 -07:00
Joseph Lyons
2b3d09f70a Fix CI missing license check 2023-01-23 18:34:10 -05:00
Joseph Lyons
8e8f66a5e1 Merge branch 'main' into in-app-feedback 2023-01-23 18:24:12 -05:00
Joseph Lyons
c9299a49e1 Clean out unused code 2023-01-23 18:19:10 -05:00
Mikayla Maki
9f048a4b1c Merge pull request #2044 from zed-industries/licensing-scripts
Licensing scripts
2023-01-23 12:58:27 -08:00
Mikayla Maki
0f0d5d5726 Added cargo-about auto-install and CI steps 2023-01-23 12:51:32 -08:00
Mikayla Maki
d060114f00 Added complete scripts for generating third party license files 2023-01-23 12:47:12 -08:00
Mikayla Maki
9d58032064 Add action to open licenses file 2023-01-23 12:45:18 -08:00
Mikayla Maki
4609be20de WIP: Adding license compliance to CI 2023-01-23 12:43:42 -08:00
Mikayla Maki
4d05d61ed7 Merge pull request #2049 from zed-industries/425-create-file-for-cli
Create files passed as args to CLI
2023-01-23 10:44:55 -08:00
Antonio Scandurra
8dabdd1baa Ensure injection layer is recomputed when language changes
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2023-01-23 19:02:06 +01:00
Julia
4678f6e0a5 Merge pull request #2063 from zed-industries/active-tab-close-icon-pointing-hand
Avoid stomping on tab close icon's cursor style
2023-01-23 11:48:35 -05:00
Julia
95b259b841 Avoid stomping on tab close icon's cursor style 2023-01-23 11:43:50 -05:00
Antonio Scandurra
79cf6fb8b6 WIP: Add test for dynamic language injection 2023-01-23 09:45:36 +01:00
Antonio Scandurra
cb610f37f2 WIP: Search language injections also by file extension
There are still a few things left:

1. Add test to verify we can successfully locate a language by its extension
2. Add test to reproduce bug where changing the fenced code block language
   won't reparse the block with the new language
3. Reparse injections for which we couldn't find a language when the language
   registry changes.
4. Check why the markdown grammar considers the trailing triple backtick as
   `(code_block_content)`, as opposed to being part of the outer markdown.
2023-01-23 08:56:41 +01:00
Antonio Scandurra
36e4dcef16 Avoid allocating a string to compare language names 2023-01-23 08:56:41 +01:00
Antonio Scandurra
c49dc8d6e5 Rename LanguageRegistry::get_language to language_for_name 2023-01-23 08:56:41 +01:00
Antonio Scandurra
f086fa3f21 Add syntax injections for Markdown fenced code blocks 2023-01-23 08:56:41 +01:00
Joseph Lyons
c118f9aabd Fix new errors after merge 2023-01-23 01:31:02 -05:00
Joseph Lyons
f2a5a4d0fd Merge branch 'main' into in-app-feedback 2023-01-23 01:20:10 -05:00
Joseph Lyons
fb2278dc6d Complete first iteration of in-app feedback 2023-01-23 00:59:46 -05:00
Mikayla Maki
50d37e1ae7 Merge pull request #2060 from zed-industries/fix-ci-fail
Fix mismatched return types on CI
2023-01-20 18:28:59 -08:00
Mikayla Maki
8dcaa81aad switch return type of accepts_first_mouse 2023-01-20 18:19:24 -08:00
Max Brunsfeld
e1a58e9381 Merge pull request #2059 from zed-industries/no-indent-adjustment-on-error
Avoid adjusting indentation of lines inside of newly-created errors
2023-01-20 17:13:30 -08:00
Max Brunsfeld
56080771e6 Add test for avoiding indent adjustment inside newly-created errors 2023-01-20 17:02:38 -08:00
Mikayla Maki
bb24f1142f Removed dbg 2023-01-20 16:47:23 -08:00
Mikayla Maki
94b2f8e07f Merge pull request #2054 from zed-industries/notification-mouse-events
Notification mouse events
2023-01-20 16:41:27 -08:00
Mikayla Maki
310d867aab Switch PopUp windows to use the NSTrackingArea API and add support for the mouseExited event
Co-authored-by: Antonio <antonio@zed.dev>
2023-01-20 16:35:25 -08:00
Max Brunsfeld
9f74d6e4ac Highlight and auto-indent await expressions in rust 2023-01-20 15:56:56 -08:00
Max Brunsfeld
f7ceebfce3 Avoid adjusting indentation of lines inside newly-created errors 2023-01-20 15:56:45 -08:00
Joseph Lyons
083986dfae WIP
Co-Authored-By: Mikayla Maki <mikayla.c.maki@gmail.com>
2023-01-20 18:05:24 -05:00
Max Brunsfeld
df1e1295e3 Merge pull request #2056 from zed-industries/confirm-quit
Add confirm_quit setting
2023-01-20 14:11:02 -08:00
Joseph Lyons
c1934d6232 WIP 2023-01-20 16:56:56 -05:00
Kay Simmons
4bee273511 Merge pull request #2057 from zed-industries/multiple-definitions-multibuffer
Multiple Definitions Multibuffer
2023-01-20 13:49:07 -08:00
Kay Simmons
2e37c0ea4a Open multiple definitions in a multibuffer instead of opening the files directly 2023-01-20 13:28:13 -08:00
Max Brunsfeld
2f42af2ac3 Add confirm_quit setting 2023-01-20 13:02:38 -08:00
Max Brunsfeld
be2c601176 Merge pull request #2055 from zed-industries/language-config-overrides
Language config overrides
2023-01-20 11:15:26 -08:00
Max Brunsfeld
8dcef46842 Drop 'override.' prefix from capture names in override query
Co-authored-by: Julia Risley <julia@zed.dev>
2023-01-20 10:44:33 -08:00
Max Brunsfeld
2aa7a9e95b Add overrides for all languages
Co-authored-by: Julia Risley <julia@zed.dev>
2023-01-20 10:39:31 -08:00
Mikayla Maki
8af1294ba5 Changed platform mouse moved handling to only fire on active or popup windows
co-authored-by: Antonio <antonio@zed.dev>
2023-01-20 09:37:09 -08:00
Mikayla Maki
5a00729fad Merge pull request #2051 from zed-industries/show-following-to-followed
Show following to followed
2023-01-20 09:23:34 -08:00
Mikayla Maki
97203e1e02 Fix broken merge 2023-01-20 09:19:58 -08:00
Mikayla Maki
95e661a78c Switched from active hover to NSView's acceptsFirstMouse API
Co-authored-by: Nathan <nathan@zed.dev>
2023-01-20 09:14:38 -08:00
Julia
b54b77b9ec Merge pull request #2053 from zed-industries/on-move-out
Hide hovers when mouse leaves area & window focus is lost
2023-01-20 10:55:26 -05:00
Julia
467e3dc50a Hide editor hover on mouse move out & always notify when hiding hover
Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2023-01-20 10:16:24 -05:00
Julia
131f3471fc Don't dispatch mousemove without focus & avoid swallowing external moves
Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2023-01-20 10:11:28 -05:00
Mikayla Maki
88170df7f0 Switched from active hover to NSView's acceptsFirstMouse API 2023-01-19 15:21:26 -08:00
Max Brunsfeld
2967b46a17 Implement scope-specific bracket matching and comment toggling
Co-authored-by: Julia Risley <julia@zed.dev>
2023-01-19 15:04:27 -08:00
Mikayla Maki
4eeb1aec50 Adds UI for showing the followed-by status to collaboration 2023-01-19 14:22:12 -08:00
Max Brunsfeld
1851e2e77c Start work on language config overrides
Co-authored-by: Julia Risley <julia@zed.dev>
2023-01-19 12:32:08 -08:00
Mikayla Maki
4a46227909 Change incoming call notification to only require one click 2023-01-19 11:43:46 -08:00
Mikayla Maki
86371d9f5e Merge pull request #2050 from zed-industries/disable-soft-wrap-in-single-line-editors
Disable soft wrap in single line editors
2023-01-19 11:26:54 -08:00
Joseph Lyons
38476f5429 Disable soft wrap in single line editors
Co-Authored-By: Mikayla Maki <mikayla.c.maki@gmail.com>
2023-01-19 13:45:05 -05:00
Petros Amoiridis
6c9422808a Merge pull request #2048 from zed-industries/408-add-date-to-zedlog
Add date to the log format
2023-01-19 20:26:01 +02:00
Petros Amoiridis
d30e129d63 Create files passed as args to CLI
Co-Authored-by: Mikayla <mikayla@zed.dev>
2023-01-19 19:38:05 +02:00
Julia
ad1947fa50 Add in-window on-move-out mouse handler concept 2023-01-19 12:34:13 -05:00
Petros Amoiridis
f088de5947 Add date to the log format
Co-Authored-By: Mikayla <mikayla@zed.dev>
2023-01-19 19:05:17 +02:00
Antonio Scandurra
c85ad96b45 Merge pull request #2047 from zed-industries/optimize-large-multi-buffers
Avoid stalling the UI thread when running large searches
2023-01-19 17:31:14 +01:00
Antonio Scandurra
1f649e52de Document RopeFingerprint 2023-01-19 17:25:59 +01:00
Antonio Scandurra
0a7111d216 Fix tests 2023-01-19 16:26:27 +01:00
Antonio Scandurra
a58b39f884 Merge branch 'main' into optimize-large-multi-buffers 2023-01-19 16:18:21 +01:00
Antonio Scandurra
c124caeb0d Add test for stream_excerpts_with_context_lines 2023-01-19 15:54:32 +01:00
Antonio Scandurra
5ce065ac92 Introduce MultiBuffer::stream_excerpts_with_context_lines
This allows us to push excerpts in a streaming fashion without blocking
the main thread.
2023-01-19 15:42:14 +01:00
Max Brunsfeld
5189dea3d5 Merge pull request #2046 from zed-industries/line-breaks-in-outline-items
Prevent outline items from accidentally spanning multiple lines
2023-01-18 16:46:45 -08:00
Max Brunsfeld
d9948bf772 Prevent outline items from accidentally spanning multiple lines 2023-01-18 16:43:18 -08:00
Max Brunsfeld
062e7a03a9 Update comments in Pane::close_items 2023-01-18 15:17:44 -08:00
Max Brunsfeld
17b4bfdf98 Merge pull request #2045 from zed-industries/fewer-unsaved-prompts
Avoid prompting to save when closing an untitled buffer that is still open elsewhere
2023-01-18 15:10:19 -08:00
Max Brunsfeld
06c31a0daa Fix workspace tests after changing Item trait 2023-01-18 15:00:40 -08:00
Mikayla Maki
203f569f2e collab 0.5.3 2023-01-18 12:52:58 -08:00
Mikayla Maki
b0fb5913b6 v0.71.x dev 2023-01-18 12:39:38 -08:00
Petros Amoiridis
6cc84a77c8 Merge pull request #2042 from zed-industries/fix-pasting-files
Allow pasting the same entry more than once in project panel
2023-01-18 18:37:31 +02:00
Petros Amoiridis
27a6951403 Allow pasting the same entry more than once in project panel
Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2023-01-18 17:35:21 +02:00
Petros Amoiridis
9f3c8c1e3a Merge pull request #2041 from zed-industries/fix-renaming-file
Fix mouse interrupting file/dir editing in project panel
2023-01-18 15:53:08 +02:00
Antonio Scandurra
a8f466b422 Don't starve the main thread adding too many search excerpts at once 2023-01-18 14:22:23 +01:00
Petros Amoiridis
f8d092fdc6 Fix mouse interrupting file/dir editing in project panel
Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2023-01-18 15:22:20 +02:00
Antonio Scandurra
8ca0f9ac99 Fix compile errors 2023-01-18 13:58:01 +01:00
Antonio Scandurra
a653e87658 WIP: Avoid converting RopeFingerprint into a string
Co-Authored-By: Petros Amoiridis <petros@zed.dev>
2023-01-18 12:22:08 +01:00
Joseph Lyons
bec03dc882 WIP 2023-01-18 00:12:52 -05:00
Kay Simmons
2c3c8b4cb0 Merge pull request #2039 from zed-industries/vim-mode-single-line-editors
disable vim mode in non full editors
2023-01-17 18:43:16 -08:00
Max Brunsfeld
a0a50cb412 Set up fake project paths correctly in tests 2023-01-17 17:40:34 -08:00
Kay Simmons
cf193154e1 fix broken test 2023-01-17 17:35:39 -08:00
Kay Simmons
c3518cefe8 disable vim mode in non full editors 2023-01-17 17:32:10 -08:00
Kay Simmons
4746fb5936 Merge pull request #2038 from zed-industries/fix-sidebar-width-with-dock
Fix issue with sidebars resizing themselves when dock is toggled
2023-01-17 17:22:24 -08:00
Max Brunsfeld
8651320c9f Make workspace items expose their underlying models, remove file-related methods 2023-01-17 17:21:06 -08:00
Kay Simmons
c9a306b4ac Change sidebars to use the window width as a max width rather than participating in the flex
co-authored-by: Mikayla <mikayla@zed.dev>
2023-01-17 16:58:55 -08:00
Max Brunsfeld
292708573f Replace MultiBuffer::files with ::for_each_buffer 2023-01-17 16:16:44 -08:00
Joseph T. Lyons
c3b102f5a8 Add users to mailing list when using an invite link 2023-01-17 16:46:01 -05:00
Max Brunsfeld
f61b870db6 Merge pull request #2034 from zed-industries/tab-focus-search
Use tab instead of command-f to move focus from the search editor to the main editor
2023-01-17 10:25:04 -08:00
Max Brunsfeld
1a6a807db5 Merge pull request #2035 from zed-industries/always-auto-indent-block-on-paste
Always auto-indent in block-wise mode when pasting
2023-01-17 10:24:41 -08:00
Antonio Scandurra
01aac0de48 Merge pull request #2036 from zed-industries/spurious-modified-buffers
Fix buffers appearing as modified when guest joined after buffer had been saved
2023-01-17 18:21:21 +01:00
Antonio Scandurra
dc88a67f50 Fix assertions 2023-01-17 18:09:45 +01:00
Julia
5ce0472a75 Merge pull request #2037 from zed-industries/go-to-fit
Utilize fit autoscroll for various go-to actions
2023-01-17 10:49:22 -05:00
Antonio Scandurra
cc788dc5f7 Verify saved_version, saved_version_fingerprint and saved_mtime 2023-01-17 16:46:06 +01:00
Julia
7726a9ec3d Utilize fit autoscroll for various go-to actions 2023-01-17 10:42:53 -05:00
Antonio Scandurra
fcf97ab41e Bump protocol version 2023-01-17 16:32:54 +01:00
Antonio Scandurra
bb200aa082 Relay saved version metadata to ensure buffers modified state converges 2023-01-17 16:32:54 +01:00
Antonio Scandurra
2cd9db1cfe Ensure Buffer::{is_dirty,has_conflict} converge in randomized test 2023-01-17 16:32:51 +01:00
Antonio Scandurra
467e5691b9 Include saved mtime and fingerprint when serializing buffers
This still doesn't include:

- An assertion in the randomized test to ensure buffers are not spuriously
marked as modified
- Sending an update when synchronizing buffers after a reconnection
2023-01-17 10:46:19 +01:00
Max Brunsfeld
0bd6f9b6ce Add a test for block-wise auto-indent without original indent info 2023-01-16 18:06:58 -08:00
Max Brunsfeld
244f259331 Always auto-indent in block-wise mode when pasting
If the text was copied outside of Zed, so the original indent column is unknown,
then act as if the first line was copied in its entirety.
2023-01-16 17:42:06 -08:00
Max Brunsfeld
625151806a Merge pull request #2022 from zed-industries/restart-lsp-after-invalid-version-reported
Fix crash when restarting a language server after it reports an unknown buffer version
2023-01-16 16:26:50 -08:00
Max Brunsfeld
6810490bf4 Remove tree-sitter dependency from gpui 2023-01-16 16:11:13 -08:00
Max Brunsfeld
3312a06368 Move focus back from buffer search using tab, not cmd-f 2023-01-16 16:01:15 -08:00
Max Brunsfeld
373902d933 Add '>' child operator in keymap context predicates 2023-01-16 16:00:46 -08:00
Max Brunsfeld
f62d13de21 Use a hand-coded parser for keymap context predicates 2023-01-16 15:53:49 -08:00
Julia
df2e9625b3 Merge pull request #2033 from zed-industries/open-with-zed
Make Finder "Open With" work correctly
2023-01-16 16:39:02 -05:00
Julia
765773cfe6 Make Finder "Open With" work correctly 2023-01-16 16:34:10 -05:00
Max Brunsfeld
9e5612348c Merge pull request #2032 from zed-industries/drag-split-dock-panic
Fix panic when trying to create a split in the dock by dragging
2023-01-16 11:51:28 -08:00
Max Brunsfeld
aa9710f7c3 Avoid unwrapping pane split in SplitWithProjectEntry
Also, implement pane-splitting operations more consistently.
2023-01-16 11:46:47 -08:00
Max Brunsfeld
b90e1012bf Don't render split drag targets in the dock 2023-01-16 10:24:17 -08:00
Antonio Scandurra
96186a3dae Merge pull request #2030 from zed-industries/fix-typescript-lsp
Fix error when running TypeScript language server after version 3.0.2
2023-01-16 17:33:44 +01:00
Antonio Scandurra
2c1fd7b0bf Add a 5s timeout when running npm info and npm install
This prevents those two commands from getting stuck when there is
no internet connection.
2023-01-16 16:51:45 +01:00
Antonio Scandurra
9779663c6b Use cli.mjs when available in TypeScript language server
Otherwise, fall back to using `cli.js`.
2023-01-16 16:50:30 +01:00
Joseph T. Lyons
8e02266d07 Add Discourse release action 2023-01-14 02:30:21 -05:00
Mikayla Maki
24ef80f4b6 Merge pull request #2027 from zed-industries/fix-keybindings-in-command-palette
Fix bug where keybindings would not show in command palette
2023-01-11 16:40:04 -08:00
Mikayla Maki
febf992a43 Fix bug where keybindings would not show in command palette 2023-01-11 16:35:49 -08:00
Kay Simmons
e9fdb13cb5 Merge pull request #2025 from zed-industries/vim-r
Vim replace
2023-01-11 16:28:39 -08:00
Kay Simmons
216b1aec08 fix replace in normal and visual modes 2023-01-11 14:57:40 -08:00
Max Brunsfeld
02f6928328 collab 0.5.2 2023-01-11 14:00:44 -08:00
Max Brunsfeld
fe27f135c0 Bump protocol version after reconnect support 2023-01-11 14:00:16 -08:00
Max Brunsfeld
74f8b493b2 collab 0.5.1 2023-01-11 13:25:28 -08:00
Max Brunsfeld
49379924cb Avoid dropping is_complete column for backward compatibility
Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2023-01-11 13:25:02 -08:00
Kay Simmons
14eec66e38 in progress 2023-01-11 12:10:55 -08:00
Mikayla Maki
048da9ddce collab 0.5.0 2023-01-11 10:50:16 -08:00
Mikayla Maki
9c627e82a0 v0.70.x dev 2023-01-11 10:34:11 -08:00
Mikayla Maki
14899d867e Merge pull request #2020 from zed-industries/telemtry-opt-out
Telemetry opt out
2023-01-10 17:43:30 -08:00
Max Brunsfeld
db831c3fbb Remove roadmap from readme 2023-01-10 17:38:34 -08:00
Mikayla Maki
bfb43c67f8 Silence spurious log error
co-authored-by: Kay <kay@zed.dev>
2023-01-10 16:50:54 -08:00
Mikayla Maki
a3da41bfad Fix test failures due to dependency on Settings global in client for telemetry
co-authored-by: kay <kay@zed.dev>
2023-01-10 16:39:03 -08:00
Max Brunsfeld
ef987cae6b Merge pull request #2019 from zed-industries/panic-activating-next-pane-in-dock
Fix crash when activating prev/next pane while dock is active
2023-01-10 16:27:39 -08:00
Max Brunsfeld
41ff42ddec Fix crash when restarting a language server after it reports an unknown buffer version
Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2023-01-10 16:27:15 -08:00
Mikayla Maki
37a4de1a84 Add opt-out for metric reporting
co-authored-by: kay <kay@zed.dev>
2023-01-10 15:49:54 -08:00
Max Brunsfeld
551dc1f318 Fix crash when activating prev/next pane while dock is active
Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2023-01-10 15:32:14 -08:00
Mikayla Maki
866f0e1344 Add the ability to opt-out of panic reporting
Co-authored-by: Kay <kay@zed.dev>
2023-01-10 15:07:01 -08:00
Kay Simmons
a222821dfa Merge pull request #2017 from zed-industries/dont-save-single-file-workspaces
Don't save single file worktrees
2023-01-09 17:31:34 -08:00
Mikayla Maki
d49a29d793 Merge pull request #2016 from zed-industries/serialization-updates
Serialization touch ups
2023-01-09 16:18:30 -08:00
Kay Simmons
176738d674 Address issue with workspaces where single-file worktrees, such as those from git commit messages, would get restored
Co-authored-by: Mikayla <mikayla@zed.dev>
2023-01-09 16:18:04 -08:00
Mikayla Maki
ebbe6e7aa9 Add serializing and restoring editor scroll position
Co-authored-by: Kay <kay@zed.dev>
2023-01-09 14:06:40 -08:00
Mikayla Maki
d237bdaa9b Added support for ALTER TABLE syntax in the syntax error checker function
Co-authored-by: Kay <kay@zed.dev>
2023-01-09 12:41:37 -08:00
Joseph Lyons
5517e743e1 Merge branch 'main' into in-app-feedback 2023-01-09 14:05:30 -05:00
Joseph Lyons
c1e61b479c Move feedback items into a feedback crate 2023-01-09 13:55:06 -05:00
Mikayla Maki
828f406b4f Fixed issue where serialized terminal working directories would be lost in complex interactions
Co-authored-by: Kay <kay@zed.dev>
Co-authored-by: Julia <julia@zed.dev>
2023-01-09 10:54:13 -08:00
Mikayla Maki
e743f3b1d8 Merge pull request #2015 from zed-industries/screenshare-on-terminal
Added open screenshare when following into non-followable buffer
2023-01-09 10:28:46 -08:00
Mikayla Maki
69e28d04b0 Added open screenshare when following into non-followable buffer 2023-01-09 10:19:11 -08:00
Julia
2be4f41964 Merge pull request #2013 from zed-industries/autocomplete-require-word-start-match
Require first codepoint of autocomplete query to match the first codepoint of some completion's subword
2023-01-09 13:06:43 -05:00
Julia
97ed89a797 Test that completion word splitting does reasonable things 2023-01-09 13:02:44 -05:00
Antonio Scandurra
ad7eaca443 Make Buffer::diff_base available outside of tests 2023-01-08 09:36:58 -07:00
Antonio Scandurra
ddbf251b5f Merge pull request #2014 from zed-industries/git-diff-reconnect
Update git diff base when synchronizing a guest's buffers
2023-01-08 09:28:51 -07:00
Antonio Scandurra
95098e4f29 Update git diff base when synchronizing a guest's buffers 2023-01-08 09:10:57 -07:00
Antonio Scandurra
529ccbda3a Introduce git index mutations to randomized collaboration test
The test now fails at the following seed:

```bash
SEED=850 ITERATIONS=1 OPERATIONS=131 cargo test --package=collab random
```
2023-01-08 08:52:16 -07:00
Joseph Lyons
a73e264c3d Merge branch 'in-app-feedback' of https://github.com/zed-industries/zed into in-app-feedback 2023-01-07 18:53:11 -05:00
Joseph Lyons
0200fc5542 WIP
Don't rely on contacts popover or contacts list for theming
Add metrics id to request body
Clean up some code and comments

Co-Authored-By: Mikayla Maki <mikayla.c.maki@gmail.com>
2023-01-07 18:53:00 -05:00
Joseph Lyons
9694771752 Move notes into PR 2023-01-07 18:53:00 -05:00
Joseph Lyons
9fc7f54631 Add to TODO 2023-01-07 18:53:00 -05:00
Joseph Lyons
1545b2ac61 Update TODO 2023-01-07 18:53:00 -05:00
Joseph Lyons
318a0b7ed0 In-app feedback WIP 2023-01-07 18:53:00 -05:00
Julia
a46ca32356 Completion word start filtering which is codepoint aware 2023-01-07 15:34:28 -05:00
Julia
12cd712b53 Require start autocomplete query byte to match a completion word start byte 2023-01-06 22:47:06 -05:00
Nathan Sobo
3cffee4065 Merge pull request #2011 from zed-industries/project-reconnection
Retain connection to remote projects when temporarily disconnected
2023-01-06 18:01:08 -07:00
Nathan Sobo
213658f1e9 Fix tests that failed due to defaulting the grouping interval to zero in tests 2023-01-06 17:56:21 -07:00
Kay Simmons
6b337914d7 Merge pull request #2010 from zed-industries/vim-f-t
Vim f and t bindings
2023-01-06 16:32:39 -08:00
Nathan Sobo
386f7ba16d Merge remote-tracking branch 'origin/main' into project-reconnection 2023-01-06 16:52:22 -07:00
Joseph Lyons
5387695ee0 WIP
Don't rely on contacts popover or contacts list for theming
Add metrics id to request body
Clean up some code and comments

Co-Authored-By: Mikayla Maki <mikayla.c.maki@gmail.com>
2023-01-06 17:40:30 -05:00
Kay Simmons
73e7967a12 working f and t bindings 2023-01-06 14:24:20 -08:00
Joseph Lyons
9d4cf2ff62 Move notes into PR 2023-01-06 15:41:31 -05:00
Antonio Scandurra
83c98ce049 Prevent making further requests after language server shut down
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2023-01-06 13:40:32 -07:00
Joseph Lyons
658541ec9f Add to TODO 2023-01-06 15:32:28 -05:00
Max Brunsfeld
6a57bd2794 Merge pull request #2008 from zed-industries/callback-leaks
Fix callback leaks when subscriptions are added and dropped in the same effect cycle
2023-01-06 12:01:27 -08:00
Antonio Scandurra
8487ae77e7 Share new worktrees when resharing project
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2023-01-06 12:58:19 -07:00
Max Brunsfeld
b762d70202 Remove unused CallbackCollection method 2023-01-06 11:51:36 -08:00
Max Brunsfeld
53cb3a4429 Remove GC step for callback collections, always drop callbacks asap 2023-01-06 11:33:50 -08:00
Max Brunsfeld
ef192a902a Remove dropped subscription eagerly when removing callbacks 2023-01-06 11:03:45 -08:00
Antonio Scandurra
585c23e9f6 Match guest's reported buffers on host when synchronizing after reconnect
If the host thinks a guest has a buffer that they don't have, the host won't
send it to them when they attempt to open it the next time. This can happen
if the guest disconnected before they received the host's response to an
initial open buffer request.

Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2023-01-06 11:48:34 -07:00
Max Brunsfeld
4708f5d88f Add test for notifying and dropping subscriptions in an update cycle 2023-01-06 10:46:03 -08:00
Max Brunsfeld
a165cd596b Make event tests in gpui more consistent 2023-01-06 10:44:45 -08:00
Antonio Scandurra
0d31c8c1c8 Only share worktrees when UpdateProject succeeded 2023-01-06 10:41:11 -07:00
Antonio Scandurra
8c5a0ca3a4 Couple worktree sharing with project metadata updates 2023-01-06 10:31:36 -07:00
Antonio Scandurra
5c05b7d413 Ensure initial project metadata is sent when first sharing a project 2023-01-06 10:18:26 -07:00
Max Brunsfeld
3da69117ae Use a CallbackCollection for action dispatch observations 2023-01-06 09:15:53 -08:00
Nathan Sobo
4256a96051 Avoid holding project handle on a call that could hang
This fixes a leaked handle error.
2023-01-05 21:01:27 -07:00
Max Brunsfeld
82e9f736bd Use a CallbackCollection for release observations
Co-authored-by: Kay Simmons <kay@zed.dev>
2023-01-05 18:02:53 -08:00
Max Brunsfeld
fa620bf98f Fix logic error in dropping callback subscriptions
Co-authored-by: Kay Simmons <kay@zed.dev>
2023-01-05 17:30:39 -08:00
Max Brunsfeld
378f0c32fe Restructure callback subscriptions
Fix a callback leak that would occur when dropping a subscription
to a callback collection after triggering that callback, but before
processing the effect of *adding* the handler.

Co-authored-by: Kay Simmons <kay@zed.dev>
2023-01-05 16:41:23 -08:00
Joseph Lyons
404f59090c Update TODO 2023-01-05 18:14:28 -05:00
Joseph Lyons
eb02834582 In-app feedback WIP 2023-01-05 17:58:52 -05:00
Nathan Sobo
77e322cb75 Wait for incomplete buffers when handling incoming buffer file updates 2023-01-05 13:50:25 -07:00
Julia
f669b8a029 Merge pull request #2007 from zed-industries/recent-projects-prefer-first-match
Prefer first max while fuzzy matching projects; fixes unexpected behavior
2023-01-05 12:10:51 -05:00
Julia
09d57d1f26 Prefer first max while fuzzy matching projects; fixes unexpected behavior 2023-01-05 11:27:50 -05:00
Nathan Sobo
7a629769b7 Re-request incomplete remote buffers when syncing buffers
Any buffers we requested but that haven't been fully sent will cause
outstanding open requests to hang. If we re-request them, any
waiting open requests will resume when the requested buffers finish
being created.

Co-authored-by: Max Brunsfeld <max@zed.dev>
Co-authored-by: Mikayla Maki <mikayla@zed.dev>
2023-01-04 16:00:43 -07:00
Joseph T. Lyons
bd223f5a1f Merge pull request #2002 from zed-industries/appease-clippy
Appease clippy
2023-01-04 16:33:29 -05:00
Nathan Sobo
1006ada458 Update scan_id on worktree entries when there is a conflict
Forgetting to do this meant we were unable to sync changes with reconnecting
guests in some cases.
2023-01-04 13:59:16 -07:00
Mikayla Maki
79f8f08caf v0.69.x dev 2023-01-04 11:45:25 -08:00
Nathan Sobo
789bbf15b7 Update buffer files when synchronizing buffers
It's possible that the host was disconnected when attempting to notify
guests of a file save, so we need to transmit this in order to correctly
update the file's mtime.

Next failing seed OPERATIONS=200 SEED=6894
2023-01-04 12:33:48 -07:00
Nathan Sobo
1dd085fc92 Introduce completed_scan_id to worktree
We need to know the most recent scan id we have actually completed. This is to
handle the case where a guest disconnects when we're in the middle of streaming
worktree entries to them. When they reconnect, they need to report a scan_id
from before we started streaming the entries, because we have no record of when
the stream was interrupted.

Next failure:
SEED=5051 ITERATIONS=1 OPERATIONS=200 cargo test --release --package=collab random -- --nocapture
2023-01-03 18:26:57 -07:00
Julia
1e18480808 Merge pull request #2005 from zed-industries/tsserver-include-completion-detail
Include Typescript completion item `detail` field in completion label
2023-01-03 16:44:10 -05:00
Julia
93a634991b Include Typescript completion item detail field in completion label 2023-01-03 16:37:35 -05:00
Nathan Sobo
90fb9b53ad WIP 2023-01-03 13:30:14 -07:00
Julia
d0ce7b3516 Merge pull request #2003 from zed-industries/correct-ra-name-key-default-settings
Correct default settings' name key for RA in init options example
2023-01-03 13:51:03 -05:00
Julia
b94c265240 Correct default settings' name key for RA in init options example 2023-01-03 13:50:08 -05:00
Nathan Sobo
8d70a22fa3 Record failing seed 2023-01-02 21:12:39 -07:00
Nathan Sobo
a6ffcdd0cf Track open buffers when handling sync requests
When a host sends a buffer to a guest for the first time, they record that
they have done so in a set tied to that guest's peer id. When the guest
reconnects and syncs buffers, they do so under a different peer id, so we
need to be sure we track which buffers we have sent them to avoid sending
them the same buffer twice, which would violate the guest's assumptions.
2023-01-02 20:27:59 -07:00
Max Brunsfeld
74843493f4 Assign fake fs entries' mtimes more consistently 2023-01-02 10:20:52 -08:00
Julia
6b62ce2aaa Merge pull request #2001 from zed-industries/dissmis-search-button
Add dismiss buffer search button & fix some faulty icon button styling
2023-01-02 11:21:16 -05:00
Julia
2b1118f597 Add dismiss buffer search button & fix some faulty icon button styling
Co-Authored-By: Nate Butler <nate@zed.dev>
2023-01-01 23:50:46 -05:00
Joseph Lyons
233b28a1b9 Appease clippy 2023-01-01 23:50:45 -05:00
Mikayla Maki
eeb21af841 Merge pull request #2000 from zed-industries/fix-line-seperator
Add other line separators to regex normalization
2022-12-30 18:24:36 -08:00
Mikayla Maki
a5bccecd48 Add other line separators to regex normalization 2022-12-30 18:18:02 -08:00
Joseph T. Lyons
0f818f2458 Merge pull request #1996 from zed-industries/add-close-clean-items-command
Add close clean items command
2022-12-29 14:12:04 -05:00
Joseph T. Lyons
7187cc8a4c Merge pull request #1994 from zed-industries/add-close-all-items-command
Add close all items command
2022-12-29 14:11:44 -05:00
Joseph Lyons
2bc36600d4 Rename variable 2022-12-29 13:43:56 -05:00
Joseph Lyons
60f29410ca Add close clean items command 2022-12-29 13:28:52 -05:00
Joseph Lyons
ca3c4566dd Add close all items command 2022-12-29 01:43:49 -05:00
Nathan Sobo
f3dee2d332 Remove printlns, found a failure
Failing seed:
SEED=416 MAX_PEERS=2 ITERATIONS=5000 OPERATIONS=159 cargo +beta test --package=collab random -- --nocapture
2022-12-27 17:01:31 -07:00
Nathan Sobo
273988b8d5 Set transaction group interval to ZERO by default in tests
We were seeing non-deterministic behavior in randomized tests when
generating backtraces took enough time to cause transactions to group
in some cases, but not group in others.

Tests will need to explicitly opt into grouping if they want it by
setting the interval explicitly. We have tests in the text module that
currently test the history grouping explicitly, but I'm not sure
it's needed elsewhere.
2022-12-27 16:47:28 -07:00
Joseph T. Lyons
b6337f59fd Merge pull request #1992 from zed-industries/add-home-and-end-key-support
Add home and end key support
2022-12-26 00:34:37 -05:00
Joseph Lyons
21a0df406f Add home and end key support 2022-12-26 00:24:26 -05:00
Max Brunsfeld
599acf0daa WIP - Panic immediately when detecting non-determinism via a change to the execution trace 2022-12-23 17:34:13 -08:00
Antonio Scandurra
6458a9144e WIP: failing randomized test
SEED=175 MAX_PEERS=2 ITERATIONS=1 OPERATIONS=159 cargo test --package=collab random -- --nocapture
2022-12-23 15:02:06 +01:00
Antonio Scandurra
344d05045d Avoid hanging waiting for operations when buffer has none 2022-12-23 12:26:48 +01:00
Antonio Scandurra
75803d8dbb Respond with an error when client hasn't got a registered handle 2022-12-23 11:53:13 +01:00
Joseph T. Lyons
04e053a216 Merge pull request #1991 from zed-industries/add-actions-for-requesting-features-and-filing-bug-reports
Add actions for requesting features and filing bug reports
2022-12-22 23:17:44 -05:00
Joseph Lyons
41bff3947c Add actions for requesting features and filing bug reports 2022-12-22 23:04:33 -05:00
Joseph T. Lyons
46152c6249 Merge pull request #1990 from zed-industries/add-memory-to-system-specs
Add memory to system specs
2022-12-22 18:16:50 -05:00
Joseph Lyons
f65fda2fa4 Add memory to system specs 2022-12-22 18:10:49 -05:00
Joseph T. Lyons
96ac650465 Merge pull request #1989 from zed-industries/add-command-to-copy-system-information-to-the-clipboard
add command to copy system information to the clipboard
2022-12-22 14:31:23 -05:00
Joseph Lyons
ea16082a42 Factored data into a SystemSpecs struct
Co-Authored-By: Mikayla Maki <mikayla.c.maki@gmail.com>
2022-12-22 14:27:32 -05:00
Max Brunsfeld
42e74e7eef Excluded deleted entries when initially sending worktrees to guests
Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2022-12-22 11:18:10 -08:00
Antonio Scandurra
738e161bc6 WIP: failing test
SEED=882 RUST_LOG=collab::tests::randomized_integration_tests=info MAX_PEERS=2 ITERATIONS=1 OPERATIONS=49 cargo test --package=collab random -- --nocapture
2022-12-22 18:32:21 +01:00
Antonio Scandurra
559e14799c Restructure randomized test to be a bit clearer and test more stuff 2022-12-22 17:54:25 +01:00
Joseph Lyons
eeb5b03d63 add command to copy system information to the clipboard 2022-12-22 03:43:04 -05:00
Max Brunsfeld
d750b02a7c Handle file and diff updates to incomplete buffers
Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2022-12-21 15:39:57 -08:00
Max Brunsfeld
c321f5d94a Assert that buffers' file state matches in randomized collab test
Co-authored-by: Nathan Sobo <nathan@zed.dev>
2022-12-21 15:38:44 -08:00
Max Brunsfeld
89da738fae In randomized test, open remote projects via the room
Co-authored-by: Nathan Sobo <nathan@zed.dev>
2022-12-21 14:13:43 -08:00
Max Brunsfeld
8cd94060bb 💄 Avoid referring to all clients as guests in random integration test 2022-12-21 11:37:18 -08:00
Max Brunsfeld
d8ccdff9fc Move randomized integration test into its own file 2022-12-21 11:26:24 -08:00
Antonio Scandurra
47348542ef Synchronize buffers when either the host or a guest reconnects 2022-12-21 14:20:56 +01:00
Antonio Scandurra
b5fb8e6b8b Remove unused JoinProjectError 2022-12-21 13:10:07 +01:00
Antonio Scandurra
b0336cd27e Add failing test for buffer synchronization after disconnecting 2022-12-21 11:56:15 +01:00
Antonio Scandurra
ecd80c553c Verify removing worktrees while host is offline 2022-12-21 11:47:01 +01:00
Antonio Scandurra
59d7f06c57 Handle proto::UpdateProjectCollaborator message in Project 2022-12-21 11:09:27 +01:00
Max Brunsfeld
15f666a50a Refresh project collaborator connection id for rejoined projects 2022-12-20 18:03:33 -08:00
Max Brunsfeld
ec6f2a3ad4 💄 Reorder private Project method 2022-12-20 17:32:42 -08:00
Max Brunsfeld
213be3d6bd Delete stale projects after cleanup interval, via server foreign key cascade 2022-12-20 17:27:42 -08:00
Max Brunsfeld
55800fc696 💄 Avoid repeated sql condition in rejoin_room 2022-12-20 17:23:52 -08:00
Max Brunsfeld
6a2066af6c 💄 Reduce indentation in Database::rejoin_room 2022-12-20 17:16:56 -08:00
Max Brunsfeld
cb8962691a Remove unnecessary UnshareProject message sent to clients leaving a project 2022-12-20 16:58:44 -08:00
Max Brunsfeld
bb00134f5f Clean up projects when leaving a room 2022-12-20 16:44:57 -08:00
Max Brunsfeld
21d6665c37 Merge branch 'main' into project-reconnection 2022-12-20 15:50:09 -08:00
Max Brunsfeld
6542b30d1f Implement rejoining projects as guest when rejoining a room
Co-authored-by: Julia Risley <julia@zed.dev>
2022-12-20 15:02:26 -08:00
Max Brunsfeld
55ebfe8321 Handle unshared projects when rejoining a room
Also, construct remote projects via the room, to guarantee
that the room can manage the projects' sharing lifecycle.

Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2022-12-20 11:10:46 -08:00
Antonio Scandurra
9d15b3d295 Remove unused import 2022-12-20 17:47:22 +01:00
Antonio Scandurra
d31fd9bbf2 Support adding worktrees to project while host is offline 2022-12-20 17:42:08 +01:00
Antonio Scandurra
52babc51a0 Make host reconnection test pass when mutating worktree while offline 2022-12-20 17:30:58 +01:00
Antonio Scandurra
1a3940a12e Fix project reconnection test to ensure rooms actually reconnects 2022-12-20 14:51:46 +01:00
Antonio Scandurra
1aec691b35 Sketch out project reconnection routine on the server 2022-12-20 12:03:43 +01:00
Max Brunsfeld
70dd586be9 Start work on rejoining rooms, supplying all project info at once
Co-authored-by: Nathan Sobo <nathan@zed.dev>
2022-12-19 17:50:43 -08:00
Max Brunsfeld
af85db9ea5 WIP - Retain hosts' project state when they disconnect 2022-12-19 11:38:08 -08:00
Max Brunsfeld
67b265b3d5 Add failing integration test for resharing projects on reconnect 2022-12-19 11:37:28 -08:00
Max Brunsfeld
c8b209306e collab 0.4.2 2022-12-19 11:29:22 -08:00
Max Brunsfeld
61c6c825b5 Merge pull request #1980 from zed-industries/following-panics
Fix panics when following
2022-12-19 11:26:28 -08:00
Antonio Scandurra
0ede89d82a WIP 2022-12-19 20:05:00 +01:00
Julia
6f211292b2 Merge pull request #1984 from zed-industries/format-problematic-db-macros
Format problematic DB macros
2022-12-19 11:17:34 -05:00
Julia
c49573dc11 Format problematic DB macros 2022-12-19 11:11:10 -05:00
Julia
de9c58d216 Merge pull request #1983 from zed-industries/multi-buffer-git-gutter
Multi buffer git gutter
2022-12-19 10:53:42 -05:00
Antonio Scandurra
84a860e54d Merge pull request #1982 from zed-industries/fix-rust-analyzer
Update rust-analyzer's `disk_based_diagnostics_progress_token`
2022-12-19 16:33:01 +01:00
Antonio Scandurra
cb60eb8a57 Update rust-analyzer's disk_based_diagnostics_progress_token 2022-12-19 16:27:25 +01:00
Antonio Scandurra
d8219545c9 💄 2022-12-19 16:17:27 +01:00
Antonio Scandurra
06f6d02579 Stop counting extensions in worktree 2022-12-19 16:05:22 +01:00
Max Brunsfeld
1e02ebbd11 Replicate pending selections separately from other selections
This fixes a panic that would occur when a leader created
a pending selection that overlapped another selection,
because the follower would attempt to treat that pending
selection as non-pending, which would violate the invariant
that selections are sorted and disjoint.
2022-12-17 14:00:53 -08:00
Max Brunsfeld
8c64514570 Add ZED_STATELESS env var, for suppressing persistence
Use this env var in the start-local-collaboration script to make
the behavior more predictable.
2022-12-17 12:03:51 -08:00
Kay Simmons
6fcb3c9020 Merge pull request #1972 from zed-industries/recent-workspace
Recent Project Picker
2022-12-16 15:51:57 -08:00
Kay Simmons
2c47bd4a97 Clear stale projects if they no longer exist 2022-12-16 15:45:17 -08:00
Antonio Scandurra
a5f624203e collab 0.4.1 2022-12-16 12:02:03 +01:00
Antonio Scandurra
98d1b6ec5a Merge pull request #1975 from zed-industries/screen-share-after-reconnect
Prevent screen-sharing from being lost after a reconnection
2022-12-16 12:00:02 +01:00
Antonio Scandurra
457e1046c8 Bump protocol version 2022-12-16 11:48:14 +01:00
Antonio Scandurra
21ab1bb434 Remove unnecessary PeerId parsing code 2022-12-16 11:45:42 +01:00
Antonio Scandurra
aa44de3d16 Fix test ensuring room is left when disconnected from LiveKit 2022-12-16 10:52:32 +01:00
Max Brunsfeld
ad37034960 Identify LiveKit room participants by user id, not peer id
This way, their participant id can remain the same when they reconnect.
2022-12-15 17:19:32 -08:00
Julia
ebd0c5d000 Handle reversed=true for multi-buffer git-hunks-in-range iteration
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2022-12-15 18:17:32 -05:00
Julia
f88b413f6a Rewrite multi-buffer aware git hunks in range to be more correct
Less ad-hoc state tracking, rely more on values provided by the
underlying data

Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-12-15 17:09:09 -05:00
Antonio Scandurra
c2f5381e5a collab 0.4.0 2022-12-15 19:37:53 +01:00
Antonio Scandurra
ea1f6689b9 Merge pull request #1971 from zed-industries/update-app-icons
Update Zed & Zed Preview icons
2022-12-15 19:37:04 +01:00
Antonio Scandurra
b1affb13bb Merge pull request #1973 from zed-industries/fix-reconnects-after-deploy
Improve reconnections to server after it is redeployed
2022-12-15 19:35:42 +01:00
Antonio Scandurra
2679e245a5 Minor stylistic change 2022-12-15 16:40:16 +01:00
Antonio Scandurra
5a334622ea 💄 2022-12-15 16:34:59 +01:00
Antonio Scandurra
5720c43fe7 Merge branch 'main' into fix-reconnects-after-deploy 2022-12-15 15:32:05 +01:00
Joseph T. Lyons
af4d846428 Merge pull request #1954 from zed-industries/add-symlink-to-applications-directory-in-dmg 2022-12-15 08:06:17 -05:00
Antonio Scandurra
5fb522a9b1 collab 0.3.14 2022-12-15 11:31:51 +01:00
Antonio Scandurra
86e5ae1f2e Allow nulls in projects.host_connection_{id,server_id}
The server version on stable won't be able to fill values for those
columns when we deploy the migration to preview.

With this commit we're also dropping the unused `worktree_extensions`
and `project_activity_periods` tables. The last version of the server
on stable (0.2.6) doesn't contain any code that accesses those tables.
2022-12-15 11:30:51 +01:00
Antonio Scandurra
aadd7f2886 collab 0.3.13 2022-12-15 10:53:17 +01:00
Antonio Scandurra
067a19c971 Avoid logging an error when user who hasn't joined any room disconnects 2022-12-15 10:45:03 +01:00
Antonio Scandurra
688f179256 Use "id" nomenclature more consistently 2022-12-15 10:15:59 +01:00
Antonio Scandurra
af77f1188a Re-add server_id indices for room_participants/project_collaborators 2022-12-15 09:58:25 +01:00
Julia
0dedc1f3a4 Get tests building again 2022-12-15 00:17:28 -05:00
Max Brunsfeld
6c58a4f885 Fix stale server queries, use foreign keys from connections to servers 2022-12-14 17:34:24 -08:00
Kay Simmons
81e3b48f37 Add keybinding 2022-12-14 16:14:16 -08:00
Kay Simmons
6da59311d1 Add open recent project to file menu 2022-12-14 16:02:48 -08:00
Kay Simmons
2bc685281c Add recent project picker 2022-12-14 15:59:50 -08:00
Max Brunsfeld
7e0b6ed1c6 Bump RPC version due to multibuffer following PR 2022-12-14 15:34:22 -08:00
Max Brunsfeld
e08d6cd6de Merge pull request #1921 from zed-industries/multibuffer-following
Allow following collaborators into editors with multi-excerpt buffers (refactors + find-all-refs)
2022-12-14 15:33:11 -08:00
Max Brunsfeld
954c9ac3fd Add integration test coverage for following into multibuffers 2022-12-14 15:28:58 -08:00
Max Brunsfeld
e4c5dfcf6c Use run_until_parked instead of 'condition' in all integration tests 2022-12-14 15:05:35 -08:00
Nate Butler
5f6313d336 Update Zed & Zed Preview icons 2022-12-14 17:41:18 -05:00
Max Brunsfeld
70efd2bebe Introduce a ViewId message, identifying views across calls 2022-12-14 14:40:07 -08:00
Max Brunsfeld
43b7e16c89 Handle retina screens in start-local-collaboration script 2022-12-14 11:50:15 -08:00
Max Brunsfeld
f99f581bfc Clean up state matching in from_state_proto using let/else statements 2022-12-14 11:09:33 -08:00
Max Brunsfeld
09d3fbf04f In editor following test, apply excerpt removals to both followers 2022-12-14 11:08:08 -08:00
Antonio Scandurra
363e3cae4b WIP 2022-12-14 19:25:07 +01:00
Antonio Scandurra
930be6706f WIP 2022-12-14 18:02:39 +01:00
Antonio Scandurra
05e99eb67e Introduce an epoch to ConnectionId and PeerId 2022-12-14 15:55:56 +01:00
Antonio Scandurra
9bd400cf16 collab 0.3.12 2022-12-14 11:43:33 +01:00
Antonio Scandurra
553585b9a1 Add more logging to Room 2022-12-14 11:43:12 +01:00
Antonio Scandurra
674fddac87 Instrument rpc::Server::start and reduce cleanup timeout again 2022-12-14 11:42:12 +01:00
Antonio Scandurra
63e7b9189d collab 0.3.11 2022-12-14 11:25:04 +01:00
Antonio Scandurra
9530976f61 Try using a longer timeout for cleaning up stale rooms 2022-12-14 11:24:36 +01:00
Antonio Scandurra
02c30b0091 collab 0.3.10 2022-12-14 09:35:52 +01:00
Antonio Scandurra
b9c7796547 Reduce readiness probe delay and period 2022-12-14 09:35:36 +01:00
Antonio Scandurra
e00cb6b074 collab 0.3.9 2022-12-14 09:05:19 +01:00
Antonio Scandurra
dc47552180 Fix kubernetes configuration for readiness probe 2022-12-14 08:58:19 +01:00
Antonio Scandurra
98a593b263 collab 0.3.8 2022-12-14 08:56:02 +01:00
Antonio Scandurra
897506c797 Define readiness probe to know when the new server can accept traffic 2022-12-14 08:54:46 +01:00
Antonio Scandurra
59c9a57570 collab 0.3.7 2022-12-14 08:43:18 +01:00
Antonio Scandurra
dde6cf596e Don't wait for stale project deletion before listening for connections 2022-12-14 08:42:34 +01:00
Antonio Scandurra
2596fefa04 collab 0.3.6 2022-12-13 23:09:02 +01:00
Antonio Scandurra
34b69896e4 Listen to SIGTERM in addition to ctrl-c for graceful shutdown 2022-12-13 23:08:43 +01:00
Antonio Scandurra
7824ace58b collab 0.3.5 2022-12-13 22:40:55 +01:00
Antonio Scandurra
b150efbd96 Set log level to debug for preview deployment
Also, add a log statement when we receive the interrupt signal.

Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-12-13 22:38:55 +01:00
Max Brunsfeld
c20204d269 collab 0.3.4 2022-12-13 11:17:37 -08:00
Max Brunsfeld
45bfcfc3b8 Fix excessive delay before clearing stale room data
Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2022-12-13 11:17:20 -08:00
Julia
cf72173282 Clamp end of visual git hunk to requested range 2022-12-13 13:58:50 -05:00
Max Brunsfeld
5218a2f966 collab 0.3.3 2022-12-13 10:17:01 -08:00
Max Brunsfeld
95748123b5 Merge remote-tracking branch 'origin/collab-0.3.x' 2022-12-13 10:16:25 -08:00
Max Brunsfeld
6ad326ac58 Wait longer before deleting outdated rooms from the database
Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2022-12-13 10:14:43 -08:00
Mikayla Maki
b0652c55c6 Merge pull request #1968 from zed-industries/fix-text-size-in-updates
Fix text size in notifications
2022-12-13 10:00:24 -08:00
Mikayla Maki
790ef19a48 Fix mis-set variables 2022-12-13 09:58:04 -08:00
Julia
ecd44e6914 Git diff recalc in project diagnostics 2022-12-13 12:35:58 -05:00
Julia
2cd9987b54 Git diff recalc in project search 2022-12-13 12:35:58 -05:00
Julia
7c3dc1e3dc Cleanup 2022-12-13 12:35:58 -05:00
Julia
00b7c78e33 Initial hacky displaying of git gutter in multi-buffers 2022-12-13 12:35:58 -05:00
Max Brunsfeld
11800a8a78 Merge branch 'main' into multibuffer-following 2022-12-13 09:25:18 -08:00
Antonio Scandurra
99c5f8c713 collab 0.3.2 2022-12-13 15:16:41 +01:00
Antonio Scandurra
461c2400ad Merge pull request #1965 from zed-industries/preserve-calls-during-server-restarts
Automatically re-join call when server is restarted
2022-12-13 14:12:32 +01:00
Antonio Scandurra
073a2988e6 Move creation of room_id index into its own migration 2022-12-13 13:57:41 +01:00
Antonio Scandurra
70aac75dd5 Run until parked before asserting about participants in restart test 2022-12-13 13:53:51 +01:00
Antonio Scandurra
4dc838fbb7 Reset connection pool when tearing down the server in tests 2022-12-13 13:51:25 +01:00
Antonio Scandurra
d4c8fa3090 Use a synchronous mutex for ConnectionPool 2022-12-13 13:50:51 +01:00
Antonio Scandurra
a594ba8f8a Simulate server restarts in randomized test 2022-12-13 12:18:38 +01:00
Antonio Scandurra
f1884d608b Allow server to see client disconnection before giving up on reconnecting 2022-12-13 12:17:21 +01:00
Antonio Scandurra
417db95693 Fix typo in index name 2022-12-13 11:44:48 +01:00
Antonio Scandurra
0220d7ba5d Include room_id in CallCanceled message
This ensures we don't accidentally cancel old calls.
2022-12-13 11:43:09 +01:00
Antonio Scandurra
e2b132ef23 💄 2022-12-13 11:37:39 +01:00
Antonio Scandurra
7e8d9d52d3 Delete stray debug statement 2022-12-13 11:36:40 +01:00
Antonio Scandurra
6a6a032f1f Delete stale rooms/participants after RECONNECT_TIMEOUT 2022-12-13 11:32:37 +01:00
Mikayla Maki
fcea254e8e Merge pull request #1963 from zed-industries/fix-workspace-corner-cases
Fix small workspace deserialization corner cases
2022-12-12 17:56:43 -08:00
Mikayla Maki
9bf0a02eae Allow an empty center group to successfully deserialize into an empty pane.
Fix error when deserializing pane axis which caused its members.len() > 1 invariant to be violated
Fix failure to gain center pane focus when failing to deserialize a center pane entirely

Co-authored-by: Max <max@zed.dev>
2022-12-12 17:51:16 -08:00
Max Brunsfeld
2affbcc495 Merge pull request #1962 from zed-industries/scrolling-breaks-follow
Avoid breaking follow when syncing leader's scroll position
2022-12-12 16:37:58 -08:00
Mikayla Maki
8012e9fcbd Merge pull request #1961 from zed-industries/fix-next-screen-bug
Fixed issue where the NextScreen action would never have an effect
2022-12-12 15:47:16 -08:00
Mikayla Maki
cd2d593a6c Fixed issue where the NextScreen action would never have an effect 2022-12-12 15:36:51 -08:00
Max Brunsfeld
9ef00ea44c Avoid breaking follow when syncing leader's scroll position
Co-authored-by: Mikayla Maki <mikayla@zed.dev>
Co-authored-by: Kay Simmons <kay@zed.dev>
2022-12-12 15:36:30 -08:00
Mikayla Maki
91d6b66fc4 Merge pull request #1959 from zed-industries/serializing-bug-fixes
Add check for whether the user wants a blank workspace when deserializing
2022-12-12 13:37:43 -08:00
Mikayla Maki
5a29a74956 Fetch last workspace explicitly when starting Zed
co-authored-by: Max <max@zed.dev>
2022-12-12 13:29:18 -08:00
Mikayla Maki
db3119b553 Add check for whether the user wants a blank workspace when deserializing 2022-12-12 12:25:52 -08:00
Max Brunsfeld
f797dfb88f Merge branch 'main' into multibuffer-following 2022-12-12 11:47:39 -08:00
Antonio Scandurra
beea9b68ff Allow re-joining room after server restarts 2022-12-12 16:03:21 +01:00
Antonio Scandurra
82397f34d1 Merge pull request #1950 from zed-industries/reconnect-to-room
Automatically re-join call when client connection drops
2022-12-12 13:40:21 +01:00
Antonio Scandurra
3cd77bfcc4 Always cast connection ids to i32
Postgres doesn't support unsigned types. This also adds indices to
support querying `project_collaborators` and `room_participants`
by connection id.
2022-12-12 11:43:08 +01:00
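
A minimal sketch of the convention this adopts, with a stand-in `ConnectionId` type rather than the real collab one: connection ids stay unsigned in memory and are cast to `i32` only at the database boundary.

```rust
// Illustrative stand-in type; Postgres has no unsigned column types, so the id
// is stored as a signed INTEGER and cast back on the way out.
#[derive(Copy, Clone, Debug, PartialEq)]
struct ConnectionId(u32);

fn to_db(id: ConnectionId) -> i32 {
    id.0 as i32
}

fn from_db(value: i32) -> ConnectionId {
    ConnectionId(value as u32)
}

fn main() {
    let id = ConnectionId(7);
    // The cast round-trips for all u32 values that fit the wire format in use.
    assert_eq!(from_db(to_db(id)), id);
    println!("round-tripped {:?}", id);
}
```
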
Antonio Scandurra
456396ca6e Rename connection_lost to answering_connection_lost 2022-12-12 11:43:08 +01:00
Antonio Scandurra
26b5653427 Delete hosted projects from database when connection is lost 2022-12-12 11:43:08 +01:00
Antonio Scandurra
895c365485 Introduce random reconnections in the randomized test 2022-12-12 11:43:08 +01:00
Antonio Scandurra
8fa26bfe18 Fix test_calls_on_multiple_connections after adding room reconnection 2022-12-12 11:43:08 +01:00
Antonio Scandurra
aca3f02590 Re-join room when client temporarily loses connection 2022-12-12 11:43:08 +01:00
Antonio Scandurra
d74fb97158 Remove Executor trait from collab and use an enum instead
This will let us save off the executor and avoid using generics.
2022-12-12 11:43:08 +01:00
Joseph Lyons
7608875625 Remove extraneous newline 2022-12-10 09:56:42 -05:00
Joseph Lyons
dcf11ac7e5 Add symlink to applications directory in dmg 2022-12-10 09:48:40 -05:00
Mikayla Maki
5879dcc4e9 Merge pull request #1951 from zed-industries/dock-bugfix
Fix infinite loop in dock position when deserializing
2022-12-09 13:19:44 -08:00
Mikayla Maki
34388a1d31 Updated is_child() to omit self 2022-12-09 12:07:49 -08:00
Mikayla Maki
3a4f8d267a Fix infinite loop in dock position when deserializing 2022-12-09 11:50:24 -08:00
Antonio Scandurra
0366d725ea collab 0.3.1 2022-12-09 08:19:41 +01:00
Antonio Scandurra
8bd7b28056 Merge pull request #1949 from zed-industries/do-not-drop-unregistered
Don't drop `unregistered` column in reconnection support migration
2022-12-09 08:15:53 +01:00
Antonio Scandurra
2697112a8a Don't drop unregistered column in reconnection support migration
We don't use this column anymore because, when a project is unshared, we
simply remove it from the `projects` table. However, this column is expected
in the stable version of the server and the database is shared between stable
and preview. If we dropped it, stable would start throwing errors.
2022-12-09 08:11:18 +01:00
Mikayla Maki
9bd4bc8813 Merge pull request #1940 from zed-industries/terminal-collab-kickoff
WIP - move terminal to project as pre-prep for collaboration
2022-12-08 20:30:32 -08:00
Mikayla Maki
925c9e13bb Remove terminal container view, switch to notify errors 2022-12-08 20:21:00 -08:00
Mikayla Maki
da100a09fb WIP 2022-12-08 20:21:00 -08:00
Mikayla Maki
c42da5c9b9 WIP 2022-12-08 20:21:00 -08:00
Mikayla Maki
2733f91d8c Fix bugs resulting from refactoring the terminal into project and workspace halves 2022-12-08 20:21:00 -08:00
Mikayla Maki
83aefffa38 Rearrange the terminal code to not have a cyclic dependency with the project 2022-12-08 20:21:00 -08:00
Mikayla Maki
1b8763d0cf WIP - move terminal to project as pre-prep for collaboration 2022-12-08 20:21:00 -08:00
Max Brunsfeld
7dde54b052 v0.68.x dev 2022-12-08 15:33:02 -08:00
Kay Simmons
b1e37378dc Merge pull request #1944 from zed-industries/vim-page-movement
Add scroll commands to vim mode
2022-12-08 14:58:19 -08:00
Kay Simmons
e61a38b3a9 remove printline 2022-12-08 14:45:22 -08:00
Kay Simmons
2cf48c03f9 fix final failing tests 2022-12-08 14:39:48 -08:00
Joseph Lyons
ab978ff1a3 collab 0.3.0 2022-12-08 16:35:13 -05:00
Joseph T. Lyons
dcd4b8f7db Merge pull request #1941 from zed-industries/Allow-overwriting-signup-data
Allow overwriting signup data if a user signs up more than once with the same email address
2022-12-08 16:11:28 -05:00
Kay Simmons
2eb335158b Merge pull request #1946 from zed-industries/fix-zombie-tooltips
notify views when hover finishes in tooltip wrapper
2022-12-08 11:37:12 -08:00
Kay Simmons
10aecc310e notify views when hover finishes in tooltip wrapper 2022-12-08 11:26:46 -08:00
Kay Simmons
750e7eb833 Merge pull request #1945 from zed-industries/drag-and-drop-deadzones
Add deadzones to drag and drop
2022-12-08 11:15:42 -08:00
Kay Simmons
36bc90b2b8 Add deadzones to drag and drop 2022-12-07 17:46:00 -08:00
Kay Simmons
f6f41510d2 fix failing tests from incorrect follow behavior 2022-12-07 17:25:48 -08:00
Kay Simmons
cffb064c16 Refactor editor scrolling and implement scroll commands from vim mode 2022-12-07 16:39:32 -08:00
Antonio Scandurra
3313387b28 Merge pull request #1943 from zed-industries/fix-inviting-existing-users-via-different-mail
Fix inviting existing users via a different email address
2022-12-07 14:19:17 +01:00
Joseph Lyons
d71d543337 Ensure that subsequent signup happens after initial
We can't rely on the test running slowly enough for the two `created_at` values to differ. This ensures the subsequent signup happens after the initial one and that the database doesn't overwrite the initial one.
2022-12-07 08:15:01 -05:00
Antonio Scandurra
665219fb00 Fix inviting user that had already signed up via a different email 2022-12-07 14:07:01 +01:00
Antonio Scandurra
1b8f23eeed Add failing test showcasing inviting existing user via different email 2022-12-07 14:06:59 +01:00
Joseph Lyons
5f31907127 Clean up test 2022-12-07 07:12:27 -05:00
Joseph Lyons
97989b04a0 Remove comment 2022-12-06 17:18:54 -05:00
Joseph Lyons
694840cdd6 Allow overwriting signup data if a user signs up more than once with the same email address 2022-12-06 17:12:12 -05:00
Antonio Scandurra
1920de81d9 Merge pull request #1938 from zed-industries/fix-metrics
Query project count as `i64` instead of `i32` when gathering metrics
2022-12-06 15:04:27 +01:00
Antonio Scandurra
3b5b48c043 Query project count as i64 instead of i32 when gathering metrics
Using the latter will cause a type mismatch when performing the query.
2022-12-06 15:00:32 +01:00
Antonio Scandurra
2080d3efff Merge pull request #1937 from zed-industries/fix-accepted-contact-busy-status
Fix busy status when accepting a contact request
2022-12-06 10:29:58 +01:00
Antonio Scandurra
fc7b01b74e Fix busy status when accepting a contact request
Previously, we would send a contact update when accepting a request
using the same `busy` status for both the requester and the responder.
This was obviously wrong and caused the requester to see their own
busy status as the newly-added responder contact's status.
2022-12-06 10:19:34 +01:00
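
A hedged sketch of the corrected behavior, using illustrative types (not the real collab messages): each side of the accepted request receives the other user's busy status.

```rust
// Illustrative only: on accept, build one update per recipient describing the
// *other* user, instead of reusing a single shared `busy` flag.
#[derive(Debug)]
struct ContactUpdate {
    user_id: u64,
    busy: bool,
}

fn updates_on_accept(
    (requester_id, requester_busy): (u64, bool),
    (responder_id, responder_busy): (u64, bool),
) -> (ContactUpdate, ContactUpdate) {
    // Sent to the requester: describes the responder.
    let for_requester = ContactUpdate { user_id: responder_id, busy: responder_busy };
    // Sent to the responder: describes the requester.
    let for_responder = ContactUpdate { user_id: requester_id, busy: requester_busy };
    (for_requester, for_responder)
}

fn main() {
    let (to_requester, to_responder) = updates_on_accept((1, true), (2, false));
    println!("to requester: {to_requester:?}, to responder: {to_responder:?}");
}
```
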
Antonio Scandurra
f1b35981c2 Merge pull request #1935 from zed-industries/reconnections-2
Move in-memory server state to the database
2022-12-06 09:22:59 +01:00
Antonio Scandurra
744714b478 Remove unused UserId import from seed script 2022-12-06 09:07:25 +01:00
Max Brunsfeld
35549ffabe Merge pull request #1936 from zed-industries/c-outline-pointers
Include outline items for c/c++ functions returning pointers
2022-12-05 14:03:49 -08:00
Max Brunsfeld
855f17c378 Include outline items for c/c++ functions returning pointers-to-pointers, references
Co-authored-by: Julia Risley <julia@zed.dev>
2022-12-05 13:56:21 -08:00
Mikayla Maki
f23f294b86 Merge pull request #1858 from zed-industries/add-lisp
Added tree sitter support for scheme and racket
2022-12-05 11:40:52 -08:00
Mikayla Maki
0921178b42 Got tree sitter integration to a shippable place 2022-12-05 11:31:52 -08:00
Mikayla Maki
30872d3992 Added experimental support for scheme, racket, and commonlisp 2022-12-05 11:31:49 -08:00
Antonio Scandurra
cd08d289aa Fix warnings 2022-12-05 19:45:56 +01:00
Antonio Scandurra
9a62150dce Merge branch 'main' into reconnections-2 2022-12-05 19:18:40 +01:00
Antonio Scandurra
7bbd97cfb9 Send diagnostic summaries synchronously 2022-12-05 19:07:06 +01:00
Antonio Scandurra
5443d9cffe Return project collaborators and connection IDs in a RoomGuard 2022-12-05 18:37:01 +01:00
Antonio Scandurra
be3fb1e985 Update sea-orm to fix bug on failure to commit transactions
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2022-12-05 18:36:25 +01:00
Antonio Scandurra
b97c35a468 Remove project_id foreign key from room_participants 2022-12-05 15:16:06 +01:00
Antonio Scandurra
eec3df09be Upgrade sea-orm 2022-12-05 14:56:01 +01:00
Antonio Scandurra
d3c411677a Remove random pauses to prevent the database from deadlocking 2022-12-05 12:03:45 +01:00
Antonio Scandurra
d97a8364ad Retry transactions if there's a serialization failure during commit 2022-12-05 10:49:53 +01:00
Antonio Scandurra
0ed731780a Remove duplication between transaction and room_transaction 2022-12-05 09:46:03 +01:00
Julia
11c1254e71 Merge pull request #1924 from zed-industries/simon-says-dont-move
Do not reorder tab opened by follower to end of item list
2022-12-04 13:00:07 -05:00
Mikayla Maki
6ba225f3a5 Merge pull request #1798 from zed-industries/serializing-workspaces
Serializing workspaces
2022-12-03 16:56:02 -08:00
Mikayla Maki
55eb0a3742 Fixed an error message and properly initialized the DB 2022-12-03 16:46:35 -08:00
Mikayla Maki
1ce0863158 Removed old code 2022-12-03 16:27:45 -08:00
Mikayla Maki
d609237c32 Found db parallelism problem :( 2022-12-03 16:26:37 -08:00
Mikayla Maki
4288f10873 And library change 2022-12-03 16:13:02 -08:00
Mikayla Maki
80e035cc2c Fixed bad rebase 2022-12-03 16:12:07 -08:00
Mikayla Maki
a1f273278b Added user notifications 2022-12-03 16:06:02 -08:00
Mikayla Maki
ffcad4e4e2 WIP fixing dock problems 2022-12-03 16:06:02 -08:00
Mikayla Maki
5262e8c77e CHANGE LOCK TO NOT BE DROPPED INSTANTLY. DANG U RUST
co-authored-by: kay@zed.dev
2022-12-03 16:06:02 -08:00
Mikayla Maki
5e240f98f0 Reworked thread safe connection to be threadsafer... again
Co-Authored-By: kay@zed.dev
2022-12-03 16:06:02 -08:00
Mikayla Maki
189a820113 First draft of graceful corruption restoration 2022-12-03 16:06:02 -08:00
Mikayla Maki
b8d423555b Added side bar restoration 2022-12-03 16:06:02 -08:00
Kay Simmons
8a48567857 Reactivate the correct item in each pane when deserializing 2022-12-03 16:06:01 -08:00
Kay Simmons
f68e8d4664 Address some issues with the sqlez_macros 2022-12-03 16:06:01 -08:00
Kay Simmons
1b225fa37c fix test failures 2022-12-03 16:06:01 -08:00
Kay Simmons
a29ccb4ff8 make thread safe connection more thread safe
Co-Authored-By: Mikayla Maki <mikayla@zed.dev>
2022-12-03 16:06:01 -08:00
Mikayla Maki
9cd6894dc5 Added multi-threading problem test 2022-12-03 16:06:01 -08:00
Kay Simmons
dd9d20be25 Added sql! proc macro which checks syntax errors on sql code and displays them with reasonable underline locations
Co-Authored-By: Mikayla Maki <mikayla@zed.dev>
2022-12-03 16:06:01 -08:00
Mikayla Maki
260164a711 Added basic syntax checker to sqlez 2022-12-03 16:06:01 -08:00
Kay Simmons
359b8aaf47 rename sql_method to query and adjust the syntax to more closely match function definitions 2022-12-03 16:06:01 -08:00
Kay Simmons
1cc3e4820a working serialized writes with panics on failure. Everything seems to be working 2022-12-03 16:06:01 -08:00
Mikayla Maki
b01243109e Removed database test files 2022-12-03 16:06:01 -08:00
Mikayla Maki
3e0f9d27a7 Made dev tools not break everything about the db
Also improved multi statements to allow out-of-order parameter binding in statements
Ensured that all statements are run for maybe_row and single, and that only one of the statements returns a row (and only a single row)
Made bind and column calls add useful context to errors

Co-authored-by: kay@zed.dev
2022-12-03 16:06:01 -08:00
Mikayla Maki
2dc1130902 Added extra sql methods 2022-12-03 16:06:01 -08:00
Mikayla Maki
37174f45f0 Touched up sql macro 2022-12-03 16:06:01 -08:00
Mikayla Maki
76c42af62a Finished terminal working directory restoration 2022-12-03 16:06:01 -08:00
Mikayla Maki
cf4c103660 Fixed workspace tests 2022-12-03 16:06:01 -08:00
Mikayla Maki
e1eff3f4cd WIP: Some bugs switching to database-provided IDs; terminal titles don't reload when restored from serialized state; workspace tests are no longer passing but should be easy to fix when it isn't 11:44 2022-12-03 16:06:01 -08:00
Mikayla Maki
a47f2ca445 Added UUID based, stable workspace ID for caching on item startup. Completed first sketch of terminal persistence. Still need to debug it though.... 2022-12-03 16:06:01 -08:00
Mikayla Maki
e659823e6c WIP terminal implementation. Need some way of getting the currently valid workspace ID 2022-12-03 16:06:01 -08:00
Mikayla Maki
a8ed95e1dc Implementing persistence for the terminal working directory, found an issue with my current data model. :( 2022-12-03 16:06:01 -08:00
Kay Simmons
cb1d2cd1f2 WIP serializing and deserializing editors 2022-12-03 16:06:01 -08:00
Mikayla Maki
9077b058a2 removed test file 2022-12-03 16:06:01 -08:00
Mikayla Maki
7ceb5e815e workspace level integration of serialization complete! Time for item level integration....
Co-Authored-By: kay@zed.dev
2022-12-03 16:06:01 -08:00
Mikayla Maki
992b94eef3 Rebased to main 2022-12-03 16:06:01 -08:00
Mikayla Maki
a0cb6542ba Polishing workspace data structures
Co-authored-by: kay@zed.dev
2022-12-03 16:06:01 -08:00
Mikayla Maki
6530658c3e Added center group deserialization 2022-12-03 16:06:01 -08:00
Kay Simmons
75d3d46b1b wip serialize editor 2022-12-03 16:06:01 -08:00
Kay Simmons
d20d21c6a2 Dock persistence working!
Co-Authored-By: Mikayla Maki <mikayla@zed.dev>
2022-12-03 16:06:01 -08:00
Kay Simmons
c1f7902309 wip 2022-12-03 16:06:01 -08:00
Mikayla Maki
4798161118 Distributed database pattern built.
Co-Authored-By: kay@zed.dev
2022-12-03 16:06:01 -08:00
Mikayla Maki
2a5565ca93 WIP 2022-12-03 16:06:00 -08:00
Mikayla Maki
a5edac312e Moved to workspaces crate... don't feel great about it 2022-12-03 16:05:26 -08:00
Mikayla Maki
e578f2530e WIP commit, migrating workspace serialization code into the workspace 2022-12-03 16:05:25 -08:00
Mikayla Maki
c84201fc9f Done first draft of strongly typed migrations 2022-12-03 16:05:25 -08:00
Kay Simmons
4a00f0b062 Add typed statements 2022-12-03 16:05:25 -08:00
Mikayla Maki
64ac84fdf4 Re-use big union statement for get_center_pane 2022-12-03 16:05:25 -08:00
Mikayla Maki
f27a9d77d1 Finished the bulk of workspace serialization. Just items and wiring it all through.
Co-Authored-By: kay@zed.dev
2022-12-03 16:05:25 -08:00
Mikayla Maki
0186289420 Refined sqlez, implemented 60% of workspace serialization sql 2022-12-03 16:05:25 -08:00
Mikayla Maki
6b214acbc4 Got Zed compiling again 🥰 2022-12-03 16:05:25 -08:00
Kay Simmons
d419f27d75 replace worktree roots table with serialized worktree roots list 2022-12-03 16:05:25 -08:00
Kay Simmons
eb0598dac2 more refactoring and slightly better api 2022-12-03 16:05:25 -08:00
Mikayla Maki
aa7b909b7b WIP3 2022-12-03 16:05:25 -08:00
Mikayla Maki
b552f1788c WIP2 2022-12-03 16:05:25 -08:00
Mikayla Maki
d492cbced9 WIP 2022-12-03 16:05:25 -08:00
Mikayla Maki
19aac6a57f Moved docks to a better position 2022-12-03 16:05:25 -08:00
Kay Simmons
685bc9fed3 impl bind and column and adjust pane tables 2022-12-03 16:05:25 -08:00
Mikayla Maki
406663c75e Converted to sqlez, so much nicer 2022-12-03 16:05:25 -08:00
Mikayla Maki
c8face33fa WIP, incorporating type parsing using new sqlez patterns 2022-12-03 16:05:25 -08:00
Mikayla Maki
3c1b747f64 WIP almost compiling with sqlez 2022-12-03 16:05:25 -08:00
Mikayla Maki
777f05eb76 Finished implementing the workspace stuff 2022-12-03 16:05:25 -08:00
Mikayla Maki
395070cb92 remove submodule 2022-12-03 16:05:25 -08:00
Mikayla Maki
a4a1859dfc Added sqlez api 2022-12-03 16:05:25 -08:00
Kay Simmons
e3fdfe02e5 WIP switching to sqlez 2022-12-03 16:05:24 -08:00
Mikayla Maki
7744c9ba45 Abandoning rusqlite, the API is miserable 2022-12-03 16:04:10 -08:00
Mikayla Maki
e6ca0adbcb Fixed failing serialization issues 2022-12-03 16:04:10 -08:00
Mikayla Maki
c105f41487 Started working on dock panes
co-authored-by: kay@zed.dev
2022-12-03 16:04:10 -08:00
Mikayla Maki
ddecba143f Refactored workspaces API and corrected method headers + fixed bug caused by migration failures
co-authored-by: kay@zed.dev
2022-12-03 16:04:10 -08:00
Mikayla Maki
3451a3c7fe Rebase - Got Zed compiling and fixed a build error due to conflicting dependencies that cargo didn't catch :(
Co-Authored-By: kay@zed.dev
2022-12-03 16:04:10 -08:00
Mikayla Maki
b9cbd4084e WIP: fixing up behavior of workspace initialization 2022-12-03 16:04:10 -08:00
Mikayla Maki
5505a776e6 Figured out a good schema for the pane serialization stuff 2022-12-03 16:04:10 -08:00
Mikayla Maki
46ff0885f0 WIP: Writing tests 2022-12-03 16:04:10 -08:00
Mikayla Maki
a9dc46c950 added stubs for more tests 2022-12-03 16:04:10 -08:00
Mikayla Maki
7d33520b2c Tidied up code, managed errors, etc. 2022-12-03 16:04:10 -08:00
Mikayla Maki
e9ea751f3d All workspace tests passing :D 2022-12-03 16:04:10 -08:00
Mikayla Maki
d7bbfb82a3 Rebase - Successfully detecting workspace IDs :D 2022-12-03 16:04:10 -08:00
Mikayla Maki
500ecbf915 Rebase fix + Started writing the real SQL we're going to need 2022-12-03 16:04:10 -08:00
K Simmons
e5c6393f85 rebase fix - almost have serialize_workspace piped to the workspace constructor. Just a few compile errors left 2022-12-03 16:04:10 -08:00
K Simmons
73f0459a0f wip 2022-12-03 16:04:10 -08:00
K Simmons
0c466f806c WIP 2022-12-03 16:04:10 -08:00
Mikayla Maki
b48e28b555 Built first draft of workspace serialization schemas, started writing DB tests
Co-Authored-By: kay@zed.dev
2022-12-03 16:04:10 -08:00
Mikayla Maki
60ebe33518 Rebase fix - Reworking approach to sql for take 2022-12-03 16:04:10 -08:00
Mikayla Maki
72c1ee904b Fix rebase - Broken tab 2022-12-03 16:04:10 -08:00
Julia
57e10b7dd5 Cleanup dbg 2022-12-02 16:42:49 -05:00
Julia
4bc1d77535 Fix tab following order test to wait for file open to propagate
Now it can actually repro the original bug

Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-12-02 16:09:37 -05:00
Antonio Scandurra
d96f524fb6 WIP: Manually rollback transactions to avoid spurious savepoint failure
TODO:
- Avoid unwrapping transaction after f(tx)
- Remove duplication between `transaction` and `room_transaction`
- Introduce random delay before and after committing a transaction
- Run lots of randomized tests
- Investigate diverging diagnostic summaries

Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-12-02 20:36:50 +01:00
Antonio Scandurra
1c30767592 Remove stale Error variant
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-12-02 19:20:51 +01:00
Antonio Scandurra
969c314315 Merge branch 'main' into reconnections-2 2022-12-02 19:09:33 +01:00
Antonio Scandurra
568de814aa Delete empty rooms 2022-12-02 16:56:41 +01:00
Antonio Scandurra
27f6ae945d Clear stale data on startup
This is a stopgap measure until we introduce reconnection support.
2022-12-02 16:30:00 +01:00
Antonio Scandurra
1b46b7a7d6 Move modules into collab library as opposed to using the binary
This ensures that we can use collab's modules from the seed script
as well.
2022-12-02 14:37:52 +01:00
Antonio Scandurra
7502558631 Make all tests pass again after migration to sea-orm 2022-12-02 14:22:36 +01:00
Antonio Scandurra
48b6ee313f Use i32 to represent Postgres INTEGER types in Rust 2022-12-02 13:58:54 +01:00
Antonio Scandurra
dec5f37e4e Finish porting remaining db methods to sea-orm 2022-12-02 13:58:23 +01:00
Julia
239a04ea5b Add test that should have exercised tab reordering while following
Except it doesn't: it passes both with and without the prior commit.
Investigate further
2022-12-02 00:31:16 -05:00
Joseph T. Lyons
ea03b48243 Merge pull request #1876 from zed-industries/update-release-urls-to-match-new-zed.dev-url-format
Update release urls to match new zed.dev url format
2022-12-01 20:32:14 -05:00
Max Brunsfeld
82824f78b6 Make each Zed instance use half the screen in 'start-local-collaboration' script 2022-12-01 16:43:39 -08:00
Max Brunsfeld
e4507c1d74 Fetch missing buffers when adding excerpts to a multibuffer while following
Make FollowableItem::apply_update_proto asynchronous. Use a single
task per workspace to process all leader updates, to prevent updates
from being interleaved.

Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2022-12-01 15:17:51 -08:00
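
A sketch of the ordering guarantee described above, using std threads and a channel rather than Zed's executor: a single consumer per workspace applies leader updates strictly in arrival order, so concurrent handling can never interleave them.

```rust
// Illustrative pattern only; in Zed the consumer would await the now-async
// FollowableItem::apply_update_proto, which may fetch missing buffers.
use std::sync::mpsc;
use std::thread;

#[derive(Debug)]
struct LeaderUpdate(String);

fn apply_update(update: LeaderUpdate) {
    // Stand-in for applying the proto update to the followed item.
    println!("applied {:?}", update);
}

fn main() {
    let (tx, rx) = mpsc::channel();
    // Single consumer: updates are processed one at a time, in send order.
    let worker = thread::spawn(move || {
        for update in rx {
            apply_update(update);
        }
    });
    for i in 0..3 {
        tx.send(LeaderUpdate(format!("update {i}"))).unwrap();
    }
    drop(tx);
    worker.join().unwrap();
}
```
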
Antonio Scandurra
585ac3e1be WIP 2022-12-01 18:39:24 +01:00
Antonio Scandurra
29a4baf346 Replace i32 with u32 for database columns
We never expect to return signed integers and so we shouldn't use
a signed type. I think this was a limitation of sqlx.
2022-12-01 17:47:51 +01:00
Antonio Scandurra
cfdf0a57b8 Implement Database::update_project 2022-12-01 17:36:36 +01:00
Antonio Scandurra
944d6554de Implement Database::unshare_project 2022-12-01 16:26:13 +01:00
Antonio Scandurra
e3ac67784a Implement Database::project_guest_connection_ids 2022-12-01 16:23:29 +01:00
Antonio Scandurra
62624b81d8 Avoid using col_expr whenever possible
...and use the more type-safe `::set`.
2022-12-01 16:17:27 +01:00
Antonio Scandurra
256e3e8e0f Get basic calls working again with sea-orm 2022-12-01 16:17:24 +01:00
Antonio Scandurra
aebc6326a9 Implement Database::create_room 2022-12-01 15:22:20 +01:00
Antonio Scandurra
db1d93576f Go back to a compiling state, panicking on unimplemented db methods 2022-12-01 15:13:57 +01:00
Antonio Scandurra
d2385bd6a0 Start using the new sea-orm backed database 2022-12-01 14:41:59 +01:00
Antonio Scandurra
19d14737bf Implement signups using sea-orm 2022-12-01 11:58:07 +01:00
Antonio Scandurra
4f864a20a7 Implement invite codes using sea-orm 2022-12-01 11:10:51 +01:00
Antonio Scandurra
2375741bdf Implement db2::Database::fuzzy_search_users 2022-12-01 10:09:53 +01:00
Julia
46f1d5f5c2 Avoid moving tab when leader item updates 2022-12-01 00:29:58 -05:00
Max Brunsfeld
d70996bb99 collab 0.2.5 2022-11-30 14:10:10 -08:00
Max Brunsfeld
9314c0e313 Replicate multibuffer excerpt additions and removals to followers 2022-11-30 13:20:13 -08:00
Julia
5a0c39cbed Merge pull request #1922 from zed-industries/dont-panic-clip-instead
Don't panic in point conversion, clip instead
2022-11-30 13:28:10 -05:00
Julia
41b2fde10d Style
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-30 13:11:08 -05:00
Julia
023ecd595b Change verify macro to debug panic
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-30 13:03:15 -05:00
Julia
2b979d3b88 Don't panic in rope point conversions 2022-11-30 12:43:43 -05:00
Julia
5965113fc8 Add verify macros & use in one location for point conversion 2022-11-30 12:43:43 -05:00
Max Brunsfeld
a48cd9125b Start-local-collaboration script: put peers' windows at different positions 2022-11-30 09:29:49 -08:00
Antonio Scandurra
4c04d512db Implement db2::Database::remove_contact 2022-11-30 17:39:17 +01:00
Antonio Scandurra
d1a44b889e Implement contacts using sea-orm
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2022-11-30 17:36:25 +01:00
Antonio Scandurra
04d553d4d3 Implement db2::Database::get_user_metrics_id 2022-11-30 15:06:04 +01:00
Antonio Scandurra
2e24d128db Implement access tokens using sea-orm 2022-11-30 14:47:03 +01:00
Antonio Scandurra
9e59056e7f Implement db2::Database::get_user_by_github_account 2022-11-30 14:18:46 +01:00
Antonio Scandurra
d9a892a423 Make some db tests pass against the new sea-orm implementation 2022-11-30 12:13:16 +01:00
Joseph T. Lyons
3a1cd6ed3a Merge pull request #1913 from zed-industries/Add-column-to-signups-for-added-to-mailing-list
Add "added_to_mailing_list" column on signups table
2022-11-29 19:30:11 -05:00
Max Brunsfeld
6120d6488b Start work on following in multi-buffers 2022-11-29 14:50:43 -08:00
Max Brunsfeld
82abf31ef1 Add start-local-collaboration script 2022-11-29 14:50:12 -08:00
Joseph T. Lyons
9f9398476d Merge pull request #1920 from zed-industries/order-invites-by-creation-time
Order invites by creation time
2022-11-29 14:28:53 -05:00
Antonio Scandurra
b7294887c7 WIP: move to a non-generic test database struct
Co-Authored-By: Mikayla Maki <mikayla@zed.dev>
Co-Authored-By: Julia Risley <julia@zed.dev>
2022-11-29 19:20:11 +01:00
Joseph Lyons
049c0f8ba4 Order invites by creation time 2022-11-29 12:57:51 -05:00
Antonio Scandurra
11a39226e8 Start on a new db2 module that uses SeaORM 2022-11-29 16:49:04 +01:00
Antonio Scandurra
ac24600a40 Start moving towards using sea-query to construct queries 2022-11-29 13:55:08 +01:00
Antonio Scandurra
d525cfd697 Increase probability of creating new files in randomized test 2022-11-29 11:02:14 +01:00
Joseph Lyons
4436ec48eb Add "added_to_mailing_list" column on signups table 2022-11-29 02:13:13 -05:00
Joseph T. Lyons
5a9a0f9fa5 Merge pull request #1918 from zed-industries/remove-sign-in-telemetry-event
Remove sign in telemetry event
2022-11-29 01:59:33 -05:00
Max Brunsfeld
6d9b55a654 Send full multibuffer anchors to following peers 2022-11-28 18:00:38 -08:00
Max Brunsfeld
3eac3e20d5 Emit events from a multibuffer when adding/removing excerpts 2022-11-28 17:57:55 -08:00
Joseph Lyons
d2cd9c94f7 Remove sign in telemetry event 2022-11-28 18:56:27 -05:00
Max Brunsfeld
3adc0b947f Merge pull request #1917 from zed-industries/integer-excerpt-ids
Use integers for excerpt ids, map them to locators internally
2022-11-28 14:27:35 -08:00
Max Brunsfeld
718f802157 Implement Copy for multibuffer anchors 2022-11-28 14:18:49 -08:00
Max Brunsfeld
f71145bb32 Add a layer of indirection between excerpt ids and locators 2022-11-28 14:18:49 -08:00
Antonio Scandurra
cd2a8579b9 Capture runnable backtraces only when detecting nondeterminism 2022-11-28 19:35:33 +01:00
Antonio Scandurra
d0709e7bfa Error if project is disconnected after getting completions response 2022-11-28 19:19:24 +01:00
Antonio Scandurra
fa3f100eff Introduce a new detect_nondeterminism = true attribute to gpui::test 2022-11-28 19:01:28 +01:00
Antonio Scandurra
f0a721032d Remove non-determinism caused by random entropy when reconnecting 2022-11-28 18:56:11 +01:00
Antonio Scandurra
0a565c6bae 💄 2022-11-28 17:44:18 +01:00
Antonio Scandurra
af2a2d2494 Return error when waiting on a worktree snapshot after disconnecting 2022-11-28 17:43:40 +01:00
Antonio Scandurra
cd0b663f62 Introduce per-room lock acquired before committing a transaction 2022-11-28 17:00:47 +01:00
Antonio Scandurra
2a0ddd99d2 Error if project is disconnected after getting code actions response 2022-11-28 15:05:34 +01:00
Antonio Scandurra
5581674f8f After completing LSP request, return an error if guest is disconnected 2022-11-28 14:39:27 +01:00
Antonio Scandurra
b0e1d6bc7f Fix integration test incorrectly assuming a certain ordering 2022-11-28 13:57:15 +01:00
Antonio Scandurra
ae11e4f798 Check the correct serialization failure code when retrying transaction 2022-11-28 13:56:03 +01:00
Max Brunsfeld
0b0fe91545 Merge pull request #1912 from zed-industries/matching-brackets-must-contain-range
Fix enclosing-bracket bug that appeared in JS for loops
2022-11-23 13:44:48 -08:00
Max Brunsfeld
aeea47323a Fix enclosing-bracket bug that appeared in JS for loops
Previously, we were relying on the tree-sitter query's range restriction to
avoid returning brackets that did not contain the given range. But the
query's range restriction only guarantees that we don't descend into parent
nodes unless they intersect the range.
2022-11-23 13:37:22 -08:00
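
A sketch of the containment check the fix implies, with simplified stand-in types: candidates returned under the query's range restriction are still filtered down to pairs that actually enclose the requested range, keeping the innermost one.

```rust
// Simplified types; the real implementation works over tree-sitter query
// matches on the buffer's syntax tree.
use std::ops::Range;

struct BracketPair {
    open: Range<usize>,
    close: Range<usize>,
}

fn innermost_enclosing(pairs: &[BracketPair], target: Range<usize>) -> Option<&BracketPair> {
    pairs
        .iter()
        // The query's range restriction does not guarantee containment, so check it here.
        .filter(|pair| pair.open.start <= target.start && target.end <= pair.close.end)
        // Among the pairs that contain the range, prefer the tightest one.
        .min_by_key(|pair| pair.close.end - pair.open.start)
}

fn main() {
    let pairs = vec![
        BracketPair { open: 0..1, close: 20..21 },
        BracketPair { open: 5..6, close: 10..11 },
    ];
    let found = innermost_enclosing(&pairs, 7..8).unwrap();
    println!("enclosing pair: {:?} {:?}", found.open, found.close);
}
```
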
Julia
e4185f38cf Merge pull request #1910 from zed-industries/lsp-coordinate-clamp
Don't trust LSP coordinates to be within document bounds
2022-11-23 14:07:37 -05:00
Julia
09e6d44873 Move Unclipped into separate file 2022-11-23 14:02:11 -05:00
Julia
525d84e5bf Remove spurious lifetimes
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-23 13:52:39 -05:00
Julia
55ca085d7d Consistency in prefix/suffix/signature of UTF16 point to point conversion
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-23 13:52:18 -05:00
Julia
03cfd23ac5 Bump protocol version back down as proto changes are non-breaking 2022-11-23 13:40:49 -05:00
Julia
a666ca3e40 Collapse proto Point into the one kind of use case, utf-16 coords
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-23 13:28:44 -05:00
Julia
b58ae8bdd7 Clip diagnostic range before and during empty range expansion
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-23 13:21:05 -05:00
Max Brunsfeld
5e7652698d v0.67.x dev 2022-11-23 09:56:06 -08:00
Julia
e51cbf67ab Fixup compile errors 2022-11-22 02:49:47 -05:00
Julia
8c75df30cb Wrap a bunch of traits for Unclipped<T> 2022-11-21 15:58:44 -05:00
Julia
1c84e77c37 Start adding concept of Unclipped text coordinates
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-21 15:48:25 -05:00
Max Brunsfeld
b3a92979a3 Merge pull request #1911 from zed-industries/single-file-worktree-event-extension
Fix file extension retrieval for single-file worktrees
2022-11-21 12:41:35 -08:00
Max Brunsfeld
55d3c09b6b Fix file extension retrieval for single-file worktrees
Previously, we used the file's 'path' method, which only returns the relative
path from the worktree root.
2022-11-21 12:34:36 -08:00
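
One plausible reading of the fix, sketched with assumed behavior (the real `File` API differs): when the worktree-relative path yields no extension, as can happen for a single-file worktree, fall back to the worktree's absolute path.

```rust
// Assumption for illustration: in a single-file worktree the relative path may
// be empty, so the extension has to come from the worktree's absolute path.
use std::path::Path;

fn file_extension(worktree_abs_path: &Path, relative_path: &Path) -> Option<String> {
    let candidate = if relative_path.extension().is_some() {
        relative_path
    } else {
        worktree_abs_path
    };
    candidate
        .extension()
        .map(|ext| ext.to_string_lossy().into_owned())
}

fn main() {
    // Single-file worktree: the root is the file itself.
    let ext = file_extension(Path::new("/projects/notes.md"), Path::new(""));
    println!("{ext:?}"); // Some("md")
}
```
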
Julia
436c89650a Rename clamped -> clipped 2022-11-21 15:23:00 -05:00
Julia
4ead1ecbbf Simplify logic of this method
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-21 14:25:01 -05:00
Julia
074e3cfbd6 Clamp UTF-16 to point conversions 2022-11-21 14:25:01 -05:00
Julia
bb32599ded Clamp for all UTF-16 to offset conversions which used to use ToOffset 2022-11-21 14:25:01 -05:00
Julia
f9cbed5a1f Clamp UTF-16 coordinate while performing LSP edits rather than panicking 2022-11-21 11:48:13 -05:00
Kay Simmons
0078bea877 change bump-version to install jq if it's not already installed 2022-11-18 13:42:46 -08:00
Kay Simmons
bb80cee19e Merge pull request #1814 from zed-industries/golden-ratio
Active Pane Magnification
2022-11-18 13:14:03 -08:00
Kay Simmons
0c50c0959d Merge pull request #1906 from zed-industries/mouse-down-capture-on-click-fix
Fix mouse down falling through popovers
2022-11-18 13:10:50 -08:00
Kay Simmons
75b8a12ab3 address issue where mouse down events weren't getting captured after the multiple handlers change 2022-11-18 13:04:27 -08:00
Antonio Scandurra
4c1b4953c1 Remove version from Room
We won't need it once we add the per-room lock.
2022-11-18 20:18:48 +01:00
Antonio Scandurra
c3d556d9bd Don't take an Arc<Server> in message handlers 2022-11-18 11:45:42 +01:00
Max Brunsfeld
d090d230e2 Merge pull request #1903 from zed-industries/override-pyright-completion-sorting
Add LspAdapter hook for processing completions, fix completion sorting from Pyright
2022-11-17 15:30:07 -08:00
Max Brunsfeld
bca635e5d3 Add LspAdapter hook for processing completions, fix completion sorting from Pyright 2022-11-17 15:26:46 -08:00
Julia
3938adf60a Merge pull request #1902 from zed-industries/event-handlers-are-multitude
Allow having multiple mouse event handlers of the same kind
2022-11-17 17:19:32 -05:00
Julia
6537def97e Allow having multiple mouse event handlers of the same kind
Co-Authored-By: Kay Simmons <kay@zed.dev>
2022-11-17 17:01:34 -05:00
Max Brunsfeld
5020c70a04 collab 0.2.4 2022-11-17 11:44:29 -08:00
Mikayla Maki
0a63d2e3e1 Merge pull request #1900 from zed-industries/fix-terminal-performance
Check for wakeups correctly
2022-11-17 11:17:13 -08:00
Mikayla Maki
ce0dfde8ee Check for wakeups correctly 2022-11-17 11:14:31 -08:00
Antonio Scandurra
44bb2ce024 Rename Store to ConnectionPool 2022-11-17 19:03:59 +01:00
Antonio Scandurra
6c83be3f89 Remove obsolete code from Store 2022-11-17 18:46:39 +01:00
Antonio Scandurra
0a4517f97e WIP: Introduce a db field to Session
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2022-11-17 17:30:26 +01:00
Antonio Scandurra
c34a5f3177 Introduce a new Session struct to server message handlers
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2022-11-17 17:11:06 +01:00
Antonio Scandurra
4f39181c4c Revert "Don't replace newer diagnostics with older ones"
This reverts commit 71eeeedc05.
2022-11-17 16:57:40 +01:00
Antonio Scandurra
e7e45be6e1 Revert "Wait for previous UpdateFollowers message ack before sending new ones"
This reverts commit fe93263ad4.
2022-11-17 16:57:32 +01:00
Antonio Scandurra
8621c88a3c Use int8 for scan_id and inode in Postgres 2022-11-17 16:56:43 +01:00
Antonio Scandurra
7dae21cb36 🎨 2022-11-17 15:35:18 +01:00
Antonio Scandurra
0f4598a243 Fix seed script 2022-11-17 15:34:35 +01:00
Antonio Scandurra
6415809b61 Fix errors in Postgres schema 2022-11-17 15:34:12 +01:00
Antonio Scandurra
fe93263ad4 Wait for previous UpdateFollowers message ack before sending new ones 2022-11-17 14:12:00 +01:00
Antonio Scandurra
3b34d858b5 Remove unwrap from Server::share_project 2022-11-17 13:33:26 +01:00
Antonio Scandurra
71eeeedc05 Don't replace newer diagnostics with older ones 2022-11-17 12:21:51 +01:00
Antonio Scandurra
532a599239 Use Db::get_guest_connection_ids in other db methods 2022-11-17 11:38:00 +01:00
Nathan Sobo
9eee22ff0a Fix column name in query 2022-11-16 19:40:53 -07:00
Nathan Sobo
94fe93c6ee Move unshare_project to db module 2022-11-16 18:28:45 -07:00
Joseph Lyons
93824dd239 Fix top-level header in discord webhook action 2022-11-16 20:02:15 -05:00
Nathan Sobo
e5f05c9f3b Move leave_project from Store to db module 2022-11-16 17:45:47 -07:00
Nathan Sobo
bdb521cb6b Fix typo in query 2022-11-16 16:52:05 -07:00
Joseph Lyons
c613c98e37 Move comment to correct location 2022-11-16 17:28:50 -05:00
Max Brunsfeld
4e4299d500 v0.66.x dev 2022-11-16 14:22:18 -08:00
Antonio Scandurra
c1291a093b WIP: Allow subscribing to remote entity before creating a model
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-16 19:51:24 +01:00
Mikayla Maki
ccc8c247a1 Merge pull request #1894 from zed-industries/opt-as-meta-fix
Fix small terminal bugs
2022-11-16 10:50:02 -08:00
Mikayla Maki
8e6c5dbc3b Fix unscaled scrolling when using an imprecise mouse wheel 2022-11-16 10:44:13 -08:00
Mikayla Maki
3c53fcdb43 Added alt-left: move word left and alt-right: move word right in the terminal for Antonio 2022-11-16 09:59:23 -08:00
Antonio Scandurra
adf43c87dd Batch some of the new queries in Db
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2022-11-16 17:19:06 +01:00
Antonio Scandurra
faf265328e Wait for acknowledgment before sending the next diagnostic summary 2022-11-16 16:03:01 +01:00
Antonio Scandurra
9bc57c0c61 Move Store::start_language_server to Db 2022-11-16 15:48:26 +01:00
Antonio Scandurra
95369f92eb Move Store::update_diagnostic_summary to Db 2022-11-16 15:41:33 +01:00
Antonio Scandurra
117458f4f6 Send worktree updates after project metadata has been sent 2022-11-16 14:58:11 +01:00
Antonio Scandurra
eeb32fa888 Improve queries for composite primary keys 2022-11-16 11:07:39 +01:00
Antonio Scandurra
f9567ae116 Cascade deletes when project is deleted 2022-11-16 10:41:36 +01:00
Antonio Scandurra
c151c87e12 Correctly leave projects when leaving room 2022-11-16 10:36:48 +01:00
Antonio Scandurra
3190236396 Update worktree entry instead of erroring when it already exists 2022-11-16 08:57:19 +01:00
Joseph T. Lyons
17dfbb91ba Merge pull request #1897 from zed-industries/allow-users-to-sign-up-multiple-times 2022-11-15 20:13:43 -05:00
Joseph Lyons
c3cf056fc5 allow users to sign up multiple times without throwing a 500 2022-11-15 20:04:56 -05:00
Nathan Sobo
275f0ae492 collab 0.2.3 2022-11-15 15:45:04 -07:00
Nathan Sobo
f4e9759f26 Merge pull request #1896 from zed-industries/fix-invites
Once we email someone an invite, honor the invitation
2022-11-15 15:43:30 -07:00
Nathan Sobo
fdf758e050 Once we email someone an invite, honor the invitation
Previously, we were waiting to decrement the invite_count until a user
confirmed their email address, which created weird situations where we would
email people only to have them get a 500 when trying to sign up. Now, we
decrement the invite_count upon sending the email and always honor the
invitation.

Co-Authored-By: Joseph Lyons <joseph@zed.dev>
Co-Authored-By: Max Brunsfeld <max@zed.dev>
2022-11-15 15:36:59 -07:00
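
A hedged sketch of the new accounting, with assumed table and column names (not collab's actual schema): the count is decremented when the email goes out, so the invitation is always honored afterwards.

```rust
// Illustrative only; the real code also creates the signup/invite records and
// sends the email through collab's mailer.
use sqlx::PgPool;

pub async fn record_invite_sent(pool: &PgPool, inviter_id: i32) -> Result<(), sqlx::Error> {
    // Decrement at send time rather than at email confirmation time.
    sqlx::query(
        "UPDATE users SET invite_count = invite_count - 1 \
         WHERE id = $1 AND invite_count > 0",
    )
    .bind(inviter_id)
    .execute(pool)
    .await?;
    Ok(())
}
```
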
Max Brunsfeld
0dfacd7ffa Merge pull request #1895 from zed-industries/ruby-solargraph
Add ruby LSP support via SolarGraph
2022-11-15 12:45:54 -08:00
Max Brunsfeld
36c07f940c Add ruby LSP support via SolarGraph 2022-11-15 12:34:43 -08:00
Mikayla Maki
01929037f1 fixed clear problem 2022-11-15 12:02:09 -08:00
Antonio Scandurra
0817f905a2 Fix syntax error in schema 2022-11-15 18:02:07 +01:00
Antonio Scandurra
ad67f5e4de Always use the database to retrieve collaborators for a project 2022-11-15 17:49:37 +01:00
Antonio Scandurra
e9eadcaa6a Move Store::update_worktree to Db::update_worktree 2022-11-15 17:18:28 +01:00
Antonio Scandurra
4b1dcf2d55 Always use strings to represent paths over the wire
Previously, the protocol used a mix of strings and bytes without any consistency.

When we go to multiple platforms, we won't be able to mix encodings of paths anyway.
We don't know this is the right approach, but it at least makes things consistent
and easy to read in the database, on the wire, etc. Really, we should be using entry
ids etc to refer to entries on the wire anyway, but there's a chance this is the
wrong decision.

Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2022-11-15 16:46:17 +01:00
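
Illustrative conversion helpers for the convention described above; the real proto messages and call sites differ, and the lossy conversion is a deliberate trade-off for a readable, consistent wire format over preserving arbitrary non-UTF-8 bytes.

```rust
// Sketch of path <-> string conversion at the wire boundary.
use std::path::{Path, PathBuf};

fn path_to_proto(path: &Path) -> String {
    // Lossy by design: non-UTF-8 path components are replaced rather than
    // round-tripped as raw bytes.
    path.to_string_lossy().into_owned()
}

fn path_from_proto(s: &str) -> PathBuf {
    PathBuf::from(s)
}

fn main() {
    let wire = path_to_proto(Path::new("crates/editor/src/editor.rs"));
    println!("{}", path_from_proto(&wire).display());
}
```
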
Antonio Scandurra
974ef967a3 Move Store::join_project to Db::join_project
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2022-11-15 16:37:51 +01:00
Antonio Scandurra
be523617c9 Start reworking join_project to use the database 2022-11-15 11:44:26 +01:00
Antonio Scandurra
6cbf197226 Determine whether a contact is busy via the database 2022-11-15 10:41:21 +01:00
Antonio Scandurra
3e8fcb04f7 Finish implementing Db::update_project 2022-11-15 09:01:51 +01:00
Antonio Scandurra
42bb5f0e9f Add random delay after returning results from the database 2022-11-15 08:48:16 +01:00
Max Brunsfeld
e401caff7c Merge pull request #1863 from zed-industries/erb
Add support for ERB
2022-11-14 16:59:51 -08:00
Max Brunsfeld
b222e8eb5a Use a longer example text in random combined injections test 2022-11-14 16:56:21 -08:00
Max Brunsfeld
fb35631337 Bump tree-sitter after merging included-ranges PR 2022-11-14 16:56:09 -08:00
Max Brunsfeld
6659dac2e5 Fix compile errors in seed script, ensure it is compiled on CI
Co-authored-by: Nate Butler <nate@zed.dev>
2022-11-14 11:12:25 -08:00
Antonio Scandurra
b9af2ae66e Switch to serializable isolation
Co-Authored-By: Nathan Sobo <nathan@zed.dev>
2022-11-14 19:39:12 +01:00
Mikayla Maki
0dcdd6ea39 Merge pull request #1889 from zed-industries/terminal-bugs
Refactored rendering to squash all wakeups into 1
2022-11-14 10:29:00 -08:00
Mikayla Maki
a66aa9c09c Refactored rendering to squash all wakeups into 1 2022-11-14 10:20:55 -08:00
Kay Simmons
e6c5079a49 Merge pull request #1873 from zed-industries/drag-project-entry-to-pane
Drag and Drop Project Entries Between Folders
2022-11-14 09:55:56 -08:00
Antonio Scandurra
d7369ace6a Skip applying room updates if they're older than the local room state 2022-11-14 15:35:39 +01:00
Antonio Scandurra
40073f6100 Wait for acknowledgment before sending the next project update 2022-11-14 15:32:49 +01:00
Antonio Scandurra
65c5adff05 Automatically decline call when user drops their last connection 2022-11-14 11:32:26 +01:00
Antonio Scandurra
59e8600e4c Implement Db::cancel_call 2022-11-14 11:12:23 +01:00
Antonio Scandurra
0310e27347 Fix query errors in Db::share_project 2022-11-14 10:53:11 +01:00
Antonio Scandurra
9902211af1 Leave room when connection is dropped 2022-11-14 10:13:36 +01:00
Joseph Lyons
1da5be6e8f Update release urls to match new zed.dev url format 2022-11-12 21:39:08 -05:00
Max Brunsfeld
ee66adbb49 SyntaxMap - Don't ignore deletions at the boundaries of layers 2022-11-11 16:43:57 -08:00
Max Brunsfeld
3612c46d6d Bump tree-sitter for included range bugfix 2022-11-11 16:36:04 -08:00
Julia
bf9c9b0103 Merge pull request #1875 from zed-industries/fix-code-actions-regression
Use `EMPTY` code action kind to get more RA actions without breaking TS
2022-11-11 15:34:40 -05:00
Julia
ea8778921b Use EMPTY code action kind to get more RA actions without breaking TS 2022-11-11 15:26:12 -05:00
Julia
2ef2b5a053 Merge pull request #1874 from zed-industries/propagate-mouse-up-through-drop-receiver
Propagate mouse up event through drop receiver in early return
2022-11-11 14:05:07 -05:00
Julia
5bb7701de7 Propagate mouse up event through drop receiver in early return 2022-11-11 14:00:01 -05:00
Antonio Scandurra
2145965749 WIP 2022-11-11 19:36:20 +01:00
Antonio Scandurra
11caba4a4c Remove stray log statement 2022-11-11 18:54:08 +01:00
Antonio Scandurra
9f39dcf7cf Get basic calls test passing again 2022-11-11 18:53:23 +01:00
Antonio Scandurra
1135aeecb8 WIP: Move Store::leave_room to Db::leave_room 2022-11-11 16:59:54 +01:00
Julia
b6f78cd5dc Merge pull request #1871 from zed-industries/skip-additional-edit-within-primary
Skip LSP additional completion edits which fall within primary edit
2022-11-11 10:31:41 -05:00
Antonio Scandurra
a6198c9a1a Merge pull request #1870 from zed-industries/fix-remote-abs-paths
Fix bug where absolute paths of worktrees were not being stored on the server
2022-11-11 15:28:17 +00:00
Julia
ad698fd110 Test for filtering out of faulty LSP completion additional edits 2022-11-11 10:28:07 -05:00
Antonio Scandurra
0d1d267213 Move Store::decline_call to Db::decline_call 2022-11-11 15:41:56 +01:00
Antonio Scandurra
c213c98ea4 Remove calls table and use just room_participants 2022-11-11 15:22:04 +01:00
Antonio Scandurra
cc58607c3b Move Store::join_room into Db::join_room 2022-11-11 14:43:40 +01:00
Antonio Scandurra
58947c5c72 Move incoming calls into Db 2022-11-11 14:28:26 +01:00
Antonio Scandurra
6871bbbc71 Start moving Store state into the database 2022-11-11 12:06:43 +01:00
Antonio Scandurra
28aa1567ce Include sender_user_id when handling a server message/request 2022-11-11 11:45:58 +01:00
Antonio Scandurra
f639c4c3d1 Add schema for reconnection support 2022-11-11 10:41:44 +01:00
Kay Simmons
d61c0fb24c Allow dragging and dropping project entries 2022-11-10 20:43:55 -08:00
Kay Simmons
3d5a3634cf Merge pull request #1867 from zed-industries/drag-project-entry-to-pane
Drag project entry to pane
2022-11-10 17:25:22 -08:00
Max Brunsfeld
9ad8731897 Fix boundary condition where injection was not found after an edit 2022-11-10 17:04:40 -08:00
Julia
44c3cedc48 Skip additional completions on any kind of overlap with primary edit 2022-11-10 18:53:37 -05:00
Max Brunsfeld
eeeaf6d9a2 Merge pull request #1872 from zed-industries/tests-use-real-db
Run integration tests with an in-memory sqlite database instead of a hand-coded fake database
2022-11-10 15:15:52 -08:00
Max Brunsfeld
2d4deaafcd Use upstream sqlx git repository 2022-11-10 15:13:32 -08:00
Max Brunsfeld
c839ab2028 Add missing cfg(test) attribute to sqlite RowsAffected 2022-11-10 15:04:57 -08:00
Max Brunsfeld
5d17347a45 Use our fork of sqlx, for now 2022-11-10 14:58:05 -08:00
Max Brunsfeld
9ce3524eb8 Run db tests against both postgres and sqlite 2022-11-10 14:29:03 -08:00
Julia
03115c8d71 Skip LSP additional completion edits which fall within primary edit 2022-11-10 15:28:11 -05:00
Max Brunsfeld
dafdc4b4a5 Run tests with an in-memory sqlite database 2022-11-10 12:18:35 -08:00
Max Brunsfeld
05a6bd914d Get integration tests passing with sqlite
Co-authored-by: Antonio Scandurra <antonio@zed.dev>
2022-11-10 11:03:52 -08:00
Nathan Sobo
fb03eb7a3c Store absolute path on server when sharing worktree
Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2022-11-10 09:34:16 -07:00
Nathan Sobo
8e70e1934a Avoid unwrapping when computing tab description
A bug caused the assumptions of this method to be violated. We will fix that in the next commit, but we want to be more conservative in our assumptions here going forward.

Co-Authored-By: Antonio Scandurra <me@as-cii.com>
2022-11-10 09:33:57 -07:00
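
A sketch of the defensive pattern this commit moves toward, with illustrative names rather than the real workspace item code: compute the tab description with `Option` combinators so a violated assumption degrades gracefully instead of panicking.

```rust
// Illustrative only; the real method works on project paths and worktrees.
use std::path::Path;

fn tab_description(path: &Path) -> Option<String> {
    // Previously this could have been `path.file_name().unwrap()`; now a
    // violated assumption yields no description instead of a panic.
    path.file_name().map(|name| name.to_string_lossy().into_owned())
}

fn main() {
    println!("{:?}", tab_description(Path::new("src/main.rs"))); // Some("main.rs")
    println!("{:?}", tab_description(Path::new("/")));           // None
}
```
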
Antonio Scandurra
1bb41b6f54 Go back to a compiling state and start running tests again 2022-11-10 15:24:49 +01:00
Antonio Scandurra
90d1d9ac82 WIP: add more trait bounds 2022-11-10 12:24:56 +01:00
Max Brunsfeld
bed06346d1 Total WIP - try making Db a generic struct instead of a trait 2022-11-09 19:28:06 -08:00
Max Brunsfeld
7e02ac772a Start work on using sqlite in tests 2022-11-09 19:26:29 -08:00
Nate Butler
c0d67d9522 Merge pull request #1868 from zed-industries/readd-search-match-highlight
Update search match highlight and occurrence style
2022-11-09 18:37:17 -05:00
Max Brunsfeld
d14dd27cdc Use a real database in tests, but block on db calls
Co-authored-by: Nathan Sobo <nathan@zed.dev>
2022-11-09 15:22:50 -08:00
Nate Butler
6b4dd2a5de Update search match highlight and occurrence style 2022-11-09 18:17:00 -05:00
Max Brunsfeld
9355d501bc Fetch release branches before bumping zed minor versions 2022-11-09 14:02:46 -08:00
Max Brunsfeld
335db5d03d v0.65.x dev 2022-11-09 13:18:23 -08:00
Julia
98461ea0cd Merge pull request #1865 from zed-industries/do-not-restrict-code-action-kinds
Don't restrict which kind of code actions we ask the LSP server for
2022-11-09 09:49:47 -05:00
Kay Simmons
5707bae9b9 Merge pull request #1866 from zed-industries/tweak-restart-zed-message
Remove restart to update zed icon
2022-11-08 14:38:10 -08:00
Kay Simmons
bbeb685769 remove unused comment 2022-11-08 14:26:55 -08:00
Kay Simmons
cea103e47c remove dead comment 2022-11-08 14:24:51 -08:00
Kay Simmons
ad31c284c7 remove restart to update zed icon because it clashes with the no diagnostics icon 2022-11-08 14:22:11 -08:00
Kay Simmons
738893c527 Split and move to pane working 2022-11-08 14:19:31 -08:00
Max Brunsfeld
6da04d0eee Fix failure to load .env.toml in bootstrap script 2022-11-08 14:09:17 -08:00
Julia
7482660456 Don't restrict which kind of code actions we ask the LSP server for 2022-11-08 16:23:31 -05:00
Mikayla Maki
00123ffe2b Merge pull request #1864 from zed-industries/add-more-move-cursor
Added more autoscroll behaviors
2022-11-08 11:57:09 -08:00
Mikayla Maki
53f8744794 Tried alternate strategy 2022-11-08 11:54:26 -08:00
Mikayla Maki
537d4762f6 Added more autoscroll behaviors 2022-11-08 11:35:12 -08:00
Max Brunsfeld
2f5004c238 Add highlight query for ERB 2022-11-08 11:29:57 -08:00
Max Brunsfeld
7dcd6c920f Add randomized test for syntax map with combined injections 2022-11-08 11:29:23 -08:00
Max Brunsfeld
ea42bc3c9b Rename some sum_tree seek targets in SyntaxMap 2022-11-08 10:36:44 -08:00
Antonio Scandurra
d3ba769291 Merge pull request #1862 from zed-industries/fix-catalina
Weakly link ReplayKit to ensure this library can be used on macOS 10.15
2022-11-08 15:07:09 +00:00
Antonio Scandurra
3f1b95927f Move weak linking into zed's build.rs 2022-11-08 16:04:55 +01:00
Antonio Scandurra
c183e854d7 Weakly link ReplayKit to ensure this library can be used on macOS 10.15 2022-11-08 13:44:31 +01:00
Max Brunsfeld
86f51ade60 Fix panic in handling edits to combined injections 2022-11-07 17:32:15 -08:00
Max Brunsfeld
c838a7d973 Get combined injections basically working
Co-authored-by: Nathan Sobo <nathan@zed.dev>
Co-authored-by: Mikayla Maki <mikayla@zed.dev>
2022-11-07 16:58:12 -08:00
Julia
9abfa037fd Handle project entry drop render & start fixing drag cancel issues
Co-Authored-By: Kay Simmons <kay@zed.dev>
2022-11-07 18:17:36 -05:00
Max Brunsfeld
5efe2ed6d3 Start work on handling combined injections in SyntaxMap 2022-11-07 14:45:17 -08:00
Julia
847376a4f5 Start dragging project panel entries
Co-Authored-By: Kay Simmons <kay@zed.dev>
2022-11-07 17:00:01 -05:00
Kay Simmons
1d6af4cf20 Merge pull request #1857 from zed-industries/fix-unicode-vim-left
fixes issue with left motion in vim mode clipping incorrectly
2022-11-04 15:24:17 -07:00
Kay Simmons
b6c5c7871e Addresses issue where left motion in vim mode would clip in the wrong direction 2022-11-04 15:21:29 -07:00
Nate Butler
5acae094bd Swap the color of diagnostic underlines to fix low contrast issue. 2022-11-04 18:02:10 -04:00
Kay Simmons
4d7425f4bf Merge pull request #1845 from zed-industries/vim-dd-fix
Vim dd fix
2022-11-04 14:57:21 -07:00
Joseph T. Lyons
2497e7c008 Merge pull request #1855 from zed-industries/make-app-a-user-property-in-mixpanel
Make `App` a user property in Mixpanel
2022-11-04 14:43:46 -04:00
Joseph T Lyons
474a5dd4f2 Make App a user property in Mixpanel
Currently, we cannot take advantage of Mixpanel's virtual session end events because they are associated with users, not events; this change moves the property onto users.

Co-Authored-By: Max Brunsfeld <maxbrunsfeld@gmail.com>
2022-11-04 14:16:12 -04:00
Max Brunsfeld
18ff459014 collab 0.2.2 2022-11-04 10:04:14 -07:00
Max Brunsfeld
927cfa44db Use Rust 1.65 in collab docker image 2022-11-04 10:03:47 -07:00
Max Brunsfeld
be6ee3cbff Start work on ERB language support 2022-11-04 09:33:59 -07:00
Antonio Scandurra
7301ab4f44 Merge pull request #1854 from zed-industries/command-palette-improvements
Improve styling of command palettes
2022-11-04 14:09:55 +00:00
Antonio Scandurra
0b231e58fd Show placeholder text for pickers 2022-11-04 10:18:47 +01:00
Antonio Scandurra
08b84416d2 Avoid showing "No matches" when query is empty if there are no matches 2022-11-04 10:18:47 +01:00
Antonio Scandurra
aec8aec800 Show the cursor right away when opening/focusing editors 2022-11-04 10:18:47 +01:00
Max Brunsfeld
5bcf9916c9 Merge pull request #1853 from zed-industries/diagnostics-focus-loop
Fix infinite focus transfer loop in project diagnostics
2022-11-03 18:01:14 -07:00
Max Brunsfeld
6076a3fc61 Fix infinite focus transfer loop in project diagnostics
Co-authored-by: Kay Simmons <kay@zed.dev>
2022-11-03 17:53:36 -07:00
Max Brunsfeld
05389dc239 Merge pull request #1844 from zed-industries/ruby
Add support for Ruby
2022-11-03 15:59:11 -07:00
Max Brunsfeld
d222904471 Add basic support for ruby
Co-authored-by: Kay Simmons <kay@zed.dev>
2022-11-03 15:52:33 -07:00
Max Brunsfeld
9f3ea0c87f Merge pull request #1851 from zed-industries/rust-let-else
Bump tree-sitter-rust for let-else, let-chains
2022-11-03 12:39:37 -07:00
Max Brunsfeld
601ec40ddc Bump tree-sitter-rust for let-else, let-chains 2022-11-03 12:35:12 -07:00
Joseph T. Lyons
adc4a5984e Merge pull request #1850 from zed-industries/add-automatic-annotations-for-mixpanel
Add automatic annotations for mixpanel
2022-11-03 13:26:15 -04:00
Joseph T Lyons
0f78174d78 Use folded style to make arguments easier to read 2022-11-03 13:06:19 -04:00
Joseph T Lyons
ad67a1b744 Add automatic annotations for mixpanel 2022-11-03 01:48:49 -04:00
Nate Butler
edc2966651 Merge pull request #1847 from zed-industries/readd-abruzzo-theme
Re-add Abruzzo theme to experimental themes
2022-11-02 22:34:04 -04:00
Joseph T. Lyons
eacfa856cf Merge pull request #1848 from zed-industries/fix-markdown-rendering-of-discord-release-notes-webhook
Fix markdown rendering of discord release notes webhook
2022-11-02 16:12:29 -04:00
Max Brunsfeld
fe4862d756 Fix error in bump-zed-patch-version script 2022-11-02 12:18:14 -07:00
Max Brunsfeld
8312d974ac Fix release channel on stable 2022-11-02 12:18:07 -07:00
Max Brunsfeld
6d3bd495fc Fix error case in what-is-deployed script 2022-11-02 12:05:37 -07:00
Joseph T Lyons
576e350bea Fix markdown rendering of discord release notes webhook 2022-11-02 14:55:27 -04:00
Max Brunsfeld
cc1325d6f9 Adjust script for getting changes to put in release notes
Now, this script is only useful for the preview channel's releases. The
stable channel's release notes can be mostly copied from the existing
preview releases notes.

Co-authored-by: Joseph Lyons <joseph@zed.dev>
2022-11-02 10:55:48 -07:00
Max Brunsfeld
c411cb7eef collab 0.2.1 2022-11-02 10:27:26 -07:00
Max Brunsfeld
c9ba41d002 Fix errors in bump-zed-minor-versions script
Co-authored-by: Joseph Lyons <joseph@zed.dev>
2022-11-02 10:25:20 -07:00
Joseph T Lyons
3caa7a4916 v0.64.x dev 2022-11-02 12:57:53 -04:00
Nate Butler
4f344f1ac1 Re-add Abruzzo theme to experimental themes 2022-11-02 11:28:43 -04:00
Kay Simmons
4977acf6a5 fix some vim mode bugs around deletions and failed motions 2022-11-02 01:20:11 -07:00
Kay Simmons
0cd2d9a9c8 added new supported feature 2022-11-01 13:15:14 -07:00
K Simmons
e2ba8d6df7 Add active pane magnification setting, which grows the active pane, making it easier to see its contents 2022-10-25 17:24:19 -07:00
447 changed files with 37616 additions and 23066 deletions

View File

@@ -8,4 +8,4 @@ crates/collab/static/styles.css
vendor/bin
assets/themes/*.json
assets/themes/internal/*.json
assets/themes/experiments/*.json
assets/themes/staff/*.json

View File

@@ -4,7 +4,7 @@ on:
push:
branches:
- main
- "v*"
- "v[0-9]+.[0-9]+.x"
tags:
- "v*"
pull_request:
@@ -41,12 +41,18 @@ jobs:
with:
clean: false
submodules: 'recursive'
- name: Run tests
run: cargo test --workspace --no-fail-fast
- name: Build collab binaries
run: cargo build --bins --all-features
- name: Build collab
run: cargo build -p collab
- name: Build other binaries
run: cargo build --workspace --bins --all-features
- name: Generate license file
run: script/generate-licenses
bundle:
name: Bundle app
@@ -106,6 +112,9 @@ jobs:
exit 1
fi
- name: Generate license file
run: script/generate-licenses
- name: Create app bundle
run: script/bundle

View File

@@ -14,13 +14,27 @@ jobs:
content: |
📣 Zed ${{ github.event.release.tag_name }} was just released!
Restart your Zed or head to https://zed.dev/releases to grab it.
Restart your Zed or head to https://zed.dev/releases/latest to grab it.
```md
### Changelog
# Changelog
${{ github.event.release.body }}
```
discourse_release:
if: ${{ ! github.event.release.prerelease }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Node
uses: actions/setup-node@v2
with:
node-version: "19"
- run: >
node "./script/discourse_release"
${{ secrets.DISCOURSE_RELEASES_API_KEY }}
${{ github.event.release.tag_name }}
${{ github.event.release.body }}
mixpanel_release:
runs-on: ubuntu-latest
steps:

.gitignore (vendored): 5 changes
View File

@@ -7,8 +7,8 @@
/crates/collab/static/styles.css
/vendor/bin
/assets/themes/*.json
/assets/themes/Internal/*.json
/assets/themes/Experiments/*.json
/assets/*licenses.md
/assets/themes/staff/*.json
**/venv
.build
Packages
@@ -18,3 +18,4 @@ DerivedData/
.swiftpm/config/registries.json
.swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata
.netrc
**/*.db

Cargo.lock (generated): 1351 changes

File diff suppressed because it is too large

View File

@@ -17,6 +17,7 @@ members = [
"crates/diagnostics",
"crates/drag_and_drop",
"crates/editor",
"crates/feedback",
"crates/file_finder",
"crates/fs",
"crates/fsevent",
@@ -40,11 +41,14 @@ members = [
"crates/project",
"crates/project_panel",
"crates/project_symbols",
"crates/recent_projects",
"crates/rope",
"crates/rpc",
"crates/search",
"crates/settings",
"crates/snippet",
"crates/sqlez",
"crates/sqlez_macros",
"crates/sum_tree",
"crates/terminal",
"crates/text",
@@ -65,7 +69,7 @@ serde_json = { version = "1.0", features = ["preserve_order", "raw_value"] }
rand = { version = "0.8" }
[patch.crates-io]
tree-sitter = { git = "https://github.com/tree-sitter/tree-sitter", rev = "366210ae925d7ea0891bc7a0c738f60c77c04d7b" }
tree-sitter = { git = "https://github.com/tree-sitter/tree-sitter", rev = "36b5b6c89e55ad1a502f8b3234bb3e12ec83a5da" }
async-task = { git = "https://github.com/zed-industries/async-task", rev = "341b57d6de98cdfd7b418567b8de2022ca993a6e" }
# TODO - Remove when a version is released with this PR: https://github.com/servo/core-foundation-rs/pull/457
@@ -80,4 +84,3 @@ split-debuginfo = "unpacked"
[profile.release]
debug = true

View File

@@ -1,10 +1,11 @@
# syntax = docker/dockerfile:1.2
FROM rust:1.64-bullseye as builder
FROM rust:1.65-bullseye as builder
WORKDIR app
COPY . .
# Compile collab server
ARG CARGO_PROFILE_RELEASE_PANIC=abort
RUN --mount=type=cache,target=./script/node_modules \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=./target \

View File

@@ -49,30 +49,14 @@ script/zed-with-local-servers --release
If you trigger `cmd-alt-i`, Zed will copy a JSON representation of the current window contents to the clipboard. You can paste this in a tool like [DJSON](https://chrome.google.com/webstore/detail/djson-json-viewer-formatt/chaeijjekipecdajnijdldjjipaegdjc?hl=en) to navigate the state of on-screen elements in a structured way.
### Staff Only Features
### Licensing
Many features (e.g. the terminal) take significant time and effort before they are polished enough to be released to even Alpha users. But Zed's team workflow relies on fast, daily PRs, and there can be large merge conflicts for feature branches that diverge for a few days. To bridge this gap, there is a `staff_mode` field in the Settings that staff can set to enable these unpolished or incomplete features. Note that this setting isn't leaked via autocompletion, but there is no mechanism to stop users from setting this anyway. As initialization of Zed components is only done once, on startup, setting `staff_mode` may require a restart to take effect. You can set staff-only key bindings in the `assets/keymaps/internal.json` file, and add staff-only themes in the `styles/src/themes/internal` directory.
We use [`cargo-about`](https://github.com/EmbarkStudios/cargo-about) to automatically comply with open source licenses. If CI is failing, check the following:
### Experimental Features
- Is it showing a `no license specified` error for a crate you've created? If so, add `publish = false` under `[package]` in your crate's Cargo.toml.
- Is the error `failed to satisfy license requirements` for a dependency? If so, first determine what license the project has and whether this system is sufficient to comply with this license's requirements. If you're unsure, ask a lawyer. Once you've verified that this system is acceptable, add the license's SPDX identifier to the `accepted` array in `script/licenses/zed-licenses.toml`.
- Is `cargo-about` unable to find the license for a dependency? If so, add a clarification field at the end of `script/licenses/zed-licenses.toml`, as specified in the [cargo-about book](https://embarkstudios.github.io/cargo-about/cli/generate/config.html#crate-configuration).
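For illustration only (this is an editorial sketch, not part of the diff above): the two fixes named in the bullets boil down to small TOML edits. The crate name `my_crate` and the SPDX identifiers shown are placeholders; only the `publish` flag and the `accepted` array come from the text above.
```toml
# In the crate's Cargo.toml: mark an internal crate as unpublished so
# cargo-about stops reporting `no license specified` for it.
[package]
name = "my_crate"   # hypothetical crate name
version = "0.1.0"
edition = "2021"
publish = false
```
```toml
# In script/licenses/zed-licenses.toml: accept a dependency's license by
# listing its SPDX identifier (example identifiers shown).
accepted = [
    "MIT",
    "Apache-2.0",
]
```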
A user facing feature flag can be added to Zed by:
* Adding a setting to the crates/settings/src/settings.rs FeatureFlags struct. Use a boolean for a simple on/off, or use a struct to experiment with different configuration options.
* If the feature needs keybindings, add a file to the `assets/keymaps/experiments/` folder, then update the `FeatureFlags::keymap_files()` method to check for your feature's flag and add its keybindings' path to the method's list.
* If you want to add an experimental theme, add it to the `styles/src/themes/experiments` folder
The Settings global should be initialized with the user's feature flags by the time the feature's `init(cx)` equivalent is called.
To promote an experimental feature to a full feature:
* If this is an experimental theme, move the theme file from the `styles/src/themes/experiments` folder to the `styles/src/themes/` folder
* Take the feature's settings (if any) and add them under a new variable in the Settings struct. Don't forget to add a `merge()` call in `set_user_settings()`!
* Take the feature's keybindings and add them to the default.json (or equivalent) file
* Remove the file from the `FeatureFlags::keymap_files()` method
* Remove the conditional in the feature's `init(cx)` equivalent.
That's it 😸
### Wasm Plugins
@@ -83,56 +67,3 @@ rustup target add wasm32-wasi
```
Plugins can be found in the `plugins` folder in the root. For more information about how plugins work, check the [Plugin Guide](./crates/plugin_runtime/README.md) in `crates/plugin_runtime/README.md`.
## Roadmap
We will organize our efforts around the following major milestones. We'll create tracking issues for each of these milestones to detail the individual tasks that comprise them.
### Minimal text editor
[Tracking issue](https://github.com/zed-industries/zed/issues/2)
Ship a minimal text editor to investors and other insiders. It should be extremely fast and stable, but all it can do is open, edit, and save text files, making it potentially useful for basic editing but not for real coding.
Establish basic infrastructure for building the app bundle and uploading an artifact. Once this is released, we should regularly distribute updates as features land.
### Collaborative code editor for internal use
[Tracking issue](https://github.com/zed-industries/zed/issues/6)
Turn the minimal text editor into a collaborative _code_ editor. This will include the minimal features that the Zed team needs to collaborate in Zed to build Zed without net loss in developer productivity. This includes productivity-critical features such as:
- Syntax highlighting and syntax-aware editing and navigation
- The ability to see and edit non-local working copies of a repository
- Language server support for Rust code navigation, refactoring, diagnostics, etc.
- Project browsing and project-wide search and replace
We want to tackle collaboration fairly early so that the rest of the design of the product can flow around that assumption. We could probably produce a single-player code editor more quickly, but at the risk of having collaboration feel more "bolted on" when we eventually add it.
### Private alpha for Rust teams on macOS
The "minimal" milestones were about getting Zed to a point where the Zed team could use Zed productively to build Zed. What features are required for someone outside the company to use Zed to productively work on another project that is also written in Rust?
This includes infrastructure like auto-updates, error reporting, and metrics collection. It also includes some amount of polish to make the tool more discoverable for someone that didn't write it, such as a UI for updating settings and key bindings. We may also need to enhance the server to support user authentication and related concerns.
The initial target audience is a team like us: a small team working in Rust that's potentially interested in collaborating. As the alpha proceeds, we can work with teams of different sizes.
### Private beta for Rust teams on macOS
Once we're getting sufficiently positive feedback from our initial alpha users, we widen the audience by letting people share invites. Now may be a good time to get Zed running on the web, so that it's extremely easy for a Zed user to share a link and be collaborating in seconds. Once someone is using Zed on the Web, we'll let them register for the private beta and download the native binary if they're on macOS.
### Expand to other languages
Depending on how the Rust beta is going, focus hard on dominating another niche language such as Elixir, or on getting a foothold within a niche of a larger language, such as React/TypeScript. Alternatively, go wide at this point and add decent support for several widely-used languages such as Python, Ruby, TypeScript, etc. This would entail taking 1-2 weeks per language and making sure we ship a solid experience based on a publicly-available language server. Each language has slightly different development practices, so we need to make sure Zed's UX meshes well with those practices.
### Future directions
Each of these sections could probably be broken into multiple milestones, but this part of the roadmap is too far in the future to go into that level of detail at this point.
#### Expand to other platforms
Support Linux and Windows. We'll probably want to hire at least one person that prefers to work on each respective platform and have them spearhead the effort to port Zed to that platform. Once they've done so, they can join the general development effort while ensuring the user experience stays good on that platform.
#### Expand on collaboration
To start with, we'll focus on synchronous collaboration because that's where we're most differentiated, but there's no reason we have to limit ourselves to that. How can our tool facilitate collaboration generally, whether it's sync or async? What would it take for a team to go 100% Zed and collaborate fully within the tool? If we haven't added it already, basic Git support would be nice.

View File

@@ -20,8 +20,10 @@
"alt-cmd-left": "pane::ActivatePrevItem",
"alt-cmd-right": "pane::ActivateNextItem",
"cmd-w": "pane::CloseActiveItem",
"cmd-shift-w": "workspace::CloseWindow",
"alt-cmd-t": "pane::CloseInactiveItems",
"cmd-k u": "pane::CloseCleanItems",
"cmd-k cmd-w": "pane::CloseAllItems",
"cmd-shift-w": "workspace::CloseWindow",
"cmd-s": "workspace::Save",
"cmd-shift-s": "workspace::SaveAs",
"cmd-=": "zed::IncreaseBufferFontSize",
@@ -36,6 +38,7 @@
"cmd-n": "workspace::NewFile",
"cmd-shift-n": "workspace::NewWindow",
"cmd-o": "workspace::Open",
"alt-cmd-o": "projects::OpenRecent",
"ctrl-`": "workspace::NewTerminal"
}
},
@@ -66,16 +69,18 @@
"up": "editor::MoveUp",
"pageup": "editor::PageUp",
"shift-pageup": "editor::MovePageUp",
"home": "editor::MoveToBeginningOfLine",
"down": "editor::MoveDown",
"pagedown": "editor::PageDown",
"shift-pagedown": "editor::MovePageDown",
"end": "editor::MoveToEndOfLine",
"left": "editor::MoveLeft",
"right": "editor::MoveRight",
"ctrl-p": "editor::MoveUp",
"ctrl-n": "editor::MoveDown",
"ctrl-b": "editor::MoveLeft",
"ctrl-f": "editor::MoveRight",
"ctrl-l": "editor::CenterScreen",
"ctrl-l": "editor::NextScreen",
"alt-left": "editor::MoveToPreviousWordStart",
"alt-b": "editor::MoveToPreviousWordStart",
"alt-right": "editor::MoveToNextWordEnd",
@@ -109,6 +114,12 @@
"stop_at_soft_wraps": true
}
],
"shift-home": [
"editor::SelectToBeginningOfLine",
{
"stop_at_soft_wraps": true
}
],
"ctrl-shift-a": [
"editor::SelectToBeginningOfLine",
{
@@ -121,6 +132,12 @@
"stop_at_soft_wraps": true
}
],
"shift-end": [
"editor::SelectToEndOfLine",
{
"stop_at_soft_wraps": true
}
],
"ctrl-shift-e": [
"editor::SelectToEndOfLine",
{
@@ -169,10 +186,10 @@
}
},
{
"context": "BufferSearchBar",
"context": "BufferSearchBar > Editor",
"bindings": {
"escape": "buffer_search::Dismiss",
"cmd-f": "buffer_search::FocusEditor",
"tab": "buffer_search::FocusEditor",
"enter": "search::SelectNextMatch",
"shift-enter": "search::SelectPrevMatch"
}
@@ -472,6 +489,15 @@
"terminal::SendText",
"\u0001"
],
// Terminal.app compatibility
"alt-left": [
"terminal::SendText",
"\u001bb"
],
"alt-right": [
"terminal::SendText",
"\u001bf"
],
// There are conflicting bindings for these keys in the global context.
// these bindings override them, remove at your own risk:
"up": [

View File

@@ -1 +0,0 @@
[]

View File

@@ -1,6 +1,6 @@
[
{
"context": "Editor && VimControl",
"context": "Editor && VimControl && !VimWaiting",
"bindings": {
"g": [
"vim::PushOperator",
@@ -8,6 +8,22 @@
"Namespace": "G"
}
],
"i": [
"vim::PushOperator",
{
"Object": {
"around": false
}
}
],
"a": [
"vim::PushOperator",
{
"Object": {
"around": true
}
}
],
"h": "vim::Left",
"backspace": "vim::Backspace",
"j": "vim::Down",
@@ -37,23 +53,43 @@
}
],
"%": "vim::Matching",
"ctrl-y": [
"vim::Scroll",
"LineUp"
],
"f": [
"vim::PushOperator",
{
"FindForward": {
"before": false
}
}
],
"t": [
"vim::PushOperator",
{
"FindForward": {
"before": true
}
}
],
"shift-f": [
"vim::PushOperator",
{
"FindBackward": {
"after": false
}
}
],
"shift-t": [
"vim::PushOperator",
{
"FindBackward": {
"after": true
}
}
],
"escape": "editor::Cancel",
"i": [
"vim::PushOperator",
{
"Object": {
"around": false
}
}
],
"a": [
"vim::PushOperator",
{
"Object": {
"around": true
}
}
],
"0": "vim::StartOfLine", // When no number operator present, use start of line motion
"1": [
"vim::Number",
@@ -94,7 +130,7 @@
}
},
{
"context": "Editor && vim_mode == normal && vim_operator == none",
"context": "Editor && vim_mode == normal && vim_operator == none && !VimWaiting",
"bindings": {
"c": [
"vim::PushOperator",
@@ -110,6 +146,12 @@
"vim::PushOperator",
"Yank"
],
"z": [
"vim::PushOperator",
{
"Namespace": "Z"
}
],
"i": [
"vim::SwitchMode",
"Insert"
@@ -147,6 +189,30 @@
{
"focus": true
}
],
"ctrl-f": [
"vim::Scroll",
"PageDown"
],
"ctrl-b": [
"vim::Scroll",
"PageUp"
],
"ctrl-d": [
"vim::Scroll",
"HalfPageDown"
],
"ctrl-u": [
"vim::Scroll",
"HalfPageUp"
],
"ctrl-e": [
"vim::Scroll",
"LineDown"
],
"r": [
"vim::PushOperator",
"Replace"
]
}
},
@@ -188,6 +254,18 @@
"y": "vim::CurrentLine"
}
},
{
"context": "Editor && vim_operator == z",
"bindings": {
"t": "editor::ScrollCursorTop",
"z": "editor::ScrollCursorCenter",
"b": "editor::ScrollCursorBottom",
"escape": [
"vim::SwitchMode",
"Normal"
]
}
},
{
"context": "Editor && VimObject",
"bindings": {
@@ -213,14 +291,18 @@
}
},
{
"context": "Editor && vim_mode == visual",
"context": "Editor && vim_mode == visual && !VimWaiting",
"bindings": {
"u": "editor::Undo",
"c": "vim::VisualChange",
"d": "vim::VisualDelete",
"x": "vim::VisualDelete",
"y": "vim::VisualYank",
"p": "vim::VisualPaste"
"p": "vim::VisualPaste",
"r": [
"vim::PushOperator",
"Replace"
]
}
},
{
@@ -229,5 +311,11 @@
"escape": "vim::NormalBefore",
"ctrl-c": "vim::NormalBefore"
}
},
{
"context": "Editor && VimWaiting",
"bindings": {
"*": "gpui::KeyPressed"
}
}
]

View File

@@ -1,230 +1,242 @@
{
// The name of the Zed theme to use for the UI
"theme": "One Dark",
// The name of a font to use for rendering text in the editor
"buffer_font_family": "Zed Mono",
// The default font size for text in the editor
"buffer_font_size": 15,
// Whether to enable vim modes and key bindings
"vim_mode": false,
// Whether to show the informational hover box when moving the mouse
// over symbols in the editor.
"hover_popover_enabled": true,
// Whether the cursor blinks in the editor.
"cursor_blink": true,
// Whether to pop the completions menu while typing in an editor without
// explicitly requesting it.
"show_completions_on_input": true,
// Whether new projects should start out 'online'. Online projects
// appear in the contacts panel under your name, so that your contacts
// can see which projects you are working on. Regardless of this
// setting, projects keep their last online status when you reopen them.
"projects_online_by_default": true,
// Whether to use language servers to provide code intelligence.
"enable_language_server": true,
// When to automatically save edited buffers. This setting can
// take four values.
//
// 1. Never automatically save:
// "autosave": "off",
// 2. Save when changing focus away from the Zed window:
// "autosave": "on_window_change",
// 3. Save when changing focus away from a specific buffer:
// "autosave": "on_focus_change",
// 4. Save when idle for a certain amount of time:
// "autosave": { "after_delay": {"milliseconds": 500} },
"autosave": "off",
// Where to place the dock by default. This setting can take three
// values:
//
// 1. Position the dock attached to the bottom of the workspace
// "default_dock_anchor": "bottom"
// 2. Position the dock to the right of the workspace like a side panel
// "default_dock_anchor": "right"
// 3. Position the dock full screen over the entire workspace
// "default_dock_anchor": "expanded"
"default_dock_anchor": "right",
// Whether or not to perform a buffer format before saving
"format_on_save": "on",
// How to perform a buffer format. This setting can take two values:
//
// 1. Format code using the current language server:
// "format_on_save": "language_server"
// 2. Format code using an external command:
// "format_on_save": {
// "external": {
// "command": "prettier",
// "arguments": ["--stdin-filepath", "{buffer_path}"]
// }
// }
"formatter": "language_server",
// How to soft-wrap long lines of text. This setting can take
// three values:
//
// 1. Do not soft wrap.
// "soft_wrap": "none",
// 2. Soft wrap lines that overflow the editor:
// "soft_wrap": "editor_width",
// 3. Soft wrap lines at the preferred line length
// "soft_wrap": "preferred_line_length",
"soft_wrap": "none",
// The column at which to soft-wrap lines, for buffers where soft-wrap
// is enabled.
"preferred_line_length": 80,
// Whether to indent lines using tab characters, as opposed to multiple
// spaces.
"hard_tabs": false,
// How many columns a tab should occupy.
"tab_size": 4,
// Git gutter behavior configuration.
"git": {
// Control whether the git gutter is shown. May take 2 values:
// 1. Show the gutter
// "git_gutter": "tracked_files"
// 2. Hide the gutter
// "git_gutter": "hide"
"git_gutter": "tracked_files"
},
// Settings specific to journaling
"journal": {
// The path of the directory where journal entries are stored
"path": "~",
// What format to display the hours in
// May take 2 values:
// 1. hour12
// 2. hour24
"hour_format": "hour12"
},
// Settings specific to the terminal
"terminal": {
// What shell to use when opening a terminal. May take 3 values:
// 1. Use the system's default terminal configuration (e.g. $TERM).
// "shell": "system"
// 2. A program:
// "shell": {
// "program": "sh"
// }
// 3. A program with arguments:
// "shell": {
// "with_arguments": {
// "program": "/bin/bash",
// "arguments": ["--login"]
// }
// }
"shell": "system",
// What working directory to use when launching the terminal.
// May take 4 values:
// 1. Use the current file's project directory. Will fall back to the
// first project directory strategy if unsuccessful
// "working_directory": "current_project_directory"
// 2. Use the first project in this workspace's directory
// "working_directory": "first_project_directory"
// 3. Always use this platform's home directory (if we can find it)
// "working_directory": "always_home"
// 4. Always use a specific directory. This value will be shell expanded.
// If this path is not a valid directory the terminal will default to
// this platform's home directory (if we can find it)
// "working_directory": {
// "always": {
// "directory": "~/zed/projects/"
// }
// }
// The name of the Zed theme to use for the UI
"theme": "One Dark",
// The name of a font to use for rendering text in the editor
"buffer_font_family": "Zed Mono",
// The default font size for text in the editor
"buffer_font_size": 15,
// The factor to grow the active pane by. Defaults to 1.0
// which gives the same size as all other panes.
"active_pane_magnification": 1.0,
// Whether to enable vim modes and key bindings
"vim_mode": false,
// Whether to show the informational hover box when moving the mouse
// over symbols in the editor.
"hover_popover_enabled": true,
// Whether to confirm before quitting Zed.
"confirm_quit": false,
// Whether the cursor blinks in the editor.
"cursor_blink": true,
// Whether to pop the completions menu while typing in an editor without
// explicitly requesting it.
"show_completions_on_input": true,
// Whether new projects should start out 'online'. Online projects
// appear in the contacts panel under your name, so that your contacts
// can see which projects you are working on. Regardless of this
// setting, projects keep their last online status when you reopen them.
"projects_online_by_default": true,
// Whether to use language servers to provide code intelligence.
"enable_language_server": true,
// When to automatically save edited buffers. This setting can
// take four values.
//
// 1. Never automatically save:
// "autosave": "off",
// 2. Save when changing focus away from the Zed window:
// "autosave": "on_window_change",
// 3. Save when changing focus away from a specific buffer:
// "autosave": "on_focus_change",
// 4. Save when idle for a certain amount of time:
// "autosave": { "after_delay": {"milliseconds": 500} },
"autosave": "off",
// Where to place the dock by default. This setting can take three
// values:
//
"working_directory": "current_project_directory",
// Set the cursor blinking behavior in the terminal.
// May take 4 values:
// 1. Never blink the cursor, ignoring the terminal mode
// "blinking": "off",
// 2. Default the cursor blink to off, but allow the terminal to
// set blinking
// "blinking": "terminal_controlled",
// 3. Always blink the cursor, ignoring the terminal mode
// "blinking": "on",
"blinking": "terminal_controlled",
// Set whether Alternate Scroll mode (code: ?1007) is active by default.
// Alternate Scroll mode converts mouse scroll events into up / down key
// presses when in the alternate screen (e.g. when running applications
// like vim or less). The terminal can still set and unset this mode.
// May take 2 values:
// 1. Default alternate scroll mode to on
// "alternate_scroll": "on",
// 2. Default alternate scroll mode to off
// "alternate_scroll": "off",
"alternate_scroll": "off",
// Set whether the option key behaves as the meta key.
// May take 2 values:
// 1. Rely on default platform handling of option key, on macOS
// this means generating certain unicode characters
// "option_to_meta": false,
// 2. Make the option keys behave as a 'meta' key, e.g. for emacs
// "option_to_meta": true,
"option_as_meta": false,
// Whether or not selecting text in the terminal will automatically
// copy to the system clipboard.
"copy_on_select": false,
// Any key-value pairs added to this list will be added to the terminal's
// environment. Use `:` to separate multiple values.
"env": {
// "KEY": "value1:value2"
}
// Set the terminal's font size. If this option is not included,
// the terminal will default to matching the buffer's font size.
// "font_size": "15"
// Set the terminal's font family. If this option is not included,
// the terminal will default to matching the buffer's font family.
// "font_family": "Zed Mono"
},
// Different settings for specific languages.
"languages": {
"Plain Text": {
"soft_wrap": "preferred_line_length"
},
"C": {
"tab_size": 2
},
"C++": {
"tab_size": 2
},
"Elixir": {
"tab_size": 2
},
"Go": {
"tab_size": 4,
"hard_tabs": true
},
"Markdown": {
"soft_wrap": "preferred_line_length"
},
"Rust": {
"tab_size": 4
},
"JavaScript": {
"tab_size": 2
},
"TypeScript": {
"tab_size": 2
},
"TSX": {
"tab_size": 2
}
},
// LSP Specific settings.
"lsp": {
// Specify the LSP name as a key here.
// As of 8/10/22, supported LSPs are:
// pyright
// gopls
// rust-analyzer
// typescript-language-server
// vscode-json-languageserver
// "rust_analyzer": {
// //These initialization options are merged into Zed's defaults
// "initialization_options": {
// "checkOnSave": {
// "command": "clippy"
// }
// 1. Position the dock attached to the bottom of the workspace
// "default_dock_anchor": "bottom"
// 2. Position the dock to the right of the workspace like a side panel
// "default_dock_anchor": "right"
// 3. Position the dock full screen over the entire workspace
// "default_dock_anchor": "expanded"
"default_dock_anchor": "right",
// Whether or not to perform a buffer format before saving
"format_on_save": "on",
// How to perform a buffer format. This setting can take two values:
//
// 1. Format code using the current language server:
// "format_on_save": "language_server"
// 2. Format code using an external command:
// "format_on_save": {
// "external": {
// "command": "prettier",
// "arguments": ["--stdin-filepath", "{buffer_path}"]
// }
// }
// }
}
"formatter": "language_server",
// How to soft-wrap long lines of text. This setting can take
// three values:
//
// 1. Do not soft wrap.
// "soft_wrap": "none",
// 2. Soft wrap lines that overflow the editor:
// "soft_wrap": "editor_width",
// 3. Soft wrap lines at the preferred line length
// "soft_wrap": "preferred_line_length",
"soft_wrap": "none",
// The column at which to soft-wrap lines, for buffers where soft-wrap
// is enabled.
"preferred_line_length": 80,
// Whether to indent lines using tab characters, as opposed to multiple
// spaces.
"hard_tabs": false,
// How many columns a tab should occupy.
"tab_size": 4,
// Control what info Zed sends to our servers
"telemetry": {
// Send debug info like crash reports.
"diagnostics": true,
// Send anonymized usage data like what languages you're using Zed with.
"metrics": true
},
// Git gutter behavior configuration.
"git": {
// Control whether the git gutter is shown. May take 2 values:
// 1. Show the gutter
// "git_gutter": "tracked_files"
// 2. Hide the gutter
// "git_gutter": "hide"
"git_gutter": "tracked_files"
},
// Settings specific to journaling
"journal": {
// The path of the directory where journal entries are stored
"path": "~",
// What format to display the hours in
// May take 2 values:
// 1. hour12
// 2. hour24
"hour_format": "hour12"
},
// Settings specific to the terminal
"terminal": {
// What shell to use when opening a terminal. May take 3 values:
// 1. Use the system's default terminal configuration (e.g. $TERM).
// "shell": "system"
// 2. A program:
// "shell": {
// "program": "sh"
// }
// 3. A program with arguments:
// "shell": {
// "with_arguments": {
// "program": "/bin/bash",
// "arguments": ["--login"]
// }
// }
"shell": "system",
// What working directory to use when launching the terminal.
// May take 4 values:
// 1. Use the current file's project directory. Will fall back to the
// first project directory strategy if unsuccessful
// "working_directory": "current_project_directory"
// 2. Use the first project in this workspace's directory
// "working_directory": "first_project_directory"
// 3. Always use this platform's home directory (if we can find it)
// "working_directory": "always_home"
// 4. Always use a specific directory. This value will be shell expanded.
// If this path is not a valid directory the terminal will default to
// this platform's home directory (if we can find it)
// "working_directory": {
// "always": {
// "directory": "~/zed/projects/"
// }
// }
//
//
"working_directory": "current_project_directory",
// Set the cursor blinking behavior in the terminal.
// May take 4 values:
// 1. Never blink the cursor, ignoring the terminal mode
// "blinking": "off",
// 2. Default the cursor blink to off, but allow the terminal to
// set blinking
// "blinking": "terminal_controlled",
// 3. Always blink the cursor, ignoring the terminal mode
// "blinking": "on",
"blinking": "terminal_controlled",
// Set whether Alternate Scroll mode (code: ?1007) is active by default.
// Alternate Scroll mode converts mouse scroll events into up / down key
// presses when in the alternate screen (e.g. when running applications
// like vim or less). The terminal can still set and unset this mode.
// May take 2 values:
// 1. Default alternate scroll mode to on
// "alternate_scroll": "on",
// 2. Default alternate scroll mode to off
// "alternate_scroll": "off",
"alternate_scroll": "off",
// Set whether the option key behaves as the meta key.
// May take 2 values:
// 1. Rely on default platform handling of option key, on macOS
// this means generating certain unicode characters
// "option_to_meta": false,
// 2. Make the option keys behave as a 'meta' key, e.g. for emacs
// "option_to_meta": true,
"option_as_meta": false,
// Whether or not selecting text in the terminal will automatically
// copy to the system clipboard.
"copy_on_select": false,
// Any key-value pairs added to this list will be added to the terminal's
// environment. Use `:` to separate multiple values.
"env": {
// "KEY": "value1:value2"
}
// Set the terminal's font size. If this option is not included,
// the terminal will default to matching the buffer's font size.
// "font_size": "15"
// Set the terminal's font family. If this option is not included,
// the terminal will default to matching the buffer's font family.
// "font_family": "Zed Mono"
},
// Different settings for specific languages.
"languages": {
"Plain Text": {
"soft_wrap": "preferred_line_length"
},
"C": {
"tab_size": 2
},
"C++": {
"tab_size": 2
},
"Elixir": {
"tab_size": 2
},
"Go": {
"tab_size": 4,
"hard_tabs": true
},
"Markdown": {
"soft_wrap": "preferred_line_length"
},
"Rust": {
"tab_size": 4
},
"JavaScript": {
"tab_size": 2
},
"TypeScript": {
"tab_size": 2
},
"TSX": {
"tab_size": 2
}
},
// LSP Specific settings.
"lsp": {
// Specify the LSP name as a key here.
// As of 8/10/22, supported LSPs are:
// pyright
// gopls
// rust-analyzer
// typescript-language-server
// vscode-json-languageserver
// "rust-analyzer": {
// //These initialization options are merged into Zed's defaults
// "initialization_options": {
// "checkOnSave": {
// "command": "clippy"
// }
// }
// }
}
}

View File

@@ -2,6 +2,7 @@
name = "activity_indicator"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/activity_indicator.rs"

View File

@@ -11,13 +11,12 @@ use settings::Settings;
use smallvec::SmallVec;
use std::{cmp::Reverse, fmt::Write, sync::Arc};
use util::ResultExt;
use workspace::{ItemHandle, StatusItemView, Workspace};
use workspace::{item::ItemHandle, StatusItemView, Workspace};
actions!(lsp_status, [ShowErrorMessage]);
const DOWNLOAD_ICON: &str = "icons/download_12.svg";
const WARNING_ICON: &str = "icons/triangle_exclamation_12.svg";
const DONE_ICON: &str = "icons/circle_check_12.svg";
pub enum Event {
ShowError { lsp_name: Arc<str>, error: String },
@@ -237,7 +236,6 @@ impl ActivityIndicator {
// Show any application auto-update info.
if let Some(updater) = &self.auto_updater {
// let theme = &cx.global::<Settings>().theme.workspace.status_bar;
match &updater.read(cx).status() {
AutoUpdateStatus::Checking => (
Some(DOWNLOAD_ICON),
@@ -254,9 +252,7 @@ impl ActivityIndicator {
"Installing Zed update…".to_string(),
None,
),
AutoUpdateStatus::Updated => {
(Some(DONE_ICON), "Restart to update Zed".to_string(), None)
}
AutoUpdateStatus::Updated => (None, "Restart to update Zed".to_string(), None),
AutoUpdateStatus::Errored => (
Some(WARNING_ICON),
"Auto update failed".to_string(),

View File

@@ -2,6 +2,7 @@
name = "assets"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/assets.rs"

View File

@@ -2,12 +2,14 @@
name = "auto_update"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/auto_update.rs"
doctest = false
[dependencies]
db = { path = "../db" }
client = { path = "../client" }
gpui = { path = "../gpui" }
menu = { path = "../menu" }

View File

@@ -1,29 +1,23 @@
mod update_notification;
use anyhow::{anyhow, Context, Result};
use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN, ZED_SERVER_URL};
use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN};
use client::{ZED_APP_PATH, ZED_APP_VERSION};
use db::kvp::KEY_VALUE_STORE;
use gpui::{
actions, platform::AppVersion, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle,
MutableAppContext, Task, WeakViewHandle,
};
use lazy_static::lazy_static;
use serde::Deserialize;
use settings::ReleaseChannel;
use smol::{fs::File, io::AsyncReadExt, process::Command};
use std::{env, ffi::OsString, path::PathBuf, sync::Arc, time::Duration};
use std::{ffi::OsString, sync::Arc, time::Duration};
use update_notification::UpdateNotification;
use util::channel::ReleaseChannel;
use workspace::Workspace;
const SHOULD_SHOW_UPDATE_NOTIFICATION_KEY: &str = "auto-updater-should-show-updated-notification";
const POLL_INTERVAL: Duration = Duration::from_secs(60 * 60);
lazy_static! {
pub static ref ZED_APP_VERSION: Option<AppVersion> = env::var("ZED_APP_VERSION")
.ok()
.and_then(|v| v.parse().ok());
pub static ref ZED_APP_PATH: Option<PathBuf> = env::var("ZED_APP_PATH").ok().map(PathBuf::from);
}
actions!(auto_update, [Check, DismissErrorMessage, ViewReleaseNotes]);
#[derive(Clone, Copy, PartialEq, Eq)]
@@ -41,7 +35,6 @@ pub struct AutoUpdater {
current_version: AppVersion,
http_client: Arc<dyn HttpClient>,
pending_poll: Option<Task<()>>,
db: project::Db,
server_url: String,
}
@@ -55,11 +48,11 @@ impl Entity for AutoUpdater {
type Event = ();
}
pub fn init(db: project::Db, http_client: Arc<dyn HttpClient>, cx: &mut MutableAppContext) {
pub fn init(http_client: Arc<dyn HttpClient>, server_url: String, cx: &mut MutableAppContext) {
if let Some(version) = (*ZED_APP_VERSION).or_else(|| cx.platform().app_version().ok()) {
let server_url = ZED_SERVER_URL.to_string();
let server_url = server_url;
let auto_updater = cx.add_model(|cx| {
let updater = AutoUpdater::new(version, db.clone(), http_client, server_url.clone());
let updater = AutoUpdater::new(version, http_client, server_url.clone());
updater.start_polling(cx).detach();
updater
});
@@ -70,7 +63,14 @@ pub fn init(db: project::Db, http_client: Arc<dyn HttpClient>, cx: &mut MutableA
}
});
cx.add_global_action(move |_: &ViewReleaseNotes, cx| {
cx.platform().open_url(&format!("{server_url}/releases"));
let latest_release_url = if cx.has_global::<ReleaseChannel>()
&& *cx.global::<ReleaseChannel>() == ReleaseChannel::Preview
{
format!("{server_url}/releases/preview/latest")
} else {
format!("{server_url}/releases/latest")
};
cx.platform().open_url(&latest_release_url);
});
cx.add_action(UpdateNotification::dismiss);
}
@@ -113,14 +113,12 @@ impl AutoUpdater {
fn new(
current_version: AppVersion,
db: project::Db,
http_client: Arc<dyn HttpClient>,
server_url: String,
) -> Self {
Self {
status: AutoUpdateStatus::Idle,
current_version,
db,
http_client,
server_url,
pending_poll: None,
@@ -290,20 +288,28 @@ impl AutoUpdater {
should_show: bool,
cx: &AppContext,
) -> Task<Result<()>> {
let db = self.db.clone();
cx.background().spawn(async move {
if should_show {
db.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?;
KEY_VALUE_STORE
.write_kvp(
SHOULD_SHOW_UPDATE_NOTIFICATION_KEY.to_string(),
"".to_string(),
)
.await?;
} else {
db.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?;
KEY_VALUE_STORE
.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY.to_string())
.await?;
}
Ok(())
})
}
fn should_show_update_notification(&self, cx: &AppContext) -> Task<Result<bool>> {
let db = self.db.clone();
cx.background()
.spawn(async move { Ok(db.read_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?.is_some()) })
cx.background().spawn(async move {
Ok(KEY_VALUE_STORE
.read_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?
.is_some())
})
}
}

View File

@@ -5,8 +5,9 @@ use gpui::{
Element, Entity, MouseButton, View, ViewContext,
};
use menu::Cancel;
use settings::{ReleaseChannel, Settings};
use workspace::Notification;
use settings::Settings;
use util::channel::ReleaseChannel;
use workspace::notifications::Notification;
pub struct UpdateNotification {
version: AppVersion,
@@ -29,7 +30,7 @@ impl View for UpdateNotification {
let theme = cx.global::<Settings>().theme.clone();
let theme = &theme.update_notification;
let app_name = cx.global::<ReleaseChannel>().name();
let app_name = cx.global::<ReleaseChannel>().display_name();
MouseEventHandler::<ViewReleaseNotes>::new(0, cx, |state, cx| {
Flex::column()

View File

@@ -2,6 +2,7 @@
name = "breadcrumbs"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/breadcrumbs.rs"

View File

@@ -4,7 +4,10 @@ use gpui::{
use itertools::Itertools;
use search::ProjectSearchView;
use settings::Settings;
use workspace::{ItemEvent, ItemHandle, ToolbarItemLocation, ToolbarItemView};
use workspace::{
item::{ItemEvent, ItemHandle},
ToolbarItemLocation, ToolbarItemView,
};
pub enum Event {
UpdateLocation,

View File

@@ -2,6 +2,7 @@
name = "call"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/call.rs"
@@ -21,7 +22,10 @@ test-support = [
client = { path = "../client" }
collections = { path = "../collections" }
gpui = { path = "../gpui" }
log = "0.4"
live_kit_client = { path = "../live_kit_client" }
fs = { path = "../fs" }
language = { path = "../language" }
media = { path = "../media" }
project = { path = "../project" }
util = { path = "../util" }
@@ -33,6 +37,8 @@ postage = { version = "0.4.1", features = ["futures-traits"] }
[dev-dependencies]
client = { path = "../client", features = ["test-support"] }
fs = { path = "../fs", features = ["test-support"] }
language = { path = "../language", features = ["test-support"] }
collections = { path = "../collections", features = ["test-support"] }
gpui = { path = "../gpui", features = ["test-support"] }
live_kit_client = { path = "../live_kit_client", features = ["test-support"] }

View File

@@ -22,7 +22,7 @@ pub fn init(client: Arc<Client>, user_store: ModelHandle<UserStore>, cx: &mut Mu
#[derive(Clone)]
pub struct IncomingCall {
pub room_id: u64,
pub caller: Arc<User>,
pub calling_user: Arc<User>,
pub participants: Vec<Arc<User>>,
pub initial_project: Option<proto::ParticipantProject>,
}
@@ -78,9 +78,9 @@ impl ActiveCall {
user_store.get_users(envelope.payload.participant_user_ids, cx)
})
.await?,
caller: user_store
calling_user: user_store
.update(&mut cx, |user_store, cx| {
user_store.get_user(envelope.payload.caller_user_id, cx)
user_store.get_user(envelope.payload.calling_user_id, cx)
})
.await?,
initial_project: envelope.payload.initial_project,
@@ -94,12 +94,18 @@ impl ActiveCall {
async fn handle_call_canceled(
this: ModelHandle<Self>,
_: TypedEnvelope<proto::CallCanceled>,
envelope: TypedEnvelope<proto::CallCanceled>,
_: Arc<Client>,
mut cx: AsyncAppContext,
) -> Result<()> {
this.update(&mut cx, |this, _| {
*this.incoming_call.0.borrow_mut() = None;
let mut incoming_call = this.incoming_call.0.borrow_mut();
if incoming_call
.as_ref()
.map_or(false, |call| call.room_id == envelope.payload.room_id)
{
incoming_call.take();
}
});
Ok(())
}
@@ -110,13 +116,13 @@ impl ActiveCall {
pub fn invite(
&mut self,
recipient_user_id: u64,
called_user_id: u64,
initial_project: Option<ModelHandle<Project>>,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
let client = self.client.clone();
let user_store = self.user_store.clone();
if !self.pending_invites.insert(recipient_user_id) {
if !self.pending_invites.insert(called_user_id) {
return Task::ready(Err(anyhow!("user was already invited")));
}
@@ -136,13 +142,13 @@ impl ActiveCall {
};
room.update(&mut cx, |room, cx| {
room.call(recipient_user_id, initial_project_id, cx)
room.call(called_user_id, initial_project_id, cx)
})
.await?;
} else {
let room = cx
.update(|cx| {
Room::create(recipient_user_id, initial_project, client, user_store, cx)
Room::create(called_user_id, initial_project, client, user_store, cx)
})
.await?;
@@ -155,7 +161,7 @@ impl ActiveCall {
let result = invite.await;
this.update(&mut cx, |this, cx| {
this.pending_invites.remove(&recipient_user_id);
this.pending_invites.remove(&called_user_id);
cx.notify();
});
result
@@ -164,7 +170,7 @@ impl ActiveCall {
pub fn cancel_invite(
&mut self,
recipient_user_id: u64,
called_user_id: u64,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
let room_id = if let Some(room) = self.room() {
@@ -178,7 +184,7 @@ impl ActiveCall {
client
.request(proto::CancelCall {
room_id,
recipient_user_id,
called_user_id,
})
.await?;
anyhow::Ok(())

View File

@@ -4,7 +4,7 @@ use collections::HashMap;
use gpui::WeakModelHandle;
pub use live_kit_client::Frame;
use project::Project;
use std::sync::Arc;
use std::{fmt, sync::Arc};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ParticipantLocation {
@@ -36,9 +36,10 @@ pub struct LocalParticipant {
pub active_project: Option<WeakModelHandle<Project>>,
}
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct RemoteParticipant {
pub user: Arc<User>,
pub peer_id: proto::PeerId,
pub projects: Vec<proto::ParticipantProject>,
pub location: ParticipantLocation,
pub tracks: HashMap<live_kit_client::Sid, Arc<RemoteVideoTrack>>,
@@ -49,6 +50,12 @@ pub struct RemoteVideoTrack {
pub(crate) live_kit_track: Arc<live_kit_client::RemoteVideoTrack>,
}
impl fmt::Debug for RemoteVideoTrack {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RemoteVideoTrack").finish()
}
}
impl RemoteVideoTrack {
pub fn frames(&self) -> async_broadcast::Receiver<Frame> {
self.live_kit_track.frames()

View File

@@ -3,23 +3,32 @@ use crate::{
IncomingCall,
};
use anyhow::{anyhow, Result};
use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
use collections::{BTreeMap, HashSet};
use futures::StreamExt;
use gpui::{AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task};
use client::{
proto::{self, PeerId},
Client, TypedEnvelope, User, UserStore,
};
use collections::{BTreeMap, HashMap, HashSet};
use fs::Fs;
use futures::{FutureExt, StreamExt};
use gpui::{
AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakModelHandle,
};
use language::LanguageRegistry;
use live_kit_client::{LocalTrackPublication, LocalVideoTrack, RemoteVideoTrackUpdate};
use postage::stream::Stream;
use project::Project;
use std::{mem, os::unix::prelude::OsStrExt, sync::Arc};
use util::{post_inc, ResultExt};
use std::{mem, sync::Arc, time::Duration};
use util::{post_inc, ResultExt, TryFutureExt};
pub const RECONNECT_TIMEOUT: Duration = client::RECEIVE_TIMEOUT;
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Event {
ParticipantLocationChanged {
participant_id: PeerId,
participant_id: proto::PeerId,
},
RemoteVideoTracksChanged {
participant_id: PeerId,
participant_id: proto::PeerId,
},
RemoteProjectShared {
owner: Arc<User>,
@@ -36,8 +45,10 @@ pub struct Room {
id: u64,
live_kit: Option<LiveKitRoom>,
status: RoomStatus,
shared_projects: HashSet<WeakModelHandle<Project>>,
joined_projects: HashSet<WeakModelHandle<Project>>,
local_participant: LocalParticipant,
remote_participants: BTreeMap<PeerId, RemoteParticipant>,
remote_participants: BTreeMap<u64, RemoteParticipant>,
pending_participants: Vec<Arc<User>>,
participant_user_ids: HashSet<u64>,
pending_call_count: usize,
@@ -46,6 +57,7 @@ pub struct Room {
user_store: ModelHandle<UserStore>,
subscriptions: Vec<client::Subscription>,
pending_room_update: Option<Task<()>>,
maintain_connection: Option<Task<Option<()>>>,
}
impl Entity for Room {
@@ -53,7 +65,8 @@ impl Entity for Room {
fn release(&mut self, _: &mut MutableAppContext) {
if self.status.is_online() {
self.client.send(proto::LeaveRoom { id: self.id }).log_err();
log::info!("room was released, sending leave message");
let _ = self.client.send(proto::LeaveRoom {});
}
}
}
@@ -66,21 +79,6 @@ impl Room {
user_store: ModelHandle<UserStore>,
cx: &mut ModelContext<Self>,
) -> Self {
let mut client_status = client.status();
cx.spawn_weak(|this, mut cx| async move {
let is_connected = client_status
.next()
.await
.map_or(false, |s| s.is_connected());
// Even if we're initially connected, any future change of the status means we momentarily disconnected.
if !is_connected || client_status.next().await.is_some() {
if let Some(this) = this.upgrade(&cx) {
let _ = this.update(&mut cx, |this, cx| this.leave(cx));
}
}
})
.detach();
let live_kit_room = if let Some(connection_info) = live_kit_connection_info {
let room = live_kit_client::Room::new();
let mut status = room.status();
@@ -131,10 +129,15 @@ impl Room {
None
};
let maintain_connection =
cx.spawn_weak(|this, cx| Self::maintain_connection(this, client.clone(), cx).log_err());
Self {
id,
live_kit: live_kit_room,
status: RoomStatus::Online,
shared_projects: Default::default(),
joined_projects: Default::default(),
participant_user_ids: Default::default(),
local_participant: Default::default(),
remote_participants: Default::default(),
@@ -145,11 +148,12 @@ impl Room {
pending_room_update: None,
client,
user_store,
maintain_connection: Some(maintain_connection),
}
}
pub(crate) fn create(
recipient_user_id: u64,
called_user_id: u64,
initial_project: Option<ModelHandle<Project>>,
client: Arc<Client>,
user_store: ModelHandle<UserStore>,
@@ -182,7 +186,7 @@ impl Room {
match room
.update(&mut cx, |room, cx| {
room.leave_when_empty = true;
room.call(recipient_user_id, initial_project_id, cx)
room.call(called_user_id, initial_project_id, cx)
})
.await
{
@@ -235,16 +239,194 @@ impl Room {
cx.notify();
cx.emit(Event::Left);
log::info!("leaving room");
for project in self.shared_projects.drain() {
if let Some(project) = project.upgrade(cx) {
project.update(cx, |project, cx| {
project.unshare(cx).log_err();
});
}
}
for project in self.joined_projects.drain() {
if let Some(project) = project.upgrade(cx) {
project.update(cx, |project, cx| {
project.disconnected_from_host(cx);
});
}
}
self.status = RoomStatus::Offline;
self.remote_participants.clear();
self.pending_participants.clear();
self.participant_user_ids.clear();
self.subscriptions.clear();
self.live_kit.take();
self.client.send(proto::LeaveRoom { id: self.id })?;
self.pending_room_update.take();
self.maintain_connection.take();
self.client.send(proto::LeaveRoom {})?;
Ok(())
}
async fn maintain_connection(
this: WeakModelHandle<Self>,
client: Arc<Client>,
mut cx: AsyncAppContext,
) -> Result<()> {
let mut client_status = client.status();
loop {
let is_connected = client_status
.next()
.await
.map_or(false, |s| s.is_connected());
// Even if we're initially connected, any future change of the status means we momentarily disconnected.
if !is_connected || client_status.next().await.is_some() {
log::info!("detected client disconnection");
this.upgrade(&cx)
.ok_or_else(|| anyhow!("room was dropped"))?
.update(&mut cx, |this, cx| {
this.status = RoomStatus::Rejoining;
cx.notify();
});
// Wait for client to re-establish a connection to the server.
{
let mut reconnection_timeout = cx.background().timer(RECONNECT_TIMEOUT).fuse();
let client_reconnection = async {
let mut remaining_attempts = 3;
while remaining_attempts > 0 {
log::info!(
"waiting for client status change, remaining attempts {}",
remaining_attempts
);
let Some(status) = client_status.next().await else { break };
if status.is_connected() {
log::info!("client reconnected, attempting to rejoin room");
let Some(this) = this.upgrade(&cx) else { break };
if this
.update(&mut cx, |this, cx| this.rejoin(cx))
.await
.log_err()
.is_some()
{
return true;
} else {
remaining_attempts -= 1;
}
}
}
false
}
.fuse();
futures::pin_mut!(client_reconnection);
futures::select_biased! {
reconnected = client_reconnection => {
if reconnected {
log::info!("successfully reconnected to room");
// If we successfully joined the room, go back around the loop
// waiting for future connection status changes.
continue;
}
}
_ = reconnection_timeout => {
log::info!("room reconnection timeout expired");
}
}
}
// The client failed to re-establish a connection to the server
// or an error occurred while trying to re-join the room. Either way
// we leave the room and return an error.
if let Some(this) = this.upgrade(&cx) {
log::info!("reconnection failed, leaving room");
let _ = this.update(&mut cx, |this, cx| this.leave(cx));
}
return Err(anyhow!(
"can't reconnect to room: client failed to re-establish connection"
));
}
}
}
fn rejoin(&mut self, cx: &mut ModelContext<Self>) -> Task<Result<()>> {
let mut projects = HashMap::default();
let mut reshared_projects = Vec::new();
let mut rejoined_projects = Vec::new();
self.shared_projects.retain(|project| {
if let Some(handle) = project.upgrade(cx) {
let project = handle.read(cx);
if let Some(project_id) = project.remote_id() {
projects.insert(project_id, handle.clone());
reshared_projects.push(proto::UpdateProject {
project_id,
worktrees: project.worktree_metadata_protos(cx),
});
return true;
}
}
false
});
self.joined_projects.retain(|project| {
if let Some(handle) = project.upgrade(cx) {
let project = handle.read(cx);
if let Some(project_id) = project.remote_id() {
projects.insert(project_id, handle.clone());
rejoined_projects.push(proto::RejoinProject {
id: project_id,
worktrees: project
.worktrees(cx)
.map(|worktree| {
let worktree = worktree.read(cx);
proto::RejoinWorktree {
id: worktree.id().to_proto(),
scan_id: worktree.completed_scan_id() as u64,
}
})
.collect(),
});
}
return true;
}
false
});
let response = self.client.request(proto::RejoinRoom {
id: self.id,
reshared_projects,
rejoined_projects,
});
cx.spawn(|this, mut cx| async move {
let response = response.await?;
let room_proto = response.room.ok_or_else(|| anyhow!("invalid room"))?;
this.update(&mut cx, |this, cx| {
this.status = RoomStatus::Online;
this.apply_room_update(room_proto, cx)?;
for reshared_project in response.reshared_projects {
if let Some(project) = projects.get(&reshared_project.id) {
project.update(cx, |project, cx| {
project.reshared(reshared_project, cx).log_err();
});
}
}
for rejoined_project in response.rejoined_projects {
if let Some(project) = projects.get(&rejoined_project.id) {
project.update(cx, |project, cx| {
project.rejoined(rejoined_project, cx).log_err();
});
}
}
anyhow::Ok(())
})
})
}
pub fn id(&self) -> u64 {
self.id
}
@@ -257,10 +439,16 @@ impl Room {
&self.local_participant
}
pub fn remote_participants(&self) -> &BTreeMap<PeerId, RemoteParticipant> {
pub fn remote_participants(&self) -> &BTreeMap<u64, RemoteParticipant> {
&self.remote_participants
}
pub fn remote_participant_for_peer_id(&self, peer_id: PeerId) -> Option<&RemoteParticipant> {
self.remote_participants
.values()
.find(|p| p.peer_id == peer_id)
}
pub fn pending_participants(&self) -> &[Arc<User>] {
&self.pending_participants
}
@@ -294,6 +482,11 @@ impl Room {
.position(|participant| Some(participant.user_id) == self.client.user_id());
let local_participant = local_participant_ix.map(|ix| room.participants.swap_remove(ix));
let pending_participant_user_ids = room
.pending_participants
.iter()
.map(|p| p.user_id)
.collect::<Vec<_>>();
let remote_participant_user_ids = room
.participants
.iter()
@@ -303,7 +496,7 @@ impl Room {
self.user_store.update(cx, move |user_store, cx| {
(
user_store.get_users(remote_participant_user_ids, cx),
user_store.get_users(room.pending_participant_user_ids, cx),
user_store.get_users(pending_participant_user_ids, cx),
)
});
self.pending_room_update = Some(cx.spawn(|this, mut cx| async move {
@@ -321,12 +514,12 @@ impl Room {
if let Some(participants) = remote_participants.log_err() {
for (participant, user) in room.participants.into_iter().zip(participants) {
let peer_id = PeerId(participant.peer_id);
let Some(peer_id) = participant.peer_id else { continue };
this.participant_user_ids.insert(participant.user_id);
let old_projects = this
.remote_participants
.get(&peer_id)
.get(&participant.user_id)
.into_iter()
.flat_map(|existing| &existing.projects)
.map(|project| project.id)
@@ -348,6 +541,20 @@ impl Room {
}
for unshared_project_id in old_projects.difference(&new_projects) {
this.joined_projects.retain(|project| {
if let Some(project) = project.upgrade(cx) {
project.update(cx, |project, cx| {
if project.remote_id() == Some(*unshared_project_id) {
project.disconnected_from_host(cx);
false
} else {
true
}
})
} else {
false
}
});
cx.emit(Event::RemoteProjectUnshared {
project_id: *unshared_project_id,
});
@@ -355,9 +562,11 @@ impl Room {
let location = ParticipantLocation::from_proto(participant.location)
.unwrap_or(ParticipantLocation::External);
if let Some(remote_participant) = this.remote_participants.get_mut(&peer_id)
if let Some(remote_participant) =
this.remote_participants.get_mut(&participant.user_id)
{
remote_participant.projects = participant.projects;
remote_participant.peer_id = peer_id;
if location != remote_participant.location {
remote_participant.location = location;
cx.emit(Event::ParticipantLocationChanged {
@@ -366,9 +575,10 @@ impl Room {
}
} else {
this.remote_participants.insert(
peer_id,
participant.user_id,
RemoteParticipant {
user: user.clone(),
peer_id,
projects: participant.projects,
location,
tracks: Default::default(),
@@ -377,7 +587,7 @@ impl Room {
if let Some(live_kit) = this.live_kit.as_ref() {
let tracks =
live_kit.room.remote_video_tracks(&peer_id.0.to_string());
live_kit.room.remote_video_tracks(&peer_id.to_string());
for track in tracks {
this.remote_video_track_updated(
RemoteVideoTrackUpdate::Subscribed(track),
@@ -389,8 +599,8 @@ impl Room {
}
}
this.remote_participants.retain(|_, participant| {
if this.participant_user_ids.contains(&participant.user.id) {
this.remote_participants.retain(|user_id, participant| {
if this.participant_user_ids.contains(user_id) {
true
} else {
for project in &participant.projects {
@@ -412,6 +622,7 @@ impl Room {
this.pending_room_update.take();
if this.should_leave() {
log::info!("room is empty, leaving");
let _ = this.leave(cx);
}
@@ -431,11 +642,11 @@ impl Room {
) -> Result<()> {
match change {
RemoteVideoTrackUpdate::Subscribed(track) => {
let peer_id = PeerId(track.publisher_id().parse()?);
let user_id = track.publisher_id().parse()?;
let track_id = track.sid().to_string();
let participant = self
.remote_participants
.get_mut(&peer_id)
.get_mut(&user_id)
.ok_or_else(|| anyhow!("subscribed to track by unknown participant"))?;
participant.tracks.insert(
track_id.clone(),
@@ -444,21 +655,21 @@ impl Room {
}),
);
cx.emit(Event::RemoteVideoTracksChanged {
participant_id: peer_id,
participant_id: participant.peer_id,
});
}
RemoteVideoTrackUpdate::Unsubscribed {
publisher_id,
track_id,
} => {
let peer_id = PeerId(publisher_id.parse()?);
let user_id = publisher_id.parse()?;
let participant = self
.remote_participants
.get_mut(&peer_id)
.get_mut(&user_id)
.ok_or_else(|| anyhow!("unsubscribed from track by unknown participant"))?;
participant.tracks.remove(&track_id);
cx.emit(Event::RemoteVideoTracksChanged {
participant_id: peer_id,
participant_id: participant.peer_id,
});
}
}
@@ -472,10 +683,12 @@ impl Room {
{
for participant in self.remote_participants.values() {
assert!(self.participant_user_ids.contains(&participant.user.id));
assert_ne!(participant.user.id, self.client.user_id().unwrap());
}
for participant in &self.pending_participants {
assert!(self.participant_user_ids.contains(&participant.id));
assert_ne!(participant.id, self.client.user_id().unwrap());
}
assert_eq!(
@@ -487,7 +700,7 @@ impl Room {
pub(crate) fn call(
&mut self,
recipient_user_id: u64,
called_user_id: u64,
initial_project_id: Option<u64>,
cx: &mut ModelContext<Self>,
) -> Task<Result<()>> {
@@ -503,7 +716,7 @@ impl Room {
let result = client
.request(proto::Call {
room_id,
recipient_user_id,
called_user_id,
initial_project_id,
})
.await;
@@ -518,6 +731,32 @@ impl Room {
})
}
pub fn join_project(
&mut self,
id: u64,
language_registry: Arc<LanguageRegistry>,
fs: Arc<dyn Fs>,
cx: &mut ModelContext<Self>,
) -> Task<Result<ModelHandle<Project>>> {
let client = self.client.clone();
let user_store = self.user_store.clone();
cx.spawn(|this, mut cx| async move {
let project =
Project::remote(id, client, user_store, language_registry, fs, cx.clone()).await?;
this.update(&mut cx, |this, cx| {
this.joined_projects.retain(|project| {
if let Some(project) = project.upgrade(cx) {
!project.read(cx).is_read_only()
} else {
false
}
});
this.joined_projects.insert(project.downgrade());
});
Ok(project)
})
}
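// A compact sketch of the weak-handle bookkeeping that `join_project` performs
// above: drop entries whose project has been released or has gone read-only,
// then record the newly joined one. std's Rc/Weak stand in for gpui model
// handles here; this is a simplified analogue, not the actual API.
use std::rc::{Rc, Weak};

struct ProjectStub {
    read_only: bool,
}

fn remember_joined(joined: &mut Vec<Weak<ProjectStub>>, project: &Rc<ProjectStub>) {
    // Keep only projects that still upgrade and remain writable.
    joined.retain(|weak| weak.upgrade().map_or(false, |p| !p.read_only));
    joined.push(Rc::downgrade(project));
}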
pub(crate) fn share_project(
&mut self,
project: ModelHandle<Project>,
@@ -529,31 +768,18 @@ impl Room {
let request = self.client.request(proto::ShareProject {
room_id: self.id(),
worktrees: project
.read(cx)
.worktrees(cx)
.map(|worktree| {
let worktree = worktree.read(cx);
proto::WorktreeMetadata {
id: worktree.id().to_proto(),
root_name: worktree.root_name().into(),
visible: worktree.is_visible(),
abs_path: worktree.abs_path().as_os_str().as_bytes().to_vec(),
}
})
.collect(),
worktrees: project.read(cx).worktree_metadata_protos(cx),
});
cx.spawn(|this, mut cx| async move {
let response = request.await?;
project.update(&mut cx, |project, cx| {
project
.shared(response.project_id, cx)
.detach_and_log_err(cx)
});
project.shared(response.project_id, cx)
})?;
// If the user's location is in this project, it changes from UnsharedProject to SharedProject.
this.update(&mut cx, |this, cx| {
this.shared_projects.insert(project.downgrade());
let active_project = this.local_participant.active_project.as_ref();
if active_project.map_or(false, |location| *location == project) {
this.set_location(Some(&project), cx)
@@ -746,6 +972,7 @@ impl Default for ScreenTrack {
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum RoomStatus {
Online,
Rejoining,
Offline,
}

View File

@@ -2,6 +2,7 @@
name = "cli"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/cli.rs"

View File

@@ -9,7 +9,13 @@ use core_foundation::{
use core_services::{kLSLaunchDefaults, LSLaunchURLSpec, LSOpenFromURLSpec, TCFType};
use ipc_channel::ipc::{IpcOneShotServer, IpcReceiver, IpcSender};
use serde::Deserialize;
use std::{ffi::OsStr, fs, path::PathBuf, ptr};
use std::{
ffi::OsStr,
fs::{self, OpenOptions},
io,
path::{Path, PathBuf},
ptr,
};
#[derive(Parser)]
#[clap(name = "zed", global_setting(clap::AppSettings::NoAutoVersion))]
@@ -54,6 +60,12 @@ fn main() -> Result<()> {
return Ok(());
}
for path in args.paths.iter() {
if !path.exists() {
touch(path.as_path())?;
}
}
let (tx, rx) = launch_app(bundle_path)?;
tx.send(CliRequest::Open {
@@ -77,6 +89,13 @@ fn main() -> Result<()> {
Ok(())
}
fn touch(path: &Path) -> io::Result<()> {
match OpenOptions::new().create(true).write(true).open(path) {
Ok(_) => Ok(()),
Err(e) => Err(e),
}
}
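// The `touch` helper above opens the file with `create(true)` purely for the
// side effect of creating it. The match on the result is equivalent to mapping
// the handle away; a terser, behaviorally identical form (shown only as an
// alternative, relying on the same imports as above):
fn touch_terse(path: &Path) -> io::Result<()> {
    OpenOptions::new().create(true).write(true).open(path).map(|_| ())
}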
fn locate_bundle() -> Result<PathBuf> {
let cli_path = std::env::current_exe()?.canonicalize()?;
let mut app_path = cli_path.clone();

View File

@@ -2,6 +2,7 @@
name = "client"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/client.rs"

View File

@@ -1,820 +0,0 @@
use super::{
proto,
user::{User, UserStore},
Client, Status, Subscription, TypedEnvelope,
};
use anyhow::{anyhow, Context, Result};
use futures::lock::Mutex;
use gpui::{
AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakModelHandle,
};
use postage::prelude::Stream;
use rand::prelude::*;
use std::{
collections::{HashMap, HashSet},
mem,
ops::Range,
sync::Arc,
};
use sum_tree::{Bias, SumTree};
use time::OffsetDateTime;
use util::{post_inc, ResultExt as _, TryFutureExt};
pub struct ChannelList {
available_channels: Option<Vec<ChannelDetails>>,
channels: HashMap<u64, WeakModelHandle<Channel>>,
client: Arc<Client>,
user_store: ModelHandle<UserStore>,
_task: Task<Option<()>>,
}
#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
pub id: u64,
pub name: String,
}
pub struct Channel {
details: ChannelDetails,
messages: SumTree<ChannelMessage>,
loaded_all_messages: bool,
next_pending_message_id: usize,
user_store: ModelHandle<UserStore>,
rpc: Arc<Client>,
outgoing_messages_lock: Arc<Mutex<()>>,
rng: StdRng,
_subscription: Subscription,
}
#[derive(Clone, Debug)]
pub struct ChannelMessage {
pub id: ChannelMessageId,
pub body: String,
pub timestamp: OffsetDateTime,
pub sender: Arc<User>,
pub nonce: u128,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum ChannelMessageId {
Saved(u64),
Pending(usize),
}
#[derive(Clone, Debug, Default)]
pub struct ChannelMessageSummary {
max_id: ChannelMessageId,
count: usize,
}
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
struct Count(usize);
pub enum ChannelListEvent {}
#[derive(Clone, Debug, PartialEq)]
pub enum ChannelEvent {
MessagesUpdated {
old_range: Range<usize>,
new_count: usize,
},
}
impl Entity for ChannelList {
type Event = ChannelListEvent;
}
impl ChannelList {
pub fn new(
user_store: ModelHandle<UserStore>,
rpc: Arc<Client>,
cx: &mut ModelContext<Self>,
) -> Self {
let _task = cx.spawn_weak(|this, mut cx| {
let rpc = rpc.clone();
async move {
let mut status = rpc.status();
while let Some((status, this)) = status.recv().await.zip(this.upgrade(&cx)) {
match status {
Status::Connected { .. } => {
let response = rpc
.request(proto::GetChannels {})
.await
.context("failed to fetch available channels")?;
this.update(&mut cx, |this, cx| {
this.available_channels =
Some(response.channels.into_iter().map(Into::into).collect());
let mut to_remove = Vec::new();
for (channel_id, channel) in &this.channels {
if let Some(channel) = channel.upgrade(cx) {
channel.update(cx, |channel, cx| channel.rejoin(cx))
} else {
to_remove.push(*channel_id);
}
}
for channel_id in to_remove {
this.channels.remove(&channel_id);
}
cx.notify();
});
}
Status::SignedOut { .. } => {
this.update(&mut cx, |this, cx| {
this.available_channels = None;
this.channels.clear();
cx.notify();
});
}
_ => {}
}
}
Ok(())
}
.log_err()
});
Self {
available_channels: None,
channels: Default::default(),
user_store,
client: rpc,
_task,
}
}
pub fn available_channels(&self) -> Option<&[ChannelDetails]> {
self.available_channels.as_deref()
}
pub fn get_channel(
&mut self,
id: u64,
cx: &mut MutableAppContext,
) -> Option<ModelHandle<Channel>> {
if let Some(channel) = self.channels.get(&id).and_then(|c| c.upgrade(cx)) {
return Some(channel);
}
let channels = self.available_channels.as_ref()?;
let details = channels.iter().find(|details| details.id == id)?.clone();
let channel = cx.add_model(|cx| {
Channel::new(details, self.user_store.clone(), self.client.clone(), cx)
});
self.channels.insert(id, channel.downgrade());
Some(channel)
}
}
impl Entity for Channel {
type Event = ChannelEvent;
fn release(&mut self, _: &mut MutableAppContext) {
self.rpc
.send(proto::LeaveChannel {
channel_id: self.details.id,
})
.log_err();
}
}
impl Channel {
pub fn init(rpc: &Arc<Client>) {
rpc.add_model_message_handler(Self::handle_message_sent);
}
pub fn new(
details: ChannelDetails,
user_store: ModelHandle<UserStore>,
rpc: Arc<Client>,
cx: &mut ModelContext<Self>,
) -> Self {
let _subscription = rpc.add_model_for_remote_entity(details.id, cx);
{
let user_store = user_store.clone();
let rpc = rpc.clone();
let channel_id = details.id;
cx.spawn(|channel, mut cx| {
async move {
let response = rpc.request(proto::JoinChannel { channel_id }).await?;
let messages =
messages_from_proto(response.messages, &user_store, &mut cx).await?;
let loaded_all_messages = response.done;
channel.update(&mut cx, |channel, cx| {
channel.insert_messages(messages, cx);
channel.loaded_all_messages = loaded_all_messages;
});
Ok(())
}
.log_err()
})
.detach();
}
Self {
details,
user_store,
rpc,
outgoing_messages_lock: Default::default(),
messages: Default::default(),
loaded_all_messages: false,
next_pending_message_id: 0,
rng: StdRng::from_entropy(),
_subscription,
}
}
pub fn name(&self) -> &str {
&self.details.name
}
pub fn send_message(
&mut self,
body: String,
cx: &mut ModelContext<Self>,
) -> Result<Task<Result<()>>> {
if body.is_empty() {
Err(anyhow!("message body can't be empty"))?;
}
let current_user = self
.user_store
.read(cx)
.current_user()
.ok_or_else(|| anyhow!("current_user is not present"))?;
let channel_id = self.details.id;
let pending_id = ChannelMessageId::Pending(post_inc(&mut self.next_pending_message_id));
let nonce = self.rng.gen();
self.insert_messages(
SumTree::from_item(
ChannelMessage {
id: pending_id,
body: body.clone(),
sender: current_user,
timestamp: OffsetDateTime::now_utc(),
nonce,
},
&(),
),
cx,
);
let user_store = self.user_store.clone();
let rpc = self.rpc.clone();
let outgoing_messages_lock = self.outgoing_messages_lock.clone();
Ok(cx.spawn(|this, mut cx| async move {
let outgoing_message_guard = outgoing_messages_lock.lock().await;
let request = rpc.request(proto::SendChannelMessage {
channel_id,
body,
nonce: Some(nonce.into()),
});
let response = request.await?;
drop(outgoing_message_guard);
let message = ChannelMessage::from_proto(
response.message.ok_or_else(|| anyhow!("invalid message"))?,
&user_store,
&mut cx,
)
.await?;
this.update(&mut cx, |this, cx| {
this.insert_messages(SumTree::from_item(message, &()), cx);
Ok(())
})
}))
}
pub fn load_more_messages(&mut self, cx: &mut ModelContext<Self>) -> bool {
if !self.loaded_all_messages {
let rpc = self.rpc.clone();
let user_store = self.user_store.clone();
let channel_id = self.details.id;
if let Some(before_message_id) =
self.messages.first().and_then(|message| match message.id {
ChannelMessageId::Saved(id) => Some(id),
ChannelMessageId::Pending(_) => None,
})
{
cx.spawn(|this, mut cx| {
async move {
let response = rpc
.request(proto::GetChannelMessages {
channel_id,
before_message_id,
})
.await?;
let loaded_all_messages = response.done;
let messages =
messages_from_proto(response.messages, &user_store, &mut cx).await?;
this.update(&mut cx, |this, cx| {
this.loaded_all_messages = loaded_all_messages;
this.insert_messages(messages, cx);
});
Ok(())
}
.log_err()
})
.detach();
return true;
}
}
false
}
pub fn rejoin(&mut self, cx: &mut ModelContext<Self>) {
let user_store = self.user_store.clone();
let rpc = self.rpc.clone();
let channel_id = self.details.id;
cx.spawn(|this, mut cx| {
async move {
let response = rpc.request(proto::JoinChannel { channel_id }).await?;
let messages = messages_from_proto(response.messages, &user_store, &mut cx).await?;
let loaded_all_messages = response.done;
let pending_messages = this.update(&mut cx, |this, cx| {
if let Some((first_new_message, last_old_message)) =
messages.first().zip(this.messages.last())
{
if first_new_message.id > last_old_message.id {
let old_messages = mem::take(&mut this.messages);
cx.emit(ChannelEvent::MessagesUpdated {
old_range: 0..old_messages.summary().count,
new_count: 0,
});
this.loaded_all_messages = loaded_all_messages;
}
}
this.insert_messages(messages, cx);
if loaded_all_messages {
this.loaded_all_messages = loaded_all_messages;
}
this.pending_messages().cloned().collect::<Vec<_>>()
});
for pending_message in pending_messages {
let request = rpc.request(proto::SendChannelMessage {
channel_id,
body: pending_message.body,
nonce: Some(pending_message.nonce.into()),
});
let response = request.await?;
let message = ChannelMessage::from_proto(
response.message.ok_or_else(|| anyhow!("invalid message"))?,
&user_store,
&mut cx,
)
.await?;
this.update(&mut cx, |this, cx| {
this.insert_messages(SumTree::from_item(message, &()), cx);
});
}
Ok(())
}
.log_err()
})
.detach();
}
pub fn message_count(&self) -> usize {
self.messages.summary().count
}
pub fn messages(&self) -> &SumTree<ChannelMessage> {
&self.messages
}
pub fn message(&self, ix: usize) -> &ChannelMessage {
let mut cursor = self.messages.cursor::<Count>();
cursor.seek(&Count(ix), Bias::Right, &());
cursor.item().unwrap()
}
pub fn messages_in_range(&self, range: Range<usize>) -> impl Iterator<Item = &ChannelMessage> {
let mut cursor = self.messages.cursor::<Count>();
cursor.seek(&Count(range.start), Bias::Right, &());
cursor.take(range.len())
}
pub fn pending_messages(&self) -> impl Iterator<Item = &ChannelMessage> {
let mut cursor = self.messages.cursor::<ChannelMessageId>();
cursor.seek(&ChannelMessageId::Pending(0), Bias::Left, &());
cursor
}
async fn handle_message_sent(
this: ModelHandle<Self>,
message: TypedEnvelope<proto::ChannelMessageSent>,
_: Arc<Client>,
mut cx: AsyncAppContext,
) -> Result<()> {
let user_store = this.read_with(&cx, |this, _| this.user_store.clone());
let message = message
.payload
.message
.ok_or_else(|| anyhow!("empty message"))?;
let message = ChannelMessage::from_proto(message, &user_store, &mut cx).await?;
this.update(&mut cx, |this, cx| {
this.insert_messages(SumTree::from_item(message, &()), cx)
});
Ok(())
}
fn insert_messages(&mut self, messages: SumTree<ChannelMessage>, cx: &mut ModelContext<Self>) {
if let Some((first_message, last_message)) = messages.first().zip(messages.last()) {
let nonces = messages
.cursor::<()>()
.map(|m| m.nonce)
.collect::<HashSet<_>>();
let mut old_cursor = self.messages.cursor::<(ChannelMessageId, Count)>();
let mut new_messages = old_cursor.slice(&first_message.id, Bias::Left, &());
let start_ix = old_cursor.start().1 .0;
let removed_messages = old_cursor.slice(&last_message.id, Bias::Right, &());
let removed_count = removed_messages.summary().count;
let new_count = messages.summary().count;
let end_ix = start_ix + removed_count;
new_messages.push_tree(messages, &());
let mut ranges = Vec::<Range<usize>>::new();
if new_messages.last().unwrap().is_pending() {
new_messages.push_tree(old_cursor.suffix(&()), &());
} else {
new_messages.push_tree(
old_cursor.slice(&ChannelMessageId::Pending(0), Bias::Left, &()),
&(),
);
while let Some(message) = old_cursor.item() {
let message_ix = old_cursor.start().1 .0;
if nonces.contains(&message.nonce) {
if ranges.last().map_or(false, |r| r.end == message_ix) {
ranges.last_mut().unwrap().end += 1;
} else {
ranges.push(message_ix..message_ix + 1);
}
} else {
new_messages.push(message.clone(), &());
}
old_cursor.next(&());
}
}
drop(old_cursor);
self.messages = new_messages;
for range in ranges.into_iter().rev() {
cx.emit(ChannelEvent::MessagesUpdated {
old_range: range,
new_count: 0,
});
}
cx.emit(ChannelEvent::MessagesUpdated {
old_range: start_ix..end_ix,
new_count,
});
cx.notify();
}
}
}
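// For orientation on the removed `insert_messages` above: it splices a batch of
// server messages into the local tree and drops any locally pending messages
// whose nonce matches an incoming one, so an optimistic send is not shown twice
// once the server acknowledges it. A flat-Vec analogue of that dedup step
// (ordering and summaries omitted; illustrative only):
use std::collections::HashSet;

#[derive(Clone)]
struct MsgStub {
    nonce: u128,
    pending: bool,
}

fn merge_acknowledged(existing: &mut Vec<MsgStub>, incoming: Vec<MsgStub>) {
    let incoming_nonces: HashSet<u128> = incoming.iter().map(|m| m.nonce).collect();
    // Remove pending copies that the server has now confirmed.
    existing.retain(|m| !(m.pending && incoming_nonces.contains(&m.nonce)));
    existing.extend(incoming);
}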
async fn messages_from_proto(
proto_messages: Vec<proto::ChannelMessage>,
user_store: &ModelHandle<UserStore>,
cx: &mut AsyncAppContext,
) -> Result<SumTree<ChannelMessage>> {
let unique_user_ids = proto_messages
.iter()
.map(|m| m.sender_id)
.collect::<HashSet<_>>()
.into_iter()
.collect();
user_store
.update(cx, |user_store, cx| {
user_store.get_users(unique_user_ids, cx)
})
.await?;
let mut messages = Vec::with_capacity(proto_messages.len());
for message in proto_messages {
messages.push(ChannelMessage::from_proto(message, user_store, cx).await?);
}
let mut result = SumTree::new();
result.extend(messages, &());
Ok(result)
}
impl From<proto::Channel> for ChannelDetails {
fn from(message: proto::Channel) -> Self {
Self {
id: message.id,
name: message.name,
}
}
}
impl ChannelMessage {
pub async fn from_proto(
message: proto::ChannelMessage,
user_store: &ModelHandle<UserStore>,
cx: &mut AsyncAppContext,
) -> Result<Self> {
let sender = user_store
.update(cx, |user_store, cx| {
user_store.get_user(message.sender_id, cx)
})
.await?;
Ok(ChannelMessage {
id: ChannelMessageId::Saved(message.id),
body: message.body,
timestamp: OffsetDateTime::from_unix_timestamp(message.timestamp as i64)?,
sender,
nonce: message
.nonce
.ok_or_else(|| anyhow!("nonce is required"))?
.into(),
})
}
pub fn is_pending(&self) -> bool {
matches!(self.id, ChannelMessageId::Pending(_))
}
}
impl sum_tree::Item for ChannelMessage {
type Summary = ChannelMessageSummary;
fn summary(&self) -> Self::Summary {
ChannelMessageSummary {
max_id: self.id,
count: 1,
}
}
}
impl Default for ChannelMessageId {
fn default() -> Self {
Self::Saved(0)
}
}
impl sum_tree::Summary for ChannelMessageSummary {
type Context = ();
fn add_summary(&mut self, summary: &Self, _: &()) {
self.max_id = summary.max_id;
self.count += summary.count;
}
}
impl<'a> sum_tree::Dimension<'a, ChannelMessageSummary> for ChannelMessageId {
fn add_summary(&mut self, summary: &'a ChannelMessageSummary, _: &()) {
debug_assert!(summary.max_id > *self);
*self = summary.max_id;
}
}
impl<'a> sum_tree::Dimension<'a, ChannelMessageSummary> for Count {
fn add_summary(&mut self, summary: &'a ChannelMessageSummary, _: &()) {
self.0 += summary.count;
}
}
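// The two Dimension impls above are what give the removed channel code its
// positional and id-based access: seeking by `Count` yields the nth message,
// while `ChannelMessageId` supports slicing by id. A condensed echo of the
// `message()` accessor earlier in this file, using the same cursor calls:
fn nth_message(messages: &SumTree<ChannelMessage>, ix: usize) -> Option<ChannelMessage> {
    let mut cursor = messages.cursor::<Count>();
    cursor.seek(&Count(ix), Bias::Right, &());
    cursor.item().cloned()
}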
#[cfg(test)]
mod tests {
use super::*;
use crate::test::{FakeHttpClient, FakeServer};
use gpui::TestAppContext;
#[gpui::test]
async fn test_channel_messages(cx: &mut TestAppContext) {
cx.foreground().forbid_parking();
let user_id = 5;
let http_client = FakeHttpClient::with_404_response();
let client = cx.update(|cx| Client::new(http_client.clone(), cx));
let server = FakeServer::for_client(user_id, &client, cx).await;
Channel::init(&client);
let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
let channel_list = cx.add_model(|cx| ChannelList::new(user_store, client.clone(), cx));
channel_list.read_with(cx, |list, _| assert_eq!(list.available_channels(), None));
// Get the available channels.
let get_channels = server.receive::<proto::GetChannels>().await.unwrap();
server
.respond(
get_channels.receipt(),
proto::GetChannelsResponse {
channels: vec![proto::Channel {
id: 5,
name: "the-channel".to_string(),
}],
},
)
.await;
channel_list.next_notification(cx).await;
channel_list.read_with(cx, |list, _| {
assert_eq!(
list.available_channels().unwrap(),
&[ChannelDetails {
id: 5,
name: "the-channel".into(),
}]
)
});
let get_users = server.receive::<proto::GetUsers>().await.unwrap();
assert_eq!(get_users.payload.user_ids, vec![5]);
server
.respond(
get_users.receipt(),
proto::UsersResponse {
users: vec![proto::User {
id: 5,
github_login: "nathansobo".into(),
avatar_url: "http://avatar.com/nathansobo".into(),
}],
},
)
.await;
// Join a channel and populate its existing messages.
let channel = channel_list
.update(cx, |list, cx| {
let channel_id = list.available_channels().unwrap()[0].id;
list.get_channel(channel_id, cx)
})
.unwrap();
channel.read_with(cx, |channel, _| assert!(channel.messages().is_empty()));
let join_channel = server.receive::<proto::JoinChannel>().await.unwrap();
server
.respond(
join_channel.receipt(),
proto::JoinChannelResponse {
messages: vec![
proto::ChannelMessage {
id: 10,
body: "a".into(),
timestamp: 1000,
sender_id: 5,
nonce: Some(1.into()),
},
proto::ChannelMessage {
id: 11,
body: "b".into(),
timestamp: 1001,
sender_id: 6,
nonce: Some(2.into()),
},
],
done: false,
},
)
.await;
// Client requests all users for the received messages
let mut get_users = server.receive::<proto::GetUsers>().await.unwrap();
get_users.payload.user_ids.sort();
assert_eq!(get_users.payload.user_ids, vec![6]);
server
.respond(
get_users.receipt(),
proto::UsersResponse {
users: vec![proto::User {
id: 6,
github_login: "maxbrunsfeld".into(),
avatar_url: "http://avatar.com/maxbrunsfeld".into(),
}],
},
)
.await;
assert_eq!(
channel.next_event(cx).await,
ChannelEvent::MessagesUpdated {
old_range: 0..0,
new_count: 2,
}
);
channel.read_with(cx, |channel, _| {
assert_eq!(
channel
.messages_in_range(0..2)
.map(|message| (message.sender.github_login.clone(), message.body.clone()))
.collect::<Vec<_>>(),
&[
("nathansobo".into(), "a".into()),
("maxbrunsfeld".into(), "b".into())
]
);
});
// Receive a new message.
server.send(proto::ChannelMessageSent {
channel_id: channel.read_with(cx, |channel, _| channel.details.id),
message: Some(proto::ChannelMessage {
id: 12,
body: "c".into(),
timestamp: 1002,
sender_id: 7,
nonce: Some(3.into()),
}),
});
// Client requests the sender of the new message, since it hasn't seen this user yet
let get_users = server.receive::<proto::GetUsers>().await.unwrap();
assert_eq!(get_users.payload.user_ids, vec![7]);
server
.respond(
get_users.receipt(),
proto::UsersResponse {
users: vec![proto::User {
id: 7,
github_login: "as-cii".into(),
avatar_url: "http://avatar.com/as-cii".into(),
}],
},
)
.await;
assert_eq!(
channel.next_event(cx).await,
ChannelEvent::MessagesUpdated {
old_range: 2..2,
new_count: 1,
}
);
channel.read_with(cx, |channel, _| {
assert_eq!(
channel
.messages_in_range(2..3)
.map(|message| (message.sender.github_login.clone(), message.body.clone()))
.collect::<Vec<_>>(),
&[("as-cii".into(), "c".into())]
)
});
// Scroll up to view older messages.
channel.update(cx, |channel, cx| {
assert!(channel.load_more_messages(cx));
});
let get_messages = server.receive::<proto::GetChannelMessages>().await.unwrap();
assert_eq!(get_messages.payload.channel_id, 5);
assert_eq!(get_messages.payload.before_message_id, 10);
server
.respond(
get_messages.receipt(),
proto::GetChannelMessagesResponse {
done: true,
messages: vec![
proto::ChannelMessage {
id: 8,
body: "y".into(),
timestamp: 998,
sender_id: 5,
nonce: Some(4.into()),
},
proto::ChannelMessage {
id: 9,
body: "z".into(),
timestamp: 999,
sender_id: 6,
nonce: Some(5.into()),
},
],
},
)
.await;
assert_eq!(
channel.next_event(cx).await,
ChannelEvent::MessagesUpdated {
old_range: 0..0,
new_count: 2,
}
);
channel.read_with(cx, |channel, _| {
assert_eq!(
channel
.messages_in_range(0..2)
.map(|message| (message.sender.github_login.clone(), message.body.clone()))
.collect::<Vec<_>>(),
&[
("nathansobo".into(), "y".into()),
("maxbrunsfeld".into(), "z".into())
]
);
});
}
}

View File

@@ -1,7 +1,6 @@
#[cfg(any(test, feature = "test-support"))]
pub mod test;
pub mod channel;
pub mod http;
pub mod telemetry;
pub mod user;
@@ -12,29 +11,28 @@ use async_tungstenite::tungstenite::{
error::Error as WebsocketError,
http::{Request, StatusCode},
};
use db::Db;
use futures::{future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryStreamExt};
use gpui::{
actions,
serde_json::{self, Value},
AnyModelHandle, AnyViewHandle, AnyWeakModelHandle, AnyWeakViewHandle, AppContext,
AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, View, ViewContext,
ViewHandle,
AnyModelHandle, AnyViewHandle, AnyWeakModelHandle, AnyWeakViewHandle, AppContext, AppVersion,
AsyncAppContext, Entity, ModelHandle, MutableAppContext, Task, View, ViewContext, ViewHandle,
};
use http::HttpClient;
use lazy_static::lazy_static;
use parking_lot::RwLock;
use postage::watch;
use rand::prelude::*;
use rpc::proto::{AnyTypedEnvelope, EntityMessage, EnvelopedMessage, RequestMessage};
use rpc::proto::{AnyTypedEnvelope, EntityMessage, EnvelopedMessage, PeerId, RequestMessage};
use serde::Deserialize;
use settings::ReleaseChannel;
use settings::{Settings, TelemetrySettings};
use std::{
any::TypeId,
collections::HashMap,
convert::TryFrom,
fmt::Write as _,
future::Future,
marker::PhantomData,
path::PathBuf,
sync::{Arc, Weak},
time::{Duration, Instant},
@@ -42,9 +40,9 @@ use std::{
use telemetry::Telemetry;
use thiserror::Error;
use url::Url;
use util::channel::ReleaseChannel;
use util::{ResultExt, TryFutureExt};
pub use channel::*;
pub use rpc::*;
pub use user::*;
@@ -57,6 +55,11 @@ lazy_static! {
pub static ref ADMIN_API_TOKEN: Option<String> = std::env::var("ZED_ADMIN_API_TOKEN")
.ok()
.and_then(|s| if s.is_empty() { None } else { Some(s) });
pub static ref ZED_APP_VERSION: Option<AppVersion> = std::env::var("ZED_APP_VERSION")
.ok()
.and_then(|v| v.parse().ok());
pub static ref ZED_APP_PATH: Option<PathBuf> =
std::env::var("ZED_APP_PATH").ok().map(PathBuf::from);
}
pub const ZED_SECRET_CLIENT_TOKEN: &str = "618033988749894";
@@ -143,7 +146,7 @@ impl EstablishConnectionError {
}
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Status {
SignedOut,
UpgradeRequired,
@@ -174,7 +177,7 @@ struct ClientState {
entity_id_extractors: HashMap<TypeId, fn(&dyn AnyTypedEnvelope) -> u64>,
_reconnect_task: Option<Task<()>>,
reconnect_interval: Duration,
entities_by_type_and_remote_id: HashMap<(TypeId, u64), AnyWeakEntityHandle>,
entities_by_type_and_remote_id: HashMap<(TypeId, u64), WeakSubscriber>,
models_by_message_type: HashMap<TypeId, AnyWeakModelHandle>,
entity_types_by_message_type: HashMap<TypeId, TypeId>,
#[allow(clippy::type_complexity)]
@@ -184,7 +187,7 @@ struct ClientState {
dyn Send
+ Sync
+ Fn(
AnyEntityHandle,
Subscriber,
Box<dyn AnyTypedEnvelope>,
&Arc<Client>,
AsyncAppContext,
@@ -193,12 +196,13 @@ struct ClientState {
>,
}
enum AnyWeakEntityHandle {
enum WeakSubscriber {
Model(AnyWeakModelHandle),
View(AnyWeakViewHandle),
Pending(Vec<Box<dyn AnyTypedEnvelope>>),
}
enum AnyEntityHandle {
enum Subscriber {
Model(AnyModelHandle),
View(AnyViewHandle),
}
@@ -256,11 +260,59 @@ impl Drop for Subscription {
}
}
pub struct PendingEntitySubscription<T: Entity> {
client: Arc<Client>,
remote_id: u64,
_entity_type: PhantomData<T>,
consumed: bool,
}
impl<T: Entity> PendingEntitySubscription<T> {
pub fn set_model(mut self, model: &ModelHandle<T>, cx: &mut AsyncAppContext) -> Subscription {
self.consumed = true;
let mut state = self.client.state.write();
let id = (TypeId::of::<T>(), self.remote_id);
let Some(WeakSubscriber::Pending(messages)) =
state.entities_by_type_and_remote_id.remove(&id)
else {
unreachable!()
};
state
.entities_by_type_and_remote_id
.insert(id, WeakSubscriber::Model(model.downgrade().into()));
drop(state);
for message in messages {
self.client.handle_message(message, cx);
}
Subscription::Entity {
client: Arc::downgrade(&self.client),
id,
}
}
}
impl<T: Entity> Drop for PendingEntitySubscription<T> {
fn drop(&mut self) {
if !self.consumed {
let mut state = self.client.state.write();
if let Some(WeakSubscriber::Pending(messages)) = state
.entities_by_type_and_remote_id
.remove(&(TypeId::of::<T>(), self.remote_id))
{
for message in messages {
log::info!("unhandled message {}", message.payload_type_name());
}
}
}
}
}
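// A minimal sketch of the buffer-then-flush idea that `PendingEntitySubscription`
// implements above: messages that arrive between subscribing to a remote entity
// and binding its local model are queued, then replayed in order once the model
// is set. The types below are generic stand-ins, not the client's API.
enum Slot<M> {
    Pending(Vec<M>),
    Bound(fn(M)),
}

fn deliver<M>(slot: &mut Slot<M>, message: M) {
    match slot {
        Slot::Pending(buffer) => buffer.push(message),
        Slot::Bound(handler) => (*handler)(message),
    }
}

fn bind<M>(slot: &mut Slot<M>, handler: fn(M)) {
    // Swap in the handler, then replay anything queued while we were pending.
    if let Slot::Pending(buffered) = std::mem::replace(slot, Slot::Bound(handler)) {
        for message in buffered {
            handler(message);
        }
    }
}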
impl Client {
pub fn new(http: Arc<dyn HttpClient>, cx: &AppContext) -> Arc<Self> {
Arc::new(Self {
id: 0,
peer: Peer::new(),
peer: Peer::new(0),
telemetry: Telemetry::new(http.clone(), cx),
http,
state: Default::default(),
@@ -287,14 +339,14 @@ impl Client {
}
#[cfg(any(test, feature = "test-support"))]
pub fn tear_down(&self) {
pub fn teardown(&self) {
let mut state = self.state.write();
state._reconnect_task.take();
state.message_handlers.clear();
state.models_by_message_type.clear();
state.entities_by_type_and_remote_id.clear();
state.entity_id_extractors.clear();
self.peer.reset();
self.peer.teardown();
}
#[cfg(any(test, feature = "test-support"))]
@@ -351,7 +403,11 @@ impl Client {
let this = self.clone();
let reconnect_interval = state.reconnect_interval;
state._reconnect_task = Some(cx.spawn(|cx| async move {
#[cfg(any(test, feature = "test-support"))]
let mut rng = StdRng::seed_from_u64(0);
#[cfg(not(any(test, feature = "test-support")))]
let mut rng = StdRng::from_entropy();
let mut delay = INITIAL_RECONNECTION_DELAY;
while let Err(error) = this.authenticate_and_connect(true, &cx).await {
log::error!("failed to connect {}", error);
@@ -373,7 +429,9 @@ impl Client {
}));
}
Status::SignedOut | Status::UpgradeRequired => {
self.telemetry.set_authenticated_user_info(None, false);
let telemetry_settings = cx.read(|cx| cx.global::<Settings>().telemetry());
self.telemetry
.set_authenticated_user_info(None, false, telemetry_settings);
state._reconnect_task.take();
}
_ => {}
@@ -389,26 +447,28 @@ impl Client {
self.state
.write()
.entities_by_type_and_remote_id
.insert(id, AnyWeakEntityHandle::View(cx.weak_handle().into()));
.insert(id, WeakSubscriber::View(cx.weak_handle().into()));
Subscription::Entity {
client: Arc::downgrade(self),
id,
}
}
pub fn add_model_for_remote_entity<T: Entity>(
pub fn subscribe_to_entity<T: Entity>(
self: &Arc<Self>,
remote_id: u64,
cx: &mut ModelContext<T>,
) -> Subscription {
) -> PendingEntitySubscription<T> {
let id = (TypeId::of::<T>(), remote_id);
self.state
.write()
.entities_by_type_and_remote_id
.insert(id, AnyWeakEntityHandle::Model(cx.weak_handle().into()));
Subscription::Entity {
client: Arc::downgrade(self),
id,
.insert(id, WeakSubscriber::Pending(Default::default()));
PendingEntitySubscription {
client: self.clone(),
remote_id,
consumed: false,
_entity_type: PhantomData,
}
}
@@ -436,7 +496,7 @@ impl Client {
let prev_handler = state.message_handlers.insert(
message_type_id,
Arc::new(move |handle, envelope, client, cx| {
let handle = if let AnyEntityHandle::Model(handle) = handle {
let handle = if let Subscriber::Model(handle) = handle {
handle
} else {
unreachable!();
@@ -490,7 +550,7 @@ impl Client {
F: 'static + Future<Output = Result<()>>,
{
self.add_entity_message_handler::<M, E, _, _>(move |handle, message, client, cx| {
if let AnyEntityHandle::View(handle) = handle {
if let Subscriber::View(handle) = handle {
handler(handle.downcast::<E>().unwrap(), message, client, cx)
} else {
unreachable!();
@@ -509,7 +569,7 @@ impl Client {
F: 'static + Future<Output = Result<()>>,
{
self.add_entity_message_handler::<M, E, _, _>(move |handle, message, client, cx| {
if let AnyEntityHandle::Model(handle) = handle {
if let Subscriber::Model(handle) = handle {
handler(handle.downcast::<E>().unwrap(), message, client, cx)
} else {
unreachable!();
@@ -524,7 +584,7 @@ impl Client {
H: 'static
+ Send
+ Sync
+ Fn(AnyEntityHandle, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
+ Fn(Subscriber, TypedEnvelope<M>, Arc<Self>, AsyncAppContext) -> F,
F: 'static + Future<Output = Result<()>>,
{
let model_type_id = TypeId::of::<E>();
@@ -654,7 +714,13 @@ impl Client {
credentials = read_credentials_from_keychain(cx);
read_from_keychain = credentials.is_some();
if read_from_keychain {
self.report_event("read credentials from keychain", Default::default());
cx.read(|cx| {
self.report_event(
"read credentials from keychain",
Default::default(),
cx.global::<Settings>().telemetry(),
);
});
}
}
if credentials.is_none() {
@@ -758,7 +824,11 @@ impl Client {
hello_message_type_name
)
})?;
Ok(PeerId(hello.payload.peer_id))
let peer_id = hello
.payload
.peer_id
.ok_or_else(|| anyhow!("invalid peer id"))?;
Ok(peer_id)
};
let peer_id = match peer_id.await {
@@ -770,7 +840,7 @@ impl Client {
};
log::info!(
"set status to connected (connection id: {}, peer id: {})",
"set status to connected (connection id: {:?}, peer id: {:?})",
connection_id,
peer_id
);
@@ -786,94 +856,8 @@ impl Client {
let cx = cx.clone();
let this = self.clone();
async move {
let mut message_id = 0_usize;
while let Some(message) = incoming.next().await {
let mut state = this.state.write();
message_id += 1;
let type_name = message.payload_type_name();
let payload_type_id = message.payload_type_id();
let sender_id = message.original_sender_id().map(|id| id.0);
let model = state
.models_by_message_type
.get(&payload_type_id)
.and_then(|model| model.upgrade(&cx))
.map(AnyEntityHandle::Model)
.or_else(|| {
let entity_type_id =
*state.entity_types_by_message_type.get(&payload_type_id)?;
let entity_id = state
.entity_id_extractors
.get(&message.payload_type_id())
.map(|extract_entity_id| {
(extract_entity_id)(message.as_ref())
})?;
let entity = state
.entities_by_type_and_remote_id
.get(&(entity_type_id, entity_id))?;
if let Some(entity) = entity.upgrade(&cx) {
Some(entity)
} else {
state
.entities_by_type_and_remote_id
.remove(&(entity_type_id, entity_id));
None
}
});
let model = if let Some(model) = model {
model
} else {
log::info!("unhandled message {}", type_name);
continue;
};
let handler = state.message_handlers.get(&payload_type_id).cloned();
// Dropping the state prevents deadlocks if the handler interacts with rpc::Client.
// It also ensures we don't hold the lock while yielding back to the executor, as
// that might cause the executor thread driving this future to block indefinitely.
drop(state);
if let Some(handler) = handler {
let future = handler(model, message, &this, cx.clone());
let client_id = this.id;
log::debug!(
"rpc message received. client_id:{}, message_id:{}, sender_id:{:?}, type:{}",
client_id,
message_id,
sender_id,
type_name
);
cx.foreground()
.spawn(async move {
match future.await {
Ok(()) => {
log::debug!(
"rpc message handled. client_id:{}, message_id:{}, sender_id:{:?}, type:{}",
client_id,
message_id,
sender_id,
type_name
);
}
Err(error) => {
log::error!(
"error handling message. client_id:{}, message_id:{}, sender_id:{:?}, type:{}, error:{:?}",
client_id,
message_id,
sender_id,
type_name,
error
);
}
}
})
.detach();
} else {
log::info!("unhandled message {}", type_name);
}
this.handle_message(message, &cx);
// Don't starve the main thread when receiving lots of messages at once.
smol::future::yield_now().await;
}
@@ -887,7 +871,7 @@ impl Client {
.spawn(async move {
match handle_io.await {
Ok(()) => {
if *this.status().borrow()
if this.status().borrow().clone()
== (Status::Connected {
connection_id,
peer_id,
@@ -1027,6 +1011,8 @@ impl Client {
let executor = cx.background();
let telemetry = self.telemetry.clone();
let http = self.http.clone();
let metrics_enabled = cx.read(|cx| cx.global::<Settings>().telemetry());
executor.clone().spawn(async move {
// Generate a pair of asymmetric encryption keys. The public key will be used by the
// zed server to encrypt the user's access token, so that it can't be intercepted by
@@ -1109,7 +1095,11 @@ impl Client {
.context("failed to decrypt access token")?;
platform.activate(true);
telemetry.report_event("authenticate with browser", Default::default());
telemetry.report_event(
"authenticate with browser",
Default::default(),
metrics_enabled,
);
Ok(Credentials {
user_id: user_id.parse()?,
@@ -1220,24 +1210,128 @@ impl Client {
self.peer.respond_with_error(receipt, error)
}
pub fn start_telemetry(&self, db: Db) {
self.telemetry.start(db.clone());
fn handle_message(
self: &Arc<Client>,
message: Box<dyn AnyTypedEnvelope>,
cx: &AsyncAppContext,
) {
let mut state = self.state.write();
let type_name = message.payload_type_name();
let payload_type_id = message.payload_type_id();
let sender_id = message.original_sender_id();
let mut subscriber = None;
if let Some(message_model) = state
.models_by_message_type
.get(&payload_type_id)
.and_then(|model| model.upgrade(cx))
{
subscriber = Some(Subscriber::Model(message_model));
} else if let Some((extract_entity_id, entity_type_id)) =
state.entity_id_extractors.get(&payload_type_id).zip(
state
.entity_types_by_message_type
.get(&payload_type_id)
.copied(),
)
{
let entity_id = (extract_entity_id)(message.as_ref());
match state
.entities_by_type_and_remote_id
.get_mut(&(entity_type_id, entity_id))
{
Some(WeakSubscriber::Pending(pending)) => {
pending.push(message);
return;
}
Some(weak_subscriber @ _) => subscriber = weak_subscriber.upgrade(cx),
_ => {}
}
}
let subscriber = if let Some(subscriber) = subscriber {
subscriber
} else {
log::info!("unhandled message {}", type_name);
self.peer.respond_with_unhandled_message(message).log_err();
return;
};
let handler = state.message_handlers.get(&payload_type_id).cloned();
// Dropping the state prevents deadlocks if the handler interacts with rpc::Client.
// It also ensures we don't hold the lock while yielding back to the executor, as
// that might cause the executor thread driving this future to block indefinitely.
drop(state);
if let Some(handler) = handler {
let future = handler(subscriber, message, &self, cx.clone());
let client_id = self.id;
log::debug!(
"rpc message received. client_id:{}, sender_id:{:?}, type:{}",
client_id,
sender_id,
type_name
);
cx.foreground()
.spawn(async move {
match future.await {
Ok(()) => {
log::debug!(
"rpc message handled. client_id:{}, sender_id:{:?}, type:{}",
client_id,
sender_id,
type_name
);
}
Err(error) => {
log::error!(
"error handling message. client_id:{}, sender_id:{:?}, type:{}, error:{:?}",
client_id,
sender_id,
type_name,
error
);
}
}
})
.detach();
} else {
log::info!("unhandled message {}", type_name);
self.peer.respond_with_unhandled_message(message).log_err();
}
}
pub fn report_event(&self, kind: &str, properties: Value) {
self.telemetry.report_event(kind, properties.clone());
pub fn start_telemetry(&self) {
self.telemetry.start();
}
pub fn report_event(
&self,
kind: &str,
properties: Value,
telemetry_settings: TelemetrySettings,
) {
self.telemetry
.report_event(kind, properties.clone(), telemetry_settings);
}
pub fn telemetry_log_file_path(&self) -> Option<PathBuf> {
self.telemetry.log_file_path()
}
pub fn metrics_id(&self) -> Option<Arc<str>> {
self.telemetry.metrics_id()
}
}
impl AnyWeakEntityHandle {
fn upgrade(&self, cx: &AsyncAppContext) -> Option<AnyEntityHandle> {
impl WeakSubscriber {
fn upgrade(&self, cx: &AsyncAppContext) -> Option<Subscriber> {
match self {
AnyWeakEntityHandle::Model(handle) => handle.upgrade(cx).map(AnyEntityHandle::Model),
AnyWeakEntityHandle::View(handle) => handle.upgrade(cx).map(AnyEntityHandle::View),
WeakSubscriber::Model(handle) => handle.upgrade(cx).map(Subscriber::Model),
WeakSubscriber::View(handle) => handle.upgrade(cx).map(Subscriber::View),
WeakSubscriber::Pending(_) => None,
}
}
}
@@ -1482,11 +1576,17 @@ mod tests {
subscription: None,
});
let _subscription1 = model1.update(cx, |_, cx| client.add_model_for_remote_entity(1, cx));
let _subscription2 = model2.update(cx, |_, cx| client.add_model_for_remote_entity(2, cx));
let _subscription1 = client
.subscribe_to_entity(1)
.set_model(&model1, &mut cx.to_async());
let _subscription2 = client
.subscribe_to_entity(2)
.set_model(&model2, &mut cx.to_async());
// Ensure dropping a subscription for the same entity type still allows receiving of
// messages for other entity IDs of the same type.
let subscription3 = model3.update(cx, |_, cx| client.add_model_for_remote_entity(3, cx));
let subscription3 = client
.subscribe_to_entity(3)
.set_model(&model3, &mut cx.to_async());
drop(subscription3);
server.send(proto::JoinProject { project_id: 1 });

View File

@@ -1,5 +1,5 @@
use crate::http::HttpClient;
use db::Db;
use db::kvp::KEY_VALUE_STORE;
use gpui::{
executor::Background,
serde_json::{self, value::Map, Value},
@@ -10,7 +10,7 @@ use lazy_static::lazy_static;
use parking_lot::Mutex;
use serde::Serialize;
use serde_json::json;
use settings::ReleaseChannel;
use settings::TelemetrySettings;
use std::{
io::Write,
mem,
@@ -19,7 +19,7 @@ use std::{
time::{Duration, SystemTime, UNIX_EPOCH},
};
use tempfile::NamedTempFile;
use util::{post_inc, ResultExt, TryFutureExt};
use util::{channel::ReleaseChannel, post_inc, ResultExt, TryFutureExt};
use uuid::Uuid;
pub struct Telemetry {
@@ -107,7 +107,7 @@ impl Telemetry {
pub fn new(client: Arc<dyn HttpClient>, cx: &AppContext) -> Arc<Self> {
let platform = cx.platform();
let release_channel = if cx.has_global::<ReleaseChannel>() {
Some(cx.global::<ReleaseChannel>().name())
Some(cx.global::<ReleaseChannel>().display_name())
} else {
None
};
@@ -148,18 +148,21 @@ impl Telemetry {
Some(self.state.lock().log_file.as_ref()?.path().to_path_buf())
}
pub fn start(self: &Arc<Self>, db: Db) {
pub fn start(self: &Arc<Self>) {
let this = self.clone();
self.executor
.spawn(
async move {
let device_id = if let Ok(Some(device_id)) = db.read_kvp("device_id") {
device_id
} else {
let device_id = Uuid::new_v4().to_string();
db.write_kvp("device_id", &device_id)?;
device_id
};
let device_id =
if let Ok(Some(device_id)) = KEY_VALUE_STORE.read_kvp("device_id") {
device_id
} else {
let device_id = Uuid::new_v4().to_string();
KEY_VALUE_STORE
.write_kvp("device_id".to_string(), device_id.clone())
.await?;
device_id
};
let device_id: Arc<str> = device_id.into();
let mut state = this.state.lock();
@@ -182,11 +185,18 @@ impl Telemetry {
.detach();
}
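// Telemetry start-up above reads a persisted "device_id" from the key-value store
// and writes a freshly generated UUID on first run, so the id is stable across
// restarts. The read-or-create step in isolation, with plain closures standing in
// for KEY_VALUE_STORE (whose real write is async):
use uuid::Uuid;

fn device_id(read: impl Fn(&str) -> Option<String>, mut write: impl FnMut(&str, &str)) -> String {
    if let Some(id) = read("device_id") {
        return id;
    }
    let id = Uuid::new_v4().to_string();
    write("device_id", &id);
    id
}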
/// This method takes the entire TelemetrySettings struct in order to force client code
/// to pull the struct out of the settings global. Do not remove!
pub fn set_authenticated_user_info(
self: &Arc<Self>,
metrics_id: Option<String>,
is_staff: bool,
telemetry_settings: TelemetrySettings,
) {
if !telemetry_settings.metrics() {
return;
}
let this = self.clone();
let mut state = self.state.lock();
let device_id = state.device_id.clone();
@@ -219,7 +229,16 @@ impl Telemetry {
}
}
pub fn report_event(self: &Arc<Self>, kind: &str, properties: Value) {
pub fn report_event(
self: &Arc<Self>,
kind: &str,
properties: Value,
telemetry_settings: TelemetrySettings,
) {
if !telemetry_settings.metrics() {
return;
}
let mut state = self.state.lock();
let event = MixpanelEvent {
event: kind.to_string(),
@@ -259,6 +278,10 @@ impl Telemetry {
}
}
pub fn metrics_id(self: &Arc<Self>) -> Option<Arc<str>> {
self.state.lock().metrics_id.clone()
}
fn flush(self: &Arc<Self>) {
let mut state = self.state.lock();
let mut events = mem::take(&mut state.queue);

View File

@@ -35,7 +35,7 @@ impl FakeServer {
cx: &TestAppContext,
) -> Self {
let server = Self {
peer: Peer::new(),
peer: Peer::new(0),
state: Default::default(),
user_id: client_user_id,
executor: cx.foreground(),
@@ -92,7 +92,7 @@ impl FakeServer {
peer.send(
connection_id,
proto::Hello {
peer_id: connection_id.0,
peer_id: Some(connection_id.into()),
},
)
.unwrap();

View File

@@ -5,8 +5,9 @@ use futures::{channel::mpsc, future, AsyncReadExt, Future, StreamExt};
use gpui::{AsyncAppContext, Entity, ImageData, ModelContext, ModelHandle, Task};
use postage::{sink::Sink, watch};
use rpc::proto::{RequestMessage, UsersResponse};
use settings::Settings;
use std::sync::{Arc, Weak};
use util::TryFutureExt as _;
use util::{StaffMode, TryFutureExt as _};
#[derive(Default, Debug)]
pub struct User {
@@ -141,16 +142,25 @@ impl UserStore {
let fetch_metrics_id =
client.request(proto::GetPrivateUserInfo {}).log_err();
let (user, info) = futures::join!(fetch_user, fetch_metrics_id);
if let Some(info) = info {
client.telemetry.set_authenticated_user_info(
Some(info.metrics_id.clone()),
info.staff,
);
} else {
client.telemetry.set_authenticated_user_info(None, false);
}
client.telemetry.set_authenticated_user_info(
info.as_ref().map(|info| info.metrics_id.clone()),
info.as_ref().map(|info| info.staff).unwrap_or(false),
cx.read(|cx| cx.global::<Settings>().telemetry()),
);
cx.update(|cx| {
cx.update_default_global(|staff_mode: &mut StaffMode, _| {
if !staff_mode.0 {
*staff_mode = StaffMode(
info.as_ref()
.map(|info| info.staff)
.unwrap_or_default(),
)
}
()
});
});
client.telemetry.report_event("sign in", Default::default());
current_user_tx.send(user).await.ok();
}
}
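// The hunk above latches a StaffMode global to true when the server reports a
// staff user and never flips it back within the session. That sticky update in
// isolation (StaffModeFlag here is a local stand-in for the util type):
#[derive(Default)]
struct StaffModeFlag(bool);

fn update_staff_mode(staff_mode: &mut StaffModeFlag, is_staff_from_server: Option<bool>) {
    // Only ever latch from false to true; later absent or false responses are ignored.
    if !staff_mode.0 {
        *staff_mode = StaffModeFlag(is_staff_from_server.unwrap_or_default());
    }
}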

View File

@@ -2,6 +2,7 @@
name = "clock"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/clock.rs"

View File

@@ -2,6 +2,7 @@ DATABASE_URL = "postgres://postgres@localhost/zed"
HTTP_PORT = 8080
API_TOKEN = "secret"
INVITE_LINK_PREFIX = "http://localhost:3000/invites/"
ZED_ENVIRONMENT = "development"
LIVE_KIT_SERVER = "http://localhost:7880"
LIVE_KIT_KEY = "devkey"
LIVE_KIT_SECRET = "secret"

View File

@@ -3,7 +3,8 @@ authors = ["Nathan Sobo <nathan@zed.dev>"]
default-run = "collab"
edition = "2021"
name = "collab"
version = "0.2.0"
version = "0.5.4"
publish = false
[[bin]]
name = "collab"
@@ -19,12 +20,12 @@ rpc = { path = "../rpc" }
util = { path = "../util" }
anyhow = "1.0.40"
async-trait = "0.1.50"
async-tungstenite = "0.16"
axum = { version = "0.5", features = ["json", "headers", "ws"] }
axum-extra = { version = "0.3", features = ["erased-json"] }
base64 = "0.13"
clap = { version = "3.1", features = ["derive"], optional = true }
dashmap = "5.4"
envy = "0.4.2"
futures = "0.3"
hyper = "0.14"
@@ -36,9 +37,13 @@ prometheus = "0.13"
rand = "0.8"
reqwest = { version = "0.11", features = ["json"], optional = true }
scrypt = "0.7"
# Remove fork dependency when a version with https://github.com/SeaQL/sea-orm/pull/1283 is released.
sea-orm = { git = "https://github.com/zed-industries/sea-orm", rev = "18f4c691085712ad014a51792af75a9044bacee6", features = ["sqlx-postgres", "postgres-array", "runtime-tokio-rustls"] }
sea-query = "0.27"
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
sha-1 = "0.9"
sqlx = { version = "0.6", features = ["runtime-tokio-rustls", "postgres", "json", "time", "uuid", "any"] }
time = { version = "0.3", features = ["serde", "serde-well-known"] }
tokio = { version = "1", features = ["full"] }
tokio-tungstenite = "0.17"
@@ -49,10 +54,6 @@ tracing = "0.1.34"
tracing-log = "0.1.3"
tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] }
[dependencies.sqlx]
version = "0.6"
features = ["runtime-tokio-rustls", "postgres", "time", "uuid"]
[dev-dependencies]
collections = { path = "../collections", features = ["test-support"] }
gpui = { path = "../gpui", features = ["test-support"] }
@@ -64,6 +65,7 @@ fs = { path = "../fs", features = ["test-support"] }
git = { path = "../git", features = ["test-support"] }
live_kit_client = { path = "../live_kit_client", features = ["test-support"] }
lsp = { path = "../lsp", features = ["test-support"] }
pretty_assertions = "1.3.0"
project = { path = "../project", features = ["test-support"] }
rpc = { path = "../rpc", features = ["test-support"] }
settings = { path = "../settings", features = ["test-support"] }
@@ -75,7 +77,9 @@ env_logger = "0.9"
log = { version = "0.4.16", features = ["kv_unstable_serde"] }
util = { path = "../util" }
lazy_static = "1.4"
sea-orm = { git = "https://github.com/zed-industries/sea-orm", rev = "18f4c691085712ad014a51792af75a9044bacee6", features = ["sqlx-sqlite"] }
serde_json = { version = "1.0", features = ["preserve_order"] }
sqlx = { version = "0.6", features = ["sqlite"] }
unindent = "0.1"
[features]

View File

@@ -59,6 +59,12 @@ spec:
ports:
- containerPort: 8080
protocol: TCP
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 1
periodSeconds: 1
env:
- name: HTTP_PORT
value: "8080"
@@ -93,6 +99,8 @@ spec:
value: ${RUST_LOG}
- name: LOG_JSON
value: "true"
- name: ZED_ENVIRONMENT
value: ${ZED_ENVIRONMENT}
securityContext:
capabilities:
# FIXME - Switch to the more restrictive `PERFMON` capability.

View File

@@ -0,0 +1,145 @@
CREATE TABLE "users" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"github_login" VARCHAR,
"admin" BOOLEAN,
"email_address" VARCHAR(255) DEFAULT NULL,
"invite_code" VARCHAR(64),
"invite_count" INTEGER NOT NULL DEFAULT 0,
"inviter_id" INTEGER REFERENCES users (id),
"connected_once" BOOLEAN NOT NULL DEFAULT false,
"created_at" TIMESTAMP NOT NULL DEFAULT now,
"metrics_id" TEXT,
"github_user_id" INTEGER
);
CREATE UNIQUE INDEX "index_users_github_login" ON "users" ("github_login");
CREATE UNIQUE INDEX "index_invite_code_users" ON "users" ("invite_code");
CREATE INDEX "index_users_on_email_address" ON "users" ("email_address");
CREATE INDEX "index_users_on_github_user_id" ON "users" ("github_user_id");
CREATE TABLE "access_tokens" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"user_id" INTEGER REFERENCES users (id),
"hash" VARCHAR(128)
);
CREATE INDEX "index_access_tokens_user_id" ON "access_tokens" ("user_id");
CREATE TABLE "contacts" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"user_id_a" INTEGER REFERENCES users (id) NOT NULL,
"user_id_b" INTEGER REFERENCES users (id) NOT NULL,
"a_to_b" BOOLEAN NOT NULL,
"should_notify" BOOLEAN NOT NULL,
"accepted" BOOLEAN NOT NULL
);
CREATE UNIQUE INDEX "index_contacts_user_ids" ON "contacts" ("user_id_a", "user_id_b");
CREATE INDEX "index_contacts_user_id_b" ON "contacts" ("user_id_b");
CREATE TABLE "rooms" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"live_kit_room" VARCHAR NOT NULL
);
CREATE TABLE "projects" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"room_id" INTEGER REFERENCES rooms (id) NOT NULL,
"host_user_id" INTEGER REFERENCES users (id) NOT NULL,
"host_connection_id" INTEGER,
"host_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE,
"unregistered" BOOLEAN NOT NULL DEFAULT FALSE
);
CREATE INDEX "index_projects_on_host_connection_server_id" ON "projects" ("host_connection_server_id");
CREATE INDEX "index_projects_on_host_connection_id_and_host_connection_server_id" ON "projects" ("host_connection_id", "host_connection_server_id");
CREATE TABLE "worktrees" (
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"id" INTEGER NOT NULL,
"root_name" VARCHAR NOT NULL,
"abs_path" VARCHAR NOT NULL,
"visible" BOOL NOT NULL,
"scan_id" INTEGER NOT NULL,
"is_complete" BOOL NOT NULL DEFAULT FALSE,
"completed_scan_id" INTEGER NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id");
CREATE TABLE "worktree_entries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"scan_id" INTEGER NOT NULL,
"id" INTEGER NOT NULL,
"is_dir" BOOL NOT NULL,
"path" VARCHAR NOT NULL,
"inode" INTEGER NOT NULL,
"mtime_seconds" INTEGER NOT NULL,
"mtime_nanos" INTEGER NOT NULL,
"is_symlink" BOOL NOT NULL,
"is_ignored" BOOL NOT NULL,
"is_deleted" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id");
CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id");
CREATE TABLE "worktree_diagnostic_summaries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INTEGER NOT NULL,
"path" VARCHAR NOT NULL,
"language_server_id" INTEGER NOT NULL,
"error_count" INTEGER NOT NULL,
"warning_count" INTEGER NOT NULL,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id");
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id");
CREATE TABLE "language_servers" (
"id" INTEGER NOT NULL,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"name" VARCHAR NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id");
CREATE TABLE "project_collaborators" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE,
"user_id" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"is_host" BOOLEAN NOT NULL
);
CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id");
CREATE INDEX "index_project_collaborators_on_connection_server_id" ON "project_collaborators" ("connection_server_id");
CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" ("project_id", "connection_id", "connection_server_id");
CREATE TABLE "room_participants" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"room_id" INTEGER NOT NULL REFERENCES rooms (id),
"user_id" INTEGER NOT NULL REFERENCES users (id),
"answering_connection_id" INTEGER,
"answering_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE,
"answering_connection_lost" BOOLEAN NOT NULL,
"location_kind" INTEGER,
"location_project_id" INTEGER,
"initial_project_id" INTEGER,
"calling_user_id" INTEGER NOT NULL REFERENCES users (id),
"calling_connection_id" INTEGER NOT NULL,
"calling_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE SET NULL
);
CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id");
CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id");
CREATE INDEX "index_room_participants_on_answering_connection_server_id" ON "room_participants" ("answering_connection_server_id");
CREATE INDEX "index_room_participants_on_calling_connection_server_id" ON "room_participants" ("calling_connection_server_id");
CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" ("answering_connection_id", "answering_connection_server_id");
CREATE TABLE "servers" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"environment" VARCHAR NOT NULL
);

View File

@@ -0,0 +1,90 @@
CREATE TABLE IF NOT EXISTS "rooms" (
"id" SERIAL PRIMARY KEY,
"live_kit_room" VARCHAR NOT NULL
);
ALTER TABLE "projects"
ADD "room_id" INTEGER REFERENCES rooms (id),
ADD "host_connection_id" INTEGER,
ADD "host_connection_epoch" UUID;
CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch");
CREATE TABLE "worktrees" (
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"id" INT8 NOT NULL,
"root_name" VARCHAR NOT NULL,
"abs_path" VARCHAR NOT NULL,
"visible" BOOL NOT NULL,
"scan_id" INT8 NOT NULL,
"is_complete" BOOL NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_worktrees_on_project_id" ON "worktrees" ("project_id");
CREATE TABLE "worktree_entries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"id" INT8 NOT NULL,
"is_dir" BOOL NOT NULL,
"path" VARCHAR NOT NULL,
"inode" INT8 NOT NULL,
"mtime_seconds" INT8 NOT NULL,
"mtime_nanos" INTEGER NOT NULL,
"is_symlink" BOOL NOT NULL,
"is_ignored" BOOL NOT NULL,
PRIMARY KEY(project_id, worktree_id, id),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_entries_on_project_id" ON "worktree_entries" ("project_id");
CREATE INDEX "index_worktree_entries_on_project_id_and_worktree_id" ON "worktree_entries" ("project_id", "worktree_id");
CREATE TABLE "worktree_diagnostic_summaries" (
"project_id" INTEGER NOT NULL,
"worktree_id" INT8 NOT NULL,
"path" VARCHAR NOT NULL,
"language_server_id" INT8 NOT NULL,
"error_count" INTEGER NOT NULL,
"warning_count" INTEGER NOT NULL,
PRIMARY KEY(project_id, worktree_id, path),
FOREIGN KEY(project_id, worktree_id) REFERENCES worktrees (project_id, id) ON DELETE CASCADE
);
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id" ON "worktree_diagnostic_summaries" ("project_id");
CREATE INDEX "index_worktree_diagnostic_summaries_on_project_id_and_worktree_id" ON "worktree_diagnostic_summaries" ("project_id", "worktree_id");
CREATE TABLE "language_servers" (
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"id" INT8 NOT NULL,
"name" VARCHAR NOT NULL,
PRIMARY KEY(project_id, id)
);
CREATE INDEX "index_language_servers_on_project_id" ON "language_servers" ("project_id");
CREATE TABLE "project_collaborators" (
"id" SERIAL PRIMARY KEY,
"project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE,
"connection_id" INTEGER NOT NULL,
"connection_epoch" UUID NOT NULL,
"user_id" INTEGER NOT NULL,
"replica_id" INTEGER NOT NULL,
"is_host" BOOLEAN NOT NULL
);
CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id");
CREATE INDEX "index_project_collaborators_on_connection_epoch" ON "project_collaborators" ("connection_epoch");
CREATE TABLE "room_participants" (
"id" SERIAL PRIMARY KEY,
"room_id" INTEGER NOT NULL REFERENCES rooms (id),
"user_id" INTEGER NOT NULL REFERENCES users (id),
"answering_connection_id" INTEGER,
"answering_connection_epoch" UUID,
"location_kind" INTEGER,
"location_project_id" INTEGER,
"initial_project_id" INTEGER,
"calling_user_id" INTEGER NOT NULL REFERENCES users (id),
"calling_connection_id" INTEGER NOT NULL,
"calling_connection_epoch" UUID NOT NULL
);
CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id");
CREATE INDEX "index_room_participants_on_answering_connection_epoch" ON "room_participants" ("answering_connection_epoch");
CREATE INDEX "index_room_participants_on_calling_connection_epoch" ON "room_participants" ("calling_connection_epoch");

View File

@@ -0,0 +1,2 @@
ALTER TABLE "signups"
ADD "added_to_mailing_list" BOOLEAN NOT NULL DEFAULT FALSE;

View File

@@ -0,0 +1,7 @@
ALTER TABLE "room_participants"
ADD "answering_connection_lost" BOOLEAN NOT NULL DEFAULT FALSE;
CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_epoch" ON "project_collaborators" ("project_id", "connection_id", "connection_epoch");
CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_epoch" ON "room_participants" ("answering_connection_id", "answering_connection_epoch");

View File

@@ -0,0 +1 @@
CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id");

View File

@@ -0,0 +1,30 @@
CREATE TABLE servers (
id SERIAL PRIMARY KEY,
environment VARCHAR NOT NULL
);
DROP TABLE worktree_extensions;
DROP TABLE project_activity_periods;
DELETE from projects;
ALTER TABLE projects
DROP COLUMN host_connection_epoch,
ADD COLUMN host_connection_server_id INTEGER REFERENCES servers (id) ON DELETE CASCADE;
CREATE INDEX "index_projects_on_host_connection_server_id" ON "projects" ("host_connection_server_id");
CREATE INDEX "index_projects_on_host_connection_id_and_host_connection_server_id" ON "projects" ("host_connection_id", "host_connection_server_id");
DELETE FROM project_collaborators;
ALTER TABLE project_collaborators
DROP COLUMN connection_epoch,
ADD COLUMN connection_server_id INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE;
CREATE INDEX "index_project_collaborators_on_connection_server_id" ON "project_collaborators" ("connection_server_id");
CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" ("project_id", "connection_id", "connection_server_id");
DELETE FROM room_participants;
ALTER TABLE room_participants
DROP COLUMN answering_connection_epoch,
DROP COLUMN calling_connection_epoch,
ADD COLUMN answering_connection_server_id INTEGER REFERENCES servers (id) ON DELETE CASCADE,
ADD COLUMN calling_connection_server_id INTEGER REFERENCES servers (id) ON DELETE SET NULL;
CREATE INDEX "index_room_participants_on_answering_connection_server_id" ON "room_participants" ("answering_connection_server_id");
CREATE INDEX "index_room_participants_on_calling_connection_server_id" ON "room_participants" ("calling_connection_server_id");
CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" ("answering_connection_id", "answering_connection_server_id");

View File

@@ -0,0 +1,3 @@
ALTER TABLE "worktree_entries"
ADD COLUMN "scan_id" INT8,
ADD COLUMN "is_deleted" BOOL;

View File

@@ -0,0 +1,3 @@
ALTER TABLE worktrees
ALTER COLUMN is_complete SET DEFAULT FALSE,
ADD COLUMN completed_scan_id INT8;

View File

@@ -1,6 +1,6 @@
use crate::{
auth,
db::{Invite, NewUserParams, ProjectId, Signup, User, UserId, WaitlistSummary},
db::{Invite, NewSignup, NewUserParams, User, UserId, WaitlistSummary},
rpc::{self, ResultExt},
AppState, Error, Result,
};
@@ -16,9 +16,7 @@ use axum::{
};
use axum_extra::response::ErasedJson;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::{sync::Arc, time::Duration};
use time::OffsetDateTime;
use std::sync::Arc;
use tower::ServiceBuilder;
use tracing::instrument;
@@ -32,16 +30,6 @@ pub fn routes(rpc_server: Arc<rpc::Server>, state: Arc<AppState>) -> Router<Body
.route("/invite_codes/:code", get(get_user_for_invite_code))
.route("/panic", post(trace_panic))
.route("/rpc_server_snapshot", get(get_rpc_server_snapshot))
.route(
"/user_activity/summary",
get(get_top_users_activity_summary),
)
.route(
"/user_activity/timeline/:user_id",
get(get_user_activity_timeline),
)
.route("/user_activity/counts", get(get_active_user_counts))
.route("/project_metadata", get(get_project_metadata))
.route("/signups", post(create_signup))
.route("/signups_summary", get(get_waitlist_summary))
.route("/user_invites", post(create_invite_from_code))
@@ -216,7 +204,7 @@ async fn create_user(
#[derive(Deserialize)]
struct UpdateUserParams {
admin: Option<bool>,
invite_count: Option<u32>,
invite_count: Option<i32>,
}
async fn update_user(
@@ -283,93 +271,6 @@ async fn get_rpc_server_snapshot(
Ok(ErasedJson::pretty(rpc_server.snapshot().await))
}
#[derive(Deserialize)]
struct TimePeriodParams {
#[serde(with = "time::serde::iso8601")]
start: OffsetDateTime,
#[serde(with = "time::serde::iso8601")]
end: OffsetDateTime,
}
async fn get_top_users_activity_summary(
Query(params): Query<TimePeriodParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<ErasedJson> {
let summary = app
.db
.get_top_users_activity_summary(params.start..params.end, 100)
.await?;
Ok(ErasedJson::pretty(summary))
}
async fn get_user_activity_timeline(
Path(user_id): Path<i32>,
Query(params): Query<TimePeriodParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<ErasedJson> {
let summary = app
.db
.get_user_activity_timeline(params.start..params.end, UserId(user_id))
.await?;
Ok(ErasedJson::pretty(summary))
}
#[derive(Deserialize)]
struct ActiveUserCountParams {
#[serde(flatten)]
period: TimePeriodParams,
durations_in_minutes: String,
#[serde(default)]
only_collaborative: bool,
}
#[derive(Serialize)]
struct ActiveUserSet {
active_time_in_minutes: u64,
user_count: usize,
}
async fn get_active_user_counts(
Query(params): Query<ActiveUserCountParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<ErasedJson> {
let durations_in_minutes = params.durations_in_minutes.split(',');
let mut user_sets = Vec::new();
for duration in durations_in_minutes {
let duration = duration
.parse()
.map_err(|_| anyhow!("invalid duration: {duration}"))?;
user_sets.push(ActiveUserSet {
active_time_in_minutes: duration,
user_count: app
.db
.get_active_user_count(
params.period.start..params.period.end,
Duration::from_secs(duration * 60),
params.only_collaborative,
)
.await?,
})
}
Ok(ErasedJson::pretty(user_sets))
}
#[derive(Deserialize)]
struct GetProjectMetadataParams {
project_id: u64,
}
async fn get_project_metadata(
Query(params): Query<GetProjectMetadataParams>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<ErasedJson> {
let extensions = app
.db
.get_project_extensions(ProjectId::from_proto(params.project_id))
.await?;
Ok(ErasedJson::pretty(json!({ "extensions": extensions })))
}
#[derive(Deserialize)]
struct CreateAccessTokenQueryParams {
public_key: String,
@@ -434,10 +335,10 @@ async fn get_user_for_invite_code(
}
async fn create_signup(
Json(params): Json<Signup>,
Json(params): Json<NewSignup>,
Extension(app): Extension<Arc<AppState>>,
) -> Result<()> {
app.db.create_signup(params).await?;
app.db.create_signup(&params).await?;
Ok(())
}
@@ -452,6 +353,8 @@ pub struct CreateInviteFromCodeParams {
invite_code: String,
email_address: String,
device_id: Option<String>,
#[serde(default)]
added_to_mailing_list: bool,
}
async fn create_invite_from_code(
@@ -464,6 +367,7 @@ async fn create_invite_from_code(
&params.invite_code,
&params.email_address,
params.device_id.as_deref(),
params.added_to_mailing_list,
)
.await?,
))
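
Because `added_to_mailing_list` is marked `#[serde(default)]`, older clients that omit the field still deserialize cleanly. A small illustrative sketch (the payload values are made up):

// Omitting the new field falls back to `false` via #[serde(default)];
// the missing Option<String> device_id likewise deserializes to None.
let params: CreateInviteFromCodeParams = serde_json::from_str(
    r#"{ "invite_code": "abc123", "email_address": "someone@example.com" }"#,
)?;
assert!(!params.added_to_mailing_list);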

View File

@@ -75,7 +75,7 @@ pub async fn validate_header<B>(mut req: Request<B>, next: Next<B>) -> impl Into
const MAX_ACCESS_TOKENS_TO_STORE: usize = 8;
pub async fn create_access_token(db: &dyn db::Db, user_id: UserId) -> Result<String> {
pub async fn create_access_token(db: &db::Database, user_id: UserId) -> Result<String> {
let access_token = rpc::auth::random_token();
let access_token_hash =
hash_access_token(&access_token).context("failed to hash access token")?;

View File

@@ -1,13 +1,7 @@
use collab::{Error, Result};
use db::{Db, PostgresDb, UserId};
use rand::prelude::*;
use collab::db;
use db::{ConnectOptions, Database};
use serde::{de::DeserializeOwned, Deserialize};
use std::fmt::Write;
use time::{Duration, OffsetDateTime};
#[allow(unused)]
#[path = "../db.rs"]
mod db;
#[derive(Debug, Deserialize)]
struct GitHubUser {
@@ -18,9 +12,8 @@ struct GitHubUser {
#[tokio::main]
async fn main() {
let mut rng = StdRng::from_entropy();
let database_url = std::env::var("DATABASE_URL").expect("missing DATABASE_URL env var");
let db = PostgresDb::new(&database_url, 5)
let db = Database::new(ConnectOptions::new(database_url))
.await
.expect("failed to connect to postgres database");
let github_token = std::env::var("GITHUB_TOKEN").expect("missing GITHUB_TOKEN env var");
@@ -64,16 +57,14 @@ async fn main() {
}
}
let mut zed_user_ids = Vec::<UserId>::new();
for (github_user, admin) in zed_users {
if let Some(user) = db
if db
.get_user_by_github_account(&github_user.login, Some(github_user.id))
.await
.expect("failed to fetch user")
.is_none()
{
zed_user_ids.push(user.id);
} else if let Some(email) = &github_user.email {
zed_user_ids.push(
if let Some(email) = &github_user.email {
db.create_user(
email,
admin,
@@ -84,11 +75,8 @@ async fn main() {
},
)
.await
.expect("failed to insert user")
.user_id,
);
} else if admin {
zed_user_ids.push(
.expect("failed to insert user");
} else if admin {
db.create_user(
&format!("{}@zed.dev", github_user.login),
admin,
@@ -99,62 +87,10 @@ async fn main() {
},
)
.await
.expect("failed to insert user")
.user_id,
);
.expect("failed to insert user");
}
}
}
let zed_org_id = if let Some(org) = db
.find_org_by_slug("zed")
.await
.expect("failed to fetch org")
{
org.id
} else {
db.create_org("Zed", "zed")
.await
.expect("failed to insert org")
};
let general_channel_id = if let Some(channel) = db
.get_org_channels(zed_org_id)
.await
.expect("failed to fetch channels")
.iter()
.find(|c| c.name == "General")
{
channel.id
} else {
let channel_id = db
.create_org_channel(zed_org_id, "General")
.await
.expect("failed to insert channel");
let now = OffsetDateTime::now_utc();
let max_seconds = Duration::days(100).as_seconds_f64();
let mut timestamps = (0..1000)
.map(|_| now - Duration::seconds_f64(rng.gen_range(0_f64..=max_seconds)))
.collect::<Vec<_>>();
timestamps.sort();
for timestamp in timestamps {
let sender_id = *zed_user_ids.choose(&mut rng).unwrap();
let body = lipsum::lipsum_words(rng.gen_range(1..=50));
db.create_channel_message(channel_id, sender_id, &body, timestamp, rng.gen())
.await
.expect("failed to insert message");
}
channel_id
};
for user_id in zed_user_ids {
db.add_org_member(zed_org_id, user_id, true)
.await
.expect("failed to insert org membership");
db.add_channel_member(general_channel_id, user_id, true)
.await
.expect("failed to insert channel membership");
}
}
async fn fetch_github<T: DeserializeOwned>(

File diff suppressed because it is too large

View File

@@ -0,0 +1,29 @@
use super::{AccessTokenId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "access_tokens")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: AccessTokenId,
pub user_id: UserId,
pub hash: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,58 @@
use super::{ContactId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "contacts")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ContactId,
pub user_id_a: UserId,
pub user_id_b: UserId,
pub a_to_b: bool,
pub should_notify: bool,
pub accepted: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::room_participant::Entity",
from = "Column::UserIdA",
to = "super::room_participant::Column::UserId"
)]
UserARoomParticipant,
#[sea_orm(
belongs_to = "super::room_participant::Entity",
from = "Column::UserIdB",
to = "super::room_participant::Column::UserId"
)]
UserBRoomParticipant,
}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Contact {
Accepted {
user_id: UserId,
should_notify: bool,
busy: bool,
},
Outgoing {
user_id: UserId,
},
Incoming {
user_id: UserId,
should_notify: bool,
},
}
impl Contact {
pub fn user_id(&self) -> UserId {
match self {
Contact::Accepted { user_id, .. } => *user_id,
Contact::Outgoing { user_id } => *user_id,
Contact::Incoming { user_id, .. } => *user_id,
}
}
}

View File

@@ -0,0 +1,30 @@
use super::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "language_servers")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub id: i64,
pub name: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,84 @@
use super::{ProjectId, Result, RoomId, ServerId, UserId};
use anyhow::anyhow;
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "projects")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ProjectId,
pub room_id: RoomId,
pub host_user_id: UserId,
pub host_connection_id: Option<i32>,
pub host_connection_server_id: Option<ServerId>,
}
impl Model {
pub fn host_connection(&self) -> Result<ConnectionId> {
let host_connection_server_id = self
.host_connection_server_id
.ok_or_else(|| anyhow!("empty host_connection_server_id"))?;
let host_connection_id = self
.host_connection_id
.ok_or_else(|| anyhow!("empty host_connection_id"))?;
Ok(ConnectionId {
owner_id: host_connection_server_id.0 as u32,
id: host_connection_id as u32,
})
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::HostUserId",
to = "super::user::Column::Id"
)]
HostUser,
#[sea_orm(
belongs_to = "super::room::Entity",
from = "Column::RoomId",
to = "super::room::Column::Id"
)]
Room,
#[sea_orm(has_many = "super::worktree::Entity")]
Worktrees,
#[sea_orm(has_many = "super::project_collaborator::Entity")]
Collaborators,
#[sea_orm(has_many = "super::language_server::Entity")]
LanguageServers,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::HostUser.def()
}
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl Related<super::worktree::Entity> for Entity {
fn to() -> RelationDef {
Relation::Worktrees.def()
}
}
impl Related<super::project_collaborator::Entity> for Entity {
fn to() -> RelationDef {
Relation::Collaborators.def()
}
}
impl Related<super::language_server::Entity> for Entity {
fn to() -> RelationDef {
Relation::LanguageServers.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
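
The `has_many` relations above let callers pull a project and its dependents in one round trip. A sketch, assuming a live `conn: sea_orm::DatabaseConnection`:

use sea_orm::EntityTrait;

// Loads every project together with its worktrees through Relation::Worktrees.
let projects: Vec<(project::Model, Vec<worktree::Model>)> = project::Entity::find()
    .find_with_related(worktree::Entity)
    .all(&conn)
    .await?;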

View File

@@ -0,0 +1,43 @@
use super::{ProjectCollaboratorId, ProjectId, ReplicaId, ServerId, UserId};
use rpc::ConnectionId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "project_collaborators")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ProjectCollaboratorId,
pub project_id: ProjectId,
pub connection_id: i32,
pub connection_server_id: ServerId,
pub user_id: UserId,
pub replica_id: ReplicaId,
pub is_host: bool,
}
impl Model {
pub fn connection(&self) -> ConnectionId {
ConnectionId {
owner_id: self.connection_server_id.0 as u32,
id: self.connection_id as u32,
}
}
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,32 @@
use super::RoomId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "rooms")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: RoomId,
pub live_kit_room: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::room_participant::Entity")]
RoomParticipant,
#[sea_orm(has_many = "super::project::Entity")]
Project,
}
impl Related<super::room_participant::Entity> for Entity {
fn to() -> RelationDef {
Relation::RoomParticipant.def()
}
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,50 @@
use super::{ProjectId, RoomId, RoomParticipantId, ServerId, UserId};
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "room_participants")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: RoomParticipantId,
pub room_id: RoomId,
pub user_id: UserId,
pub answering_connection_id: Option<i32>,
pub answering_connection_server_id: Option<ServerId>,
pub answering_connection_lost: bool,
pub location_kind: Option<i32>,
pub location_project_id: Option<ProjectId>,
pub initial_project_id: Option<ProjectId>,
pub calling_user_id: UserId,
pub calling_connection_id: i32,
pub calling_connection_server_id: Option<ServerId>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::user::Entity",
from = "Column::UserId",
to = "super::user::Column::Id"
)]
User,
#[sea_orm(
belongs_to = "super::room::Entity",
from = "Column::RoomId",
to = "super::room::Column::Id"
)]
Room,
}
impl Related<super::user::Entity> for Entity {
fn to() -> RelationDef {
Relation::User.def()
}
}
impl Related<super::room::Entity> for Entity {
fn to() -> RelationDef {
Relation::Room.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,15 @@
use super::ServerId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "servers")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: ServerId,
pub environment: String,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,57 @@
use super::{SignupId, UserId};
use sea_orm::{entity::prelude::*, FromQueryResult};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "signups")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: SignupId,
pub email_address: String,
pub email_confirmation_code: String,
pub email_confirmation_sent: bool,
pub created_at: DateTime,
pub device_id: Option<String>,
pub user_id: Option<UserId>,
pub inviting_user_id: Option<UserId>,
pub platform_mac: bool,
pub platform_linux: bool,
pub platform_windows: bool,
pub platform_unknown: bool,
pub editor_features: Option<Vec<String>>,
pub programming_languages: Option<Vec<String>>,
pub added_to_mailing_list: bool,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}
#[derive(Clone, Debug, PartialEq, Eq, FromQueryResult, Serialize, Deserialize)]
pub struct Invite {
pub email_address: String,
pub email_confirmation_code: String,
}
#[derive(Clone, Debug, Deserialize)]
pub struct NewSignup {
pub email_address: String,
pub platform_mac: bool,
pub platform_windows: bool,
pub platform_linux: bool,
pub editor_features: Vec<String>,
pub programming_languages: Vec<String>,
pub device_id: Option<String>,
pub added_to_mailing_list: bool,
pub created_at: Option<DateTime>,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, FromQueryResult)]
pub struct WaitlistSummary {
pub count: i64,
pub linux_count: i64,
pub mac_count: i64,
pub windows_count: i64,
pub unknown_count: i64,
}
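
For reference, a hedged sketch of how the `NewSignup` shape above is filled in along the API path shown earlier; the values are placeholders and `db` is the `Database` from this diff:

let signup = NewSignup {
    email_address: "someone@example.com".into(),
    platform_mac: true,
    platform_windows: false,
    platform_linux: false,
    editor_features: vec!["vim".into()],
    programming_languages: vec!["rust".into()],
    device_id: Some("device-123".into()),
    added_to_mailing_list: true,
    created_at: None,
};
db.create_signup(&signup).await?;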

File diff suppressed because it is too large

View File

@@ -0,0 +1,49 @@
use super::UserId;
use sea_orm::entity::prelude::*;
use serde::Serialize;
#[derive(Clone, Debug, Default, PartialEq, Eq, DeriveEntityModel, Serialize)]
#[sea_orm(table_name = "users")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: UserId,
pub github_login: String,
pub github_user_id: Option<i32>,
pub email_address: Option<String>,
pub admin: bool,
pub invite_code: Option<String>,
pub invite_count: i32,
pub inviter_id: Option<UserId>,
pub connected_once: bool,
pub metrics_id: Uuid,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::access_token::Entity")]
AccessToken,
#[sea_orm(has_one = "super::room_participant::Entity")]
RoomParticipant,
#[sea_orm(has_many = "super::project::Entity")]
HostedProjects,
}
impl Related<super::access_token::Entity> for Entity {
fn to() -> RelationDef {
Relation::AccessToken.def()
}
}
impl Related<super::room_participant::Entity> for Entity {
fn to() -> RelationDef {
Relation::RoomParticipant.def()
}
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::HostedProjects.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,36 @@
use super::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktrees")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: i64,
#[sea_orm(primary_key)]
pub project_id: ProjectId,
pub abs_path: String,
pub root_name: String,
pub visible: bool,
/// The last scan for which we've observed entries. It may be in progress.
pub scan_id: i64,
/// The last scan that fully completed.
pub completed_scan_id: i64,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::project::Entity",
from = "Column::ProjectId",
to = "super::project::Column::Id"
)]
Project,
}
impl Related<super::project::Entity> for Entity {
fn to() -> RelationDef {
Relation::Project.def()
}
}
impl ActiveModelBehavior for ActiveModel {}
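
The two scan columns documented above make "is this worktree fully synced?" a plain comparison. A tiny hypothetical helper (not part of the diff) to make the intent explicit:

// A scan is complete once the last fully-finished scan has caught up with
// the last scan we have observed entries for.
fn worktree_scan_is_complete(worktree: &worktree::Model) -> bool {
    worktree.completed_scan_id == worktree.scan_id
}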

View File

@@ -0,0 +1,21 @@
use super::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_diagnostic_summaries")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub worktree_id: i64,
#[sea_orm(primary_key)]
pub path: String,
pub language_server_id: i64,
pub error_count: i32,
pub warning_count: i32,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View File

@@ -0,0 +1,27 @@
use super::ProjectId;
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "worktree_entries")]
pub struct Model {
#[sea_orm(primary_key)]
pub project_id: ProjectId,
#[sea_orm(primary_key)]
pub worktree_id: i64,
#[sea_orm(primary_key)]
pub id: i64,
pub is_dir: bool,
pub path: String,
pub inode: i64,
pub mtime_seconds: i64,
pub mtime_nanos: i32,
pub is_symlink: bool,
pub is_ignored: bool,
pub is_deleted: bool,
pub scan_id: i64,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

File diff suppressed because it is too large

View File

@@ -0,0 +1,44 @@
use std::{future::Future, time::Duration};
#[derive(Clone)]
pub enum Executor {
Production,
#[cfg(test)]
Deterministic(std::sync::Arc<gpui::executor::Background>),
}
impl Executor {
pub fn spawn_detached<F>(&self, future: F)
where
F: 'static + Send + Future<Output = ()>,
{
match self {
Executor::Production => {
tokio::spawn(future);
}
#[cfg(test)]
Executor::Deterministic(background) => {
background.spawn(future).detach();
}
}
}
pub fn sleep(&self, duration: Duration) -> impl Future<Output = ()> {
let this = self.clone();
async move {
match this {
Executor::Production => tokio::time::sleep(duration).await,
#[cfg(test)]
Executor::Deterministic(background) => background.timer(duration).await,
}
}
}
pub fn record_backtrace(&self) {
match self {
Executor::Production => {}
#[cfg(test)]
Executor::Deterministic(background) => background.record_backtrace(),
}
}
}
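
A short usage sketch for the executor abstraction above: the same call site drives tokio in production and gpui's deterministic background executor under test. The timer duration and log message are arbitrary:

use std::time::Duration;

fn schedule_cleanup(executor: Executor) {
    // `sleep` returns a future that is a tokio timer in production and a
    // deterministic timer in tests, so the spawned task behaves identically.
    let timer = executor.sleep(Duration::from_secs(30));
    executor.spawn_detached(async move {
        timer.await;
        tracing::info!("cleanup timer fired");
    });
}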

View File

@@ -1,9 +1,23 @@
pub mod api;
pub mod auth;
pub mod db;
pub mod env;
pub mod executor;
pub mod rpc;
#[cfg(test)]
mod tests;
use axum::{http::StatusCode, response::IntoResponse};
use db::Database;
use serde::Deserialize;
use std::{path::PathBuf, sync::Arc};
pub type Result<T, E = Error> = std::result::Result<T, E>;
pub enum Error {
Http(StatusCode, String),
Database(sea_orm::error::DbErr),
Internal(anyhow::Error),
}
@@ -13,9 +27,9 @@ impl From<anyhow::Error> for Error {
}
}
impl From<sqlx::Error> for Error {
fn from(error: sqlx::Error) -> Self {
Self::Internal(error.into())
impl From<sea_orm::error::DbErr> for Error {
fn from(error: sea_orm::error::DbErr) -> Self {
Self::Database(error)
}
}
@@ -41,6 +55,9 @@ impl IntoResponse for Error {
fn into_response(self) -> axum::response::Response {
match self {
Error::Http(code, message) => (code, message).into_response(),
Error::Database(error) => {
(StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response()
}
Error::Internal(error) => {
(StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response()
}
@@ -52,6 +69,7 @@ impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Error::Http(code, message) => (code, message).fmt(f),
Error::Database(error) => error.fmt(f),
Error::Internal(error) => error.fmt(f),
}
}
@@ -61,9 +79,65 @@ impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Error::Http(code, message) => write!(f, "{code}: {message}"),
Error::Database(error) => error.fmt(f),
Error::Internal(error) => error.fmt(f),
}
}
}
impl std::error::Error for Error {}
#[derive(Default, Deserialize)]
pub struct Config {
pub http_port: u16,
pub database_url: String,
pub api_token: String,
pub invite_link_prefix: String,
pub live_kit_server: Option<String>,
pub live_kit_key: Option<String>,
pub live_kit_secret: Option<String>,
pub rust_log: Option<String>,
pub log_json: Option<bool>,
pub zed_environment: String,
}
#[derive(Default, Deserialize)]
pub struct MigrateConfig {
pub database_url: String,
pub migrations_path: Option<PathBuf>,
}
pub struct AppState {
pub db: Arc<Database>,
pub live_kit_client: Option<Arc<dyn live_kit_server::api::Client>>,
pub config: Config,
}
impl AppState {
pub async fn new(config: Config) -> Result<Arc<Self>> {
let mut db_options = db::ConnectOptions::new(config.database_url.clone());
db_options.max_connections(5);
let db = Database::new(db_options).await?;
let live_kit_client = if let Some(((server, key), secret)) = config
.live_kit_server
.as_ref()
.zip(config.live_kit_key.as_ref())
.zip(config.live_kit_secret.as_ref())
{
Some(Arc::new(live_kit_server::api::LiveKitClient::new(
server.clone(),
key.clone(),
secret.clone(),
)) as Arc<dyn live_kit_server::api::Client>)
} else {
None
};
let this = Self {
db: Arc::new(db),
live_kit_client,
config,
};
Ok(Arc::new(this))
}
}
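
A sketch of booting the pieces defined above from environment variables, assuming the same envy-based loading that main.rs uses for MigrateConfig:

// DATABASE_URL, ZED_ENVIRONMENT, etc. populate Config via envy, then
// AppState::new opens the connection pool and, if the LiveKit variables are
// all present, the LiveKit API client.
let config = envy::from_env::<collab::Config>().expect("error loading config");
let state = collab::AppState::new(config).await?;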

View File

@@ -1,86 +1,19 @@
mod api;
mod auth;
mod db;
mod env;
mod rpc;
#[cfg(test)]
mod db_tests;
#[cfg(test)]
mod integration_tests;
use crate::rpc::ResultExt as _;
use anyhow::anyhow;
use axum::{routing::get, Router};
use collab::{Error, Result};
use db::{Db, PostgresDb};
use serde::Deserialize;
use collab::{db, env, executor::Executor, AppState, Config, MigrateConfig, Result};
use db::Database;
use std::{
env::args,
net::{SocketAddr, TcpListener},
path::PathBuf,
sync::Arc,
time::Duration,
path::Path,
};
use tokio::signal;
use tokio::signal::unix::SignalKind;
use tracing_log::LogTracer;
use tracing_subscriber::{filter::EnvFilter, fmt::format::JsonFields, Layer};
use util::ResultExt;
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
#[derive(Default, Deserialize)]
pub struct Config {
pub http_port: u16,
pub database_url: String,
pub api_token: String,
pub invite_link_prefix: String,
pub live_kit_server: Option<String>,
pub live_kit_key: Option<String>,
pub live_kit_secret: Option<String>,
pub rust_log: Option<String>,
pub log_json: Option<bool>,
}
#[derive(Default, Deserialize)]
pub struct MigrateConfig {
pub database_url: String,
pub migrations_path: Option<PathBuf>,
}
pub struct AppState {
db: Arc<dyn Db>,
live_kit_client: Option<Arc<dyn live_kit_server::api::Client>>,
config: Config,
}
impl AppState {
async fn new(config: Config) -> Result<Arc<Self>> {
let db = PostgresDb::new(&config.database_url, 5).await?;
let live_kit_client = if let Some(((server, key), secret)) = config
.live_kit_server
.as_ref()
.zip(config.live_kit_key.as_ref())
.zip(config.live_kit_secret.as_ref())
{
Some(Arc::new(live_kit_server::api::LiveKitClient::new(
server.clone(),
key.clone(),
secret.clone(),
)) as Arc<dyn live_kit_server::api::Client>)
} else {
None
};
let this = Self {
db: Arc::new(db),
live_kit_client,
config,
};
Ok(Arc::new(this))
}
}
#[tokio::main]
async fn main() -> Result<()> {
if let Err(error) = env::load_dotenv() {
@@ -96,13 +29,14 @@ async fn main() -> Result<()> {
}
Some("migrate") => {
let config = envy::from_env::<MigrateConfig>().expect("error loading config");
let db = PostgresDb::new(&config.database_url, 5).await?;
let mut db_options = db::ConnectOptions::new(config.database_url.clone());
db_options.max_connections(5);
let db = Database::new(db_options).await?;
let migrations_path = config
.migrations_path
.as_deref()
.or(db::DEFAULT_MIGRATIONS_PATH.map(|s| s.as_ref()))
.ok_or_else(|| anyhow!("missing MIGRATIONS_PATH environment variable"))?;
.unwrap_or_else(|| Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations")));
let migrations = db.migrate(&migrations_path, false).await?;
for (migration, duration) in migrations {
@@ -119,20 +53,35 @@ async fn main() -> Result<()> {
init_tracing(&config);
let state = AppState::new(config).await?;
let listener = TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port))
.expect("failed to bind TCP listener");
let rpc_server = rpc::Server::new(state.clone(), None);
rpc_server
.start_recording_project_activity(Duration::from_secs(5 * 60), rpc::RealExecutor);
let epoch = state
.db
.create_server(&state.config.zed_environment)
.await?;
let rpc_server = collab::rpc::Server::new(epoch, state.clone(), Executor::Production);
rpc_server.start().await?;
let app = api::routes(rpc_server.clone(), state.clone())
.merge(rpc::routes(rpc_server.clone()))
let app = collab::api::routes(rpc_server.clone(), state.clone())
.merge(collab::rpc::routes(rpc_server.clone()))
.merge(Router::new().route("/", get(handle_root)));
axum::Server::from_tcp(listener)?
.serve(app.into_make_service_with_connect_info::<SocketAddr>())
.with_graceful_shutdown(graceful_shutdown(rpc_server, state))
.with_graceful_shutdown(async move {
let mut sigterm = tokio::signal::unix::signal(SignalKind::terminate())
.expect("failed to listen for interrupt signal");
let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt())
.expect("failed to listen for interrupt signal");
let sigterm = sigterm.recv();
let sigint = sigint.recv();
futures::pin_mut!(sigterm, sigint);
futures::future::select(sigterm, sigint).await;
tracing::info!("Received interrupt signal");
rpc_server.teardown();
})
.await?;
}
_ => {
@@ -177,52 +126,3 @@ pub fn init_tracing(config: &Config) -> Option<()> {
None
}
async fn graceful_shutdown(rpc_server: Arc<rpc::Server>, state: Arc<AppState>) {
let ctrl_c = async {
signal::ctrl_c()
.await
.expect("failed to install Ctrl+C handler");
};
#[cfg(unix)]
let terminate = async {
signal::unix::signal(signal::unix::SignalKind::terminate())
.expect("failed to install signal handler")
.recv()
.await;
};
#[cfg(not(unix))]
let terminate = std::future::pending::<()>();
tokio::select! {
_ = ctrl_c => {},
_ = terminate => {},
}
if let Some(live_kit) = state.live_kit_client.as_ref() {
let deletions = rpc_server
.store()
.await
.rooms()
.values()
.map(|room| {
let name = room.live_kit_room.clone();
async {
live_kit.delete_room(name).await.trace_err();
}
})
.collect::<Vec<_>>();
tracing::info!("deleting all live-kit rooms");
if let Err(_) = tokio::time::timeout(
Duration::from_secs(10),
futures::future::join_all(deletions),
)
.await
{
tracing::error!("timed out waiting for live-kit room deletion");
}
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,98 @@
use crate::db::UserId;
use anyhow::{anyhow, Result};
use collections::{BTreeMap, HashSet};
use rpc::ConnectionId;
use serde::Serialize;
use tracing::instrument;
#[derive(Default, Serialize)]
pub struct ConnectionPool {
connections: BTreeMap<ConnectionId, Connection>,
connected_users: BTreeMap<UserId, ConnectedUser>,
}
#[derive(Default, Serialize)]
struct ConnectedUser {
connection_ids: HashSet<ConnectionId>,
}
#[derive(Serialize)]
pub struct Connection {
pub user_id: UserId,
pub admin: bool,
}
impl ConnectionPool {
pub fn reset(&mut self) {
self.connections.clear();
self.connected_users.clear();
}
#[instrument(skip(self))]
pub fn add_connection(&mut self, connection_id: ConnectionId, user_id: UserId, admin: bool) {
self.connections
.insert(connection_id, Connection { user_id, admin });
let connected_user = self.connected_users.entry(user_id).or_default();
connected_user.connection_ids.insert(connection_id);
}
#[instrument(skip(self))]
pub fn remove_connection(&mut self, connection_id: ConnectionId) -> Result<()> {
let connection = self
.connections
.get_mut(&connection_id)
.ok_or_else(|| anyhow!("no such connection"))?;
let user_id = connection.user_id;
let connected_user = self.connected_users.get_mut(&user_id).unwrap();
connected_user.connection_ids.remove(&connection_id);
if connected_user.connection_ids.is_empty() {
self.connected_users.remove(&user_id);
}
self.connections.remove(&connection_id).unwrap();
Ok(())
}
pub fn connections(&self) -> impl Iterator<Item = &Connection> {
self.connections.values()
}
pub fn user_connection_ids(&self, user_id: UserId) -> impl Iterator<Item = ConnectionId> + '_ {
self.connected_users
.get(&user_id)
.into_iter()
.map(|state| &state.connection_ids)
.flatten()
.copied()
}
pub fn is_user_online(&self, user_id: UserId) -> bool {
!self
.connected_users
.get(&user_id)
.unwrap_or(&Default::default())
.connection_ids
.is_empty()
}
#[cfg(test)]
pub fn check_invariants(&self) {
for (connection_id, connection) in &self.connections {
assert!(self
.connected_users
.get(&connection.user_id)
.unwrap()
.connection_ids
.contains(connection_id));
}
for (user_id, state) in &self.connected_users {
for connection_id in &state.connection_ids {
assert_eq!(
self.connections.get(connection_id).unwrap().user_id,
*user_id
);
}
}
}
}
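
A minimal sketch of the pool above in isolation; the connection id and user id are made up, whereas in the server they come from the RPC layer and the database:

let mut pool = ConnectionPool::default();
let connection_id = ConnectionId { owner_id: 1, id: 7 };

pool.add_connection(connection_id, UserId(42), /* admin */ false);
assert!(pool.is_user_online(UserId(42)));

pool.remove_connection(connection_id)?;
assert!(!pool.is_user_online(UserId(42)));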

File diff suppressed because it is too large

crates/collab/src/tests.rs (new file, 462 lines)
View File

@@ -0,0 +1,462 @@
use crate::{
db::{NewUserParams, TestDb, UserId},
executor::Executor,
rpc::{Server, CLEANUP_TIMEOUT},
AppState,
};
use anyhow::anyhow;
use call::ActiveCall;
use client::{
self, proto::PeerId, test::FakeHttpClient, Client, Connection, Credentials,
EstablishConnectionError, UserStore,
};
use collections::{HashMap, HashSet};
use fs::FakeFs;
use futures::{channel::oneshot, StreamExt as _};
use gpui::{
executor::Deterministic, test::EmptyView, ModelHandle, Task, TestAppContext, ViewHandle,
};
use language::LanguageRegistry;
use parking_lot::Mutex;
use project::{Project, WorktreeId};
use settings::Settings;
use std::{
env,
ops::Deref,
path::{Path, PathBuf},
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
Arc,
},
};
use theme::ThemeRegistry;
use workspace::Workspace;
mod integration_tests;
mod randomized_integration_tests;
struct TestServer {
app_state: Arc<AppState>,
server: Arc<Server>,
connection_killers: Arc<Mutex<HashMap<PeerId, Arc<AtomicBool>>>>,
forbid_connections: Arc<AtomicBool>,
_test_db: TestDb,
test_live_kit_server: Arc<live_kit_client::TestServer>,
}
impl TestServer {
async fn start(deterministic: &Arc<Deterministic>) -> Self {
static NEXT_LIVE_KIT_SERVER_ID: AtomicUsize = AtomicUsize::new(0);
let use_postgres = env::var("USE_POSTGRES").ok();
let use_postgres = use_postgres.as_deref();
let test_db = if use_postgres == Some("true") || use_postgres == Some("1") {
TestDb::postgres(deterministic.build_background())
} else {
TestDb::sqlite(deterministic.build_background())
};
let live_kit_server_id = NEXT_LIVE_KIT_SERVER_ID.fetch_add(1, SeqCst);
let live_kit_server = live_kit_client::TestServer::create(
format!("http://livekit.{}.test", live_kit_server_id),
format!("devkey-{}", live_kit_server_id),
format!("secret-{}", live_kit_server_id),
deterministic.build_background(),
)
.unwrap();
let app_state = Self::build_app_state(&test_db, &live_kit_server).await;
let epoch = app_state
.db
.create_server(&app_state.config.zed_environment)
.await
.unwrap();
let server = Server::new(
epoch,
app_state.clone(),
Executor::Deterministic(deterministic.build_background()),
);
server.start().await.unwrap();
// Advance clock to ensure the server's cleanup task is finished.
deterministic.advance_clock(CLEANUP_TIMEOUT);
Self {
app_state,
server,
connection_killers: Default::default(),
forbid_connections: Default::default(),
_test_db: test_db,
test_live_kit_server: live_kit_server,
}
}
async fn reset(&self) {
self.app_state.db.reset();
let epoch = self
.app_state
.db
.create_server(&self.app_state.config.zed_environment)
.await
.unwrap();
self.server.reset(epoch);
}
async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient {
cx.update(|cx| {
cx.set_global(Settings::test(cx));
});
let http = FakeHttpClient::with_404_response();
let user_id = if let Ok(Some(user)) = self
.app_state
.db
.get_user_by_github_account(name, None)
.await
{
user.id
} else {
self.app_state
.db
.create_user(
&format!("{name}@example.com"),
false,
NewUserParams {
github_login: name.into(),
github_user_id: 0,
invite_count: 0,
},
)
.await
.expect("creating user failed")
.user_id
};
let client_name = name.to_string();
let mut client = cx.read(|cx| Client::new(http.clone(), cx));
let server = self.server.clone();
let db = self.app_state.db.clone();
let connection_killers = self.connection_killers.clone();
let forbid_connections = self.forbid_connections.clone();
Arc::get_mut(&mut client)
.unwrap()
.set_id(user_id.0 as usize)
.override_authenticate(move |cx| {
cx.spawn(|_| async move {
let access_token = "the-token".to_string();
Ok(Credentials {
user_id: user_id.0 as u64,
access_token,
})
})
})
.override_establish_connection(move |credentials, cx| {
assert_eq!(credentials.user_id, user_id.0 as u64);
assert_eq!(credentials.access_token, "the-token");
let server = server.clone();
let db = db.clone();
let connection_killers = connection_killers.clone();
let forbid_connections = forbid_connections.clone();
let client_name = client_name.clone();
cx.spawn(move |cx| async move {
if forbid_connections.load(SeqCst) {
Err(EstablishConnectionError::other(anyhow!(
"server is forbidding connections"
)))
} else {
let (client_conn, server_conn, killed) =
Connection::in_memory(cx.background());
let (connection_id_tx, connection_id_rx) = oneshot::channel();
let user = db
.get_user_by_id(user_id)
.await
.expect("retrieving user failed")
.unwrap();
cx.background()
.spawn(server.handle_connection(
server_conn,
client_name,
user,
Some(connection_id_tx),
Executor::Deterministic(cx.background()),
))
.detach();
let connection_id = connection_id_rx.await.unwrap();
connection_killers
.lock()
.insert(connection_id.into(), killed);
Ok(client_conn)
}
})
});
let fs = FakeFs::new(cx.background());
let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx));
let app_state = Arc::new(workspace::AppState {
client: client.clone(),
user_store: user_store.clone(),
languages: Arc::new(LanguageRegistry::new(Task::ready(()))),
themes: ThemeRegistry::new((), cx.font_cache()),
fs: fs.clone(),
build_window_options: |_, _, _| Default::default(),
initialize_workspace: |_, _, _| unimplemented!(),
dock_default_item_factory: |_, _| unimplemented!(),
});
Project::init(&client);
cx.update(|cx| {
workspace::init(app_state.clone(), cx);
call::init(client.clone(), user_store.clone(), cx);
});
client
.authenticate_and_connect(false, &cx.to_async())
.await
.unwrap();
let client = TestClient {
client,
username: name.to_string(),
local_projects: Default::default(),
remote_projects: Default::default(),
next_root_dir_id: 0,
user_store,
fs,
language_registry: Arc::new(LanguageRegistry::test()),
buffers: Default::default(),
};
client.wait_for_current_user(cx).await;
client
}
fn disconnect_client(&self, peer_id: PeerId) {
self.connection_killers
.lock()
.remove(&peer_id)
.unwrap()
.store(true, SeqCst);
}
fn forbid_connections(&self) {
self.forbid_connections.store(true, SeqCst);
}
fn allow_connections(&self) {
self.forbid_connections.store(false, SeqCst);
}
async fn make_contacts(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) {
for ix in 1..clients.len() {
let (left, right) = clients.split_at_mut(ix);
let (client_a, cx_a) = left.last_mut().unwrap();
for (client_b, cx_b) in right {
client_a
.user_store
.update(*cx_a, |store, cx| {
store.request_contact(client_b.user_id().unwrap(), cx)
})
.await
.unwrap();
cx_a.foreground().run_until_parked();
client_b
.user_store
.update(*cx_b, |store, cx| {
store.respond_to_contact_request(client_a.user_id().unwrap(), true, cx)
})
.await
.unwrap();
}
}
}
async fn create_room(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) {
self.make_contacts(clients).await;
let (left, right) = clients.split_at_mut(1);
let (_client_a, cx_a) = &mut left[0];
let active_call_a = cx_a.read(ActiveCall::global);
for (client_b, cx_b) in right {
let user_id_b = client_b.current_user_id(*cx_b).to_proto();
active_call_a
.update(*cx_a, |call, cx| call.invite(user_id_b, None, cx))
.await
.unwrap();
cx_b.foreground().run_until_parked();
let active_call_b = cx_b.read(ActiveCall::global);
active_call_b
.update(*cx_b, |call, cx| call.accept_incoming(cx))
.await
.unwrap();
}
}
async fn build_app_state(
test_db: &TestDb,
fake_server: &live_kit_client::TestServer,
) -> Arc<AppState> {
Arc::new(AppState {
db: test_db.db().clone(),
live_kit_client: Some(Arc::new(fake_server.create_api_client())),
config: Default::default(),
})
}
}
impl Deref for TestServer {
type Target = Server;
fn deref(&self) -> &Self::Target {
&self.server
}
}
impl Drop for TestServer {
fn drop(&mut self) {
self.server.teardown();
self.test_live_kit_server.teardown().unwrap();
}
}
struct TestClient {
client: Arc<Client>,
username: String,
local_projects: Vec<ModelHandle<Project>>,
remote_projects: Vec<ModelHandle<Project>>,
next_root_dir_id: usize,
pub user_store: ModelHandle<UserStore>,
language_registry: Arc<LanguageRegistry>,
fs: Arc<FakeFs>,
buffers: HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>,
}
impl Deref for TestClient {
type Target = Arc<Client>;
fn deref(&self) -> &Self::Target {
&self.client
}
}
struct ContactsSummary {
pub current: Vec<String>,
pub outgoing_requests: Vec<String>,
pub incoming_requests: Vec<String>,
}
impl TestClient {
pub fn current_user_id(&self, cx: &TestAppContext) -> UserId {
UserId::from_proto(
self.user_store
.read_with(cx, |user_store, _| user_store.current_user().unwrap().id),
)
}
async fn wait_for_current_user(&self, cx: &TestAppContext) {
let mut authed_user = self
.user_store
.read_with(cx, |user_store, _| user_store.watch_current_user());
while authed_user.next().await.unwrap().is_none() {}
}
async fn clear_contacts(&self, cx: &mut TestAppContext) {
self.user_store
.update(cx, |store, _| store.clear_contacts())
.await;
}
fn summarize_contacts(&self, cx: &TestAppContext) -> ContactsSummary {
self.user_store.read_with(cx, |store, _| ContactsSummary {
current: store
.contacts()
.iter()
.map(|contact| contact.user.github_login.clone())
.collect(),
outgoing_requests: store
.outgoing_contact_requests()
.iter()
.map(|user| user.github_login.clone())
.collect(),
incoming_requests: store
.incoming_contact_requests()
.iter()
.map(|user| user.github_login.clone())
.collect(),
})
}
async fn build_local_project(
&self,
root_path: impl AsRef<Path>,
cx: &mut TestAppContext,
) -> (ModelHandle<Project>, WorktreeId) {
let project = cx.update(|cx| {
Project::local(
self.client.clone(),
self.user_store.clone(),
self.language_registry.clone(),
self.fs.clone(),
cx,
)
});
let (worktree, _) = project
.update(cx, |p, cx| {
p.find_or_create_local_worktree(root_path, true, cx)
})
.await
.unwrap();
worktree
.read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete())
.await;
(project, worktree.read_with(cx, |tree, _| tree.id()))
}
async fn build_remote_project(
&self,
host_project_id: u64,
guest_cx: &mut TestAppContext,
) -> ModelHandle<Project> {
let active_call = guest_cx.read(ActiveCall::global);
let room = active_call.read_with(guest_cx, |call, _| call.room().unwrap().clone());
room.update(guest_cx, |room, cx| {
room.join_project(
host_project_id,
self.language_registry.clone(),
self.fs.clone(),
cx,
)
})
.await
.unwrap()
}
fn build_workspace(
&self,
project: &ModelHandle<Project>,
cx: &mut TestAppContext,
) -> ViewHandle<Workspace> {
let (_, root_view) = cx.add_window(|_| EmptyView);
cx.add_view(&root_view, |cx| {
Workspace::new(
Default::default(),
0,
project.clone(),
|_, _| unimplemented!(),
cx,
)
})
}
fn create_new_root_dir(&mut self) -> PathBuf {
format!(
"/{}-root-{}",
self.username,
util::post_inc(&mut self.next_root_dir_id)
)
.into()
}
}
impl Drop for TestClient {
fn drop(&mut self) {
self.client.teardown();
}
}
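
A hedged sketch of how this harness is typically driven from a collaboration test; the `#[gpui::test]` signature mirrors the integration tests referenced at the top of the file:

#[gpui::test]
async fn test_basic_call(
    deterministic: Arc<Deterministic>,
    cx_a: &mut TestAppContext,
    cx_b: &mut TestAppContext,
) {
    let mut server = TestServer::start(&deterministic).await;
    let client_a = server.create_client(cx_a, "user_a").await;
    let client_b = server.create_client(cx_b, "user_b").await;

    // Wires up contacts and puts both clients in the same room.
    server
        .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)])
        .await;
}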

File diff suppressed because it is too large

View File

@@ -2,6 +2,7 @@
name = "collab_ui"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/collab_ui.rs"

View File

@@ -1,6 +1,6 @@
use crate::{contact_notification::ContactNotification, contacts_popover};
use call::{ActiveCall, ParticipantLocation};
use client::{Authenticate, ContactEventKind, PeerId, User, UserStore};
use client::{proto::PeerId, Authenticate, ContactEventKind, User, UserStore};
use clock::ReplicaId;
use contacts_popover::ContactsPopover;
use gpui::{
@@ -342,24 +342,27 @@ impl CollabTitlebarItem {
let mut participants = room
.read(cx)
.remote_participants()
.iter()
.map(|(peer_id, collaborator)| (*peer_id, collaborator.clone()))
.values()
.cloned()
.collect::<Vec<_>>();
participants
.sort_by_key(|(peer_id, _)| Some(project.collaborators().get(peer_id)?.replica_id));
participants.sort_by_key(|p| Some(project.collaborators().get(&p.peer_id)?.replica_id));
participants
.into_iter()
.filter_map(|(peer_id, participant)| {
.filter_map(|participant| {
let project = workspace.read(cx).project().read(cx);
let replica_id = project
.collaborators()
.get(&peer_id)
.get(&participant.peer_id)
.map(|collaborator| collaborator.replica_id);
let user = participant.user.clone();
Some(self.render_avatar(
&user,
replica_id,
Some((peer_id, &user.github_login, participant.location)),
Some((
participant.peer_id,
&user.github_login,
participant.location,
)),
workspace,
theme,
cx,
@@ -474,7 +477,7 @@ impl CollabTitlebarItem {
cx.dispatch_action(ToggleFollow(peer_id))
})
.with_tooltip::<ToggleFollow, _>(
peer_id.0 as usize,
peer_id.as_u64() as usize,
if is_followed {
format!("Unfollow {}", peer_github_login)
} else {
@@ -487,22 +490,24 @@ impl CollabTitlebarItem {
.boxed()
} else if let ParticipantLocation::SharedProject { project_id } = location {
let user_id = user.id;
MouseEventHandler::<JoinProject>::new(peer_id.0 as usize, cx, move |_, _| content)
.with_cursor_style(CursorStyle::PointingHand)
.on_click(MouseButton::Left, move |_, cx| {
cx.dispatch_action(JoinProject {
project_id,
follow_user_id: user_id,
})
MouseEventHandler::<JoinProject>::new(peer_id.as_u64() as usize, cx, move |_, _| {
content
})
.with_cursor_style(CursorStyle::PointingHand)
.on_click(MouseButton::Left, move |_, cx| {
cx.dispatch_action(JoinProject {
project_id,
follow_user_id: user_id,
})
.with_tooltip::<JoinProject, _>(
peer_id.0 as usize,
format!("Follow {} into external project", peer_github_login),
Some(Box::new(FollowNextCollaborator)),
theme.tooltip.clone(),
cx,
)
.boxed()
})
.with_tooltip::<JoinProject, _>(
peer_id.as_u64() as usize,
format!("Follow {} into external project", peer_github_login),
Some(Box::new(FollowNextCollaborator)),
theme.tooltip.clone(),
cx,
)
.boxed()
} else {
content
}

View File

@@ -7,10 +7,10 @@ mod incoming_call_notification;
mod notifications;
mod project_shared_notification;
use anyhow::anyhow;
use call::ActiveCall;
pub use collab_titlebar_item::{CollabTitlebarItem, ToggleCollaborationMenu};
use gpui::MutableAppContext;
use project::Project;
use std::sync::Arc;
use workspace::{AppState, JoinProject, ToggleFollow, Workspace};
@@ -39,22 +39,35 @@ pub fn init(app_state: Arc<AppState>, cx: &mut MutableAppContext) {
let workspace = if let Some(existing_workspace) = existing_workspace {
existing_workspace
} else {
let project = Project::remote(
project_id,
app_state.client.clone(),
app_state.user_store.clone(),
app_state.project_store.clone(),
app_state.languages.clone(),
app_state.fs.clone(),
cx.clone(),
)
.await?;
let active_call = cx.read(ActiveCall::global);
let room = active_call
.read_with(&cx, |call, _| call.room().cloned())
.ok_or_else(|| anyhow!("not in a call"))?;
let project = room
.update(&mut cx, |room, cx| {
room.join_project(
project_id,
app_state.languages.clone(),
app_state.fs.clone(),
cx,
)
})
.await?;
let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| {
let mut workspace = Workspace::new(project, app_state.default_item_factory, cx);
(app_state.initialize_workspace)(&mut workspace, &app_state, cx);
workspace
});
let (_, workspace) = cx.add_window(
(app_state.build_window_options)(None, None, cx.platform().as_ref()),
|cx| {
let mut workspace = Workspace::new(
Default::default(),
0,
project,
app_state.dock_default_item_factory,
cx,
);
(app_state.initialize_workspace)(&mut workspace, &app_state, cx);
workspace
},
);
workspace
};
@@ -68,7 +81,7 @@ pub fn init(app_state: Arc<AppState>, cx: &mut MutableAppContext) {
.remote_participants()
.iter()
.find(|(_, participant)| participant.user.id == follow_user_id)
.map(|(peer_id, _)| *peer_id)
.map(|(_, p)| p.peer_id)
.or_else(|| {
// If we couldn't follow the given user, follow the host instead.
let collaborator = workspace

View File

@@ -170,8 +170,8 @@ impl ContactFinder {
let this = cx.weak_handle();
Self {
picker: cx.add_view(|cx| {
Picker::new(this, cx)
.with_theme(|cx| &cx.global::<Settings>().theme.contact_finder.picker)
Picker::new("Search collaborator by username...", this, cx)
.with_theme(|theme| theme.contact_finder.picker.clone())
}),
potential_contacts: Arc::from([]),
user_store,

View File

@@ -1,20 +1,22 @@
use std::{mem, sync::Arc};
use crate::contacts_popover;
use call::ActiveCall;
use client::{Contact, PeerId, User, UserStore};
use client::{proto::PeerId, Contact, User, UserStore};
use editor::{Cancel, Editor};
use futures::StreamExt;
use fuzzy::{match_strings, StringMatchCandidate};
use gpui::{
elements::*,
geometry::{rect::RectF, vector::vec2f},
impl_actions, impl_internal_actions, keymap, AppContext, CursorStyle, Entity, ModelHandle,
MouseButton, MutableAppContext, RenderContext, Subscription, View, ViewContext, ViewHandle,
impl_actions, impl_internal_actions,
keymap_matcher::KeymapContext,
AppContext, CursorStyle, Entity, ModelHandle, MouseButton, MutableAppContext, PromptLevel,
RenderContext, Subscription, View, ViewContext, ViewHandle,
};
use menu::{Confirm, SelectNext, SelectPrev};
use project::Project;
use serde::Deserialize;
use settings::Settings;
use std::{mem, sync::Arc};
use theme::IconButton;
use util::ResultExt;
use workspace::{JoinProject, OpenSharedScreen};
@@ -175,7 +177,9 @@ impl ContactList {
) -> Self {
let filter_editor = cx.add_view(|cx| {
let mut editor = Editor::single_line(
Some(|theme| theme.contact_list.user_query_editor.clone()),
Some(Arc::new(|theme| {
theme.contact_list.user_query_editor.clone()
})),
cx,
);
editor.set_placeholder_text("Filter contacts", cx);
@@ -295,9 +299,19 @@ impl ContactList {
}
fn remove_contact(&mut self, request: &RemoveContact, cx: &mut ViewContext<Self>) {
self.user_store
.update(cx, |store, cx| store.remove_contact(request.0, cx))
.detach();
let user_id = request.0;
let user_store = self.user_store.clone();
let prompt_message = "Are you sure you want to remove this contact?";
let mut answer = cx.prompt(PromptLevel::Warning, prompt_message, &["Remove", "Cancel"]);
cx.spawn(|_, mut cx| async move {
if answer.next().await == Some(0) {
user_store
.update(&mut cx, |store, cx| store.remove_contact(user_id, cx))
.await
.unwrap();
}
})
.detach();
}
fn respond_to_contact_request(
@@ -459,15 +473,13 @@ impl ContactList {
// Populate remote participants.
self.match_candidates.clear();
self.match_candidates
.extend(
room.remote_participants()
.iter()
.map(|(peer_id, participant)| StringMatchCandidate {
id: peer_id.0 as usize,
string: participant.user.github_login.clone(),
char_bag: participant.user.github_login.chars().collect(),
}),
);
.extend(room.remote_participants().iter().map(|(_, participant)| {
StringMatchCandidate {
id: participant.user.id as usize,
string: participant.user.github_login.clone(),
char_bag: participant.user.github_login.chars().collect(),
}
}));
let matches = executor.block(match_strings(
&self.match_candidates,
&query,
@@ -477,8 +489,8 @@ impl ContactList {
executor.clone(),
));
for mat in matches {
let peer_id = PeerId(mat.candidate_id as u32);
let participant = &room.remote_participants()[&peer_id];
let user_id = mat.candidate_id as u64;
let participant = &room.remote_participants()[&user_id];
participant_entries.push(ContactEntry::CallParticipant {
user: participant.user.clone(),
is_pending: false,
@@ -494,7 +506,7 @@ impl ContactList {
}
if !participant.tracks.is_empty() {
participant_entries.push(ContactEntry::ParticipantScreen {
peer_id,
peer_id: participant.peer_id,
is_last: true,
});
}
@@ -879,75 +891,80 @@ impl ContactList {
let baseline_offset =
row.name.text.baseline_offset(font_cache) + (theme.row_height - line_height) / 2.;
MouseEventHandler::<OpenSharedScreen>::new(peer_id.0 as usize, cx, |mouse_state, _| {
let tree_branch = *tree_branch.style_for(mouse_state, is_selected);
let row = theme.project_row.style_for(mouse_state, is_selected);
MouseEventHandler::<OpenSharedScreen>::new(
peer_id.as_u64() as usize,
cx,
|mouse_state, _| {
let tree_branch = *tree_branch.style_for(mouse_state, is_selected);
let row = theme.project_row.style_for(mouse_state, is_selected);
Flex::row()
.with_child(
Stack::new()
.with_child(
Canvas::new(move |bounds, _, cx| {
let start_x = bounds.min_x() + (bounds.width() / 2.)
- (tree_branch.width / 2.);
let end_x = bounds.max_x();
let start_y = bounds.min_y();
let end_y = bounds.min_y() + baseline_offset - (cap_height / 2.);
Flex::row()
.with_child(
Stack::new()
.with_child(
Canvas::new(move |bounds, _, cx| {
let start_x = bounds.min_x() + (bounds.width() / 2.)
- (tree_branch.width / 2.);
let end_x = bounds.max_x();
let start_y = bounds.min_y();
let end_y =
bounds.min_y() + baseline_offset - (cap_height / 2.);
cx.scene.push_quad(gpui::Quad {
bounds: RectF::from_points(
vec2f(start_x, start_y),
vec2f(
start_x + tree_branch.width,
if is_last { end_y } else { bounds.max_y() },
cx.scene.push_quad(gpui::Quad {
bounds: RectF::from_points(
vec2f(start_x, start_y),
vec2f(
start_x + tree_branch.width,
if is_last { end_y } else { bounds.max_y() },
),
),
),
background: Some(tree_branch.color),
border: gpui::Border::default(),
corner_radius: 0.,
});
cx.scene.push_quad(gpui::Quad {
bounds: RectF::from_points(
vec2f(start_x, end_y),
vec2f(end_x, end_y + tree_branch.width),
),
background: Some(tree_branch.color),
border: gpui::Border::default(),
corner_radius: 0.,
});
})
background: Some(tree_branch.color),
border: gpui::Border::default(),
corner_radius: 0.,
});
cx.scene.push_quad(gpui::Quad {
bounds: RectF::from_points(
vec2f(start_x, end_y),
vec2f(end_x, end_y + tree_branch.width),
),
background: Some(tree_branch.color),
border: gpui::Border::default(),
corner_radius: 0.,
});
})
.boxed(),
)
.constrained()
.with_width(host_avatar_height)
.boxed(),
)
.constrained()
.with_width(host_avatar_height)
.boxed(),
)
.with_child(
Svg::new("icons/disable_screen_sharing_12.svg")
.with_color(row.icon.color)
.constrained()
.with_width(row.icon.width)
.aligned()
.left()
.contained()
.with_style(row.icon.container)
.boxed(),
)
.with_child(
Label::new("Screen".into(), row.name.text.clone())
.aligned()
.left()
.contained()
.with_style(row.name.container)
.flex(1., false)
.boxed(),
)
.constrained()
.with_height(theme.row_height)
.contained()
.with_style(row.container)
.boxed()
})
)
.with_child(
Svg::new("icons/disable_screen_sharing_12.svg")
.with_color(row.icon.color)
.constrained()
.with_width(row.icon.width)
.aligned()
.left()
.contained()
.with_style(row.icon.container)
.boxed(),
)
.with_child(
Label::new("Screen".into(), row.name.text.clone())
.aligned()
.left()
.contained()
.with_style(row.name.container)
.flex(1., false)
.boxed(),
)
.constrained()
.with_height(theme.row_height)
.contained()
.with_style(row.container)
.boxed()
},
)
.with_cursor_style(CursorStyle::PointingHand)
.on_click(MouseButton::Left, move |_, cx| {
cx.dispatch_action(OpenSharedScreen { peer_id });
@@ -1044,7 +1061,7 @@ impl ContactList {
let user_id = contact.user.id;
let initial_project = project.clone();
let mut element =
MouseEventHandler::<Contact>::new(contact.user.id as usize, cx, |_, _| {
MouseEventHandler::<Contact>::new(contact.user.id as usize, cx, |_, cx| {
Flex::row()
.with_children(contact.user.avatar.clone().map(|avatar| {
let status_badge = if contact.online {
@@ -1086,6 +1103,27 @@ impl ContactList {
.flex(1., true)
.boxed(),
)
.with_child(
MouseEventHandler::<Cancel>::new(
contact.user.id as usize,
cx,
|mouse_state, _| {
let button_style =
theme.contact_button.style_for(mouse_state, false);
render_icon_button(button_style, "icons/x_mark_8.svg")
.aligned()
.flex_float()
.boxed()
},
)
.with_padding(Padding::uniform(2.))
.with_cursor_style(CursorStyle::PointingHand)
.on_click(MouseButton::Left, move |_, cx| {
cx.dispatch_action(RemoveContact(user_id))
})
.flex_float()
.boxed(),
)
.with_children(if calling {
Some(
Label::new("Calling".to_string(), theme.calling_indicator.text.clone())
@@ -1262,7 +1300,7 @@ impl View for ContactList {
"ContactList"
}
fn keymap_context(&self, _: &AppContext) -> keymap::Context {
fn keymap_context(&self, _: &AppContext) -> KeymapContext {
let mut cx = Self::default_keymap_context();
cx.set.insert("menu".into());
cx

View File

@@ -6,7 +6,7 @@ use gpui::{
elements::*, impl_internal_actions, Entity, ModelHandle, MutableAppContext, RenderContext,
View, ViewContext,
};
use workspace::Notification;
use workspace::notifications::Notification;
impl_internal_actions!(contact_notifications, [Dismiss, RespondToContactRequest]);
@@ -48,7 +48,7 @@ impl View for ContactNotification {
ContactEventKind::Requested => render_user_notification(
self.user.clone(),
"wants to add you as a contact",
Some("They won't know if you decline."),
Some("They won't be alerted if you decline."),
Dismiss(self.user.id),
vec![
(

View File

@@ -32,11 +32,12 @@ pub fn init(cx: &mut MutableAppContext) {
});
for screen in cx.platform().screens() {
let screen_size = screen.size();
let screen_bounds = screen.bounds();
let (window_id, _) = cx.add_window(
WindowOptions {
bounds: WindowBounds::Fixed(RectF::new(
vec2f(screen_size.x() - window_size.x() - PADDING, PADDING),
screen_bounds.upper_right()
- vec2f(PADDING + window_size.x(), PADDING),
window_size,
)),
titlebar: None,
@@ -48,6 +49,7 @@ pub fn init(cx: &mut MutableAppContext) {
},
|_| IncomingCallNotification::new(incoming_call.clone()),
);
notification_windows.push(window_id);
}
}
@@ -74,7 +76,7 @@ impl IncomingCallNotification {
let active_call = ActiveCall::global(cx);
if action.accept {
let join = active_call.update(cx, |active_call, cx| active_call.accept_incoming(cx));
let caller_user_id = self.call.caller.id;
let caller_user_id = self.call.calling_user.id;
let initial_project_id = self.call.initial_project.as_ref().map(|project| project.id);
cx.spawn_weak(|_, mut cx| async move {
join.await?;
@@ -105,7 +107,7 @@ impl IncomingCallNotification {
.as_ref()
.unwrap_or(&default_project);
Flex::row()
.with_children(self.call.caller.avatar.clone().map(|avatar| {
.with_children(self.call.calling_user.avatar.clone().map(|avatar| {
Image::new(avatar)
.with_style(theme.caller_avatar)
.aligned()
@@ -115,7 +117,7 @@ impl IncomingCallNotification {
Flex::column()
.with_child(
Label::new(
self.call.caller.github_login.clone(),
self.call.calling_user.github_login.clone(),
theme.caller_username.text.clone(),
)
.contained()
@@ -225,6 +227,7 @@ impl View for IncomingCallNotification {
.theme
.incoming_call_notification
.background;
Flex::row()
.with_child(self.render_caller(cx))
.with_child(self.render_buttons(cx))

View File

@@ -31,11 +31,11 @@ pub fn init(cx: &mut MutableAppContext) {
let window_size = vec2f(theme.window_width, theme.window_height);
for screen in cx.platform().screens() {
let screen_size = screen.size();
let screen_bounds = screen.bounds();
let (window_id, _) = cx.add_window(
WindowOptions {
bounds: WindowBounds::Fixed(RectF::new(
vec2f(screen_size.x() - window_size.x() - PADDING, PADDING),
screen_bounds.upper_right() - vec2f(PADDING + window_size.x(), PADDING),
window_size,
)),
titlebar: None,

View File

@@ -2,6 +2,7 @@
name = "collections"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/collections.rs"

View File

@@ -2,6 +2,7 @@
name = "command_palette"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/command_palette.rs"

View File

@@ -3,7 +3,7 @@ use fuzzy::{StringMatch, StringMatchCandidate};
use gpui::{
actions,
elements::{ChildView, Flex, Label, ParentElement},
keymap::Keystroke,
keymap_matcher::Keystroke,
Action, AnyViewHandle, Element, Entity, MouseState, MutableAppContext, RenderContext, View,
ViewContext, ViewHandle,
};
@@ -64,13 +64,15 @@ impl CommandPalette {
name: humanize_action_name(name),
action,
keystrokes: bindings
.iter()
.filter_map(|binding| binding.keystrokes())
.last()
.map_or(Vec::new(), |binding| binding.keystrokes().to_vec()),
.map_or(Vec::new(), |keystrokes| keystrokes.to_vec()),
})
})
.collect();
let picker = cx.add_view(|cx| Picker::new(this, cx));
let picker = cx.add_view(|cx| Picker::new("Execute a command...", this, cx));
Self {
picker,
actions,
@@ -350,8 +352,9 @@ mod tests {
});
let project = Project::test(app_state.fs.clone(), [], cx).await;
let (_, workspace) =
cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx));
let (_, workspace) = cx.add_window(|cx| {
Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx)
});
let editor = cx.add_view(&workspace, |cx| {
let mut editor = Editor::single_line(None, cx);
editor.set_text("abc", cx);

View File

@@ -2,6 +2,7 @@
name = "context_menu"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/context_menu.rs"

View File

@@ -1,7 +1,7 @@
use gpui::{
elements::*, geometry::vector::Vector2F, impl_internal_actions, keymap, platform::CursorStyle,
Action, AnyViewHandle, AppContext, Axis, Entity, MouseButton, MutableAppContext, RenderContext,
SizeConstraint, Subscription, View, ViewContext,
elements::*, geometry::vector::Vector2F, impl_internal_actions, keymap_matcher::KeymapContext,
platform::CursorStyle, Action, AnyViewHandle, AppContext, Axis, Entity, MouseButton,
MutableAppContext, RenderContext, SizeConstraint, Subscription, View, ViewContext,
};
use menu::*;
use settings::Settings;
@@ -63,6 +63,7 @@ pub struct ContextMenu {
visible: bool,
previously_focused_view_id: Option<usize>,
clicked: bool,
parent_view_id: usize,
_actions_observation: Subscription,
}
@@ -75,7 +76,7 @@ impl View for ContextMenu {
"ContextMenu"
}
fn keymap_context(&self, _: &AppContext) -> keymap::Context {
fn keymap_context(&self, _: &AppContext) -> KeymapContext {
let mut cx = Self::default_keymap_context();
cx.set.insert("menu".into());
cx
@@ -114,6 +115,8 @@ impl View for ContextMenu {
impl ContextMenu {
pub fn new(cx: &mut ViewContext<Self>) -> Self {
let parent_view_id = cx.parent().unwrap();
Self {
show_count: 0,
anchor_position: Default::default(),
@@ -123,6 +126,7 @@ impl ContextMenu {
visible: Default::default(),
previously_focused_view_id: Default::default(),
clicked: false,
parent_view_id,
_actions_observation: cx.observe_actions(Self::action_dispatched),
}
}
@@ -251,6 +255,7 @@ impl ContextMenu {
}
fn render_menu_for_measurement(&self, cx: &mut RenderContext<Self>) -> impl Element {
let window_id = cx.window_id();
let style = cx.global::<Settings>().theme.context_menu.clone();
Flex::row()
.with_child(
@@ -289,6 +294,8 @@ impl ContextMenu {
Some(ix) == self.selected_index,
);
KeystrokeLabel::new(
window_id,
self.parent_view_id,
action.boxed_clone(),
style.keystroke.container,
style.keystroke.text.clone(),
@@ -318,6 +325,7 @@ impl ContextMenu {
let style = cx.global::<Settings>().theme.context_menu.clone();
let window_id = cx.window_id();
MouseEventHandler::<Menu>::new(0, cx, |_, cx| {
Flex::column()
.with_children(self.items.iter().enumerate().map(|(ix, item)| {
@@ -337,6 +345,8 @@ impl ContextMenu {
)
.with_child({
KeystrokeLabel::new(
window_id,
self.parent_view_id,
action.boxed_clone(),
style.keystroke.container,
style.keystroke.text.clone(),

View File

@@ -2,6 +2,7 @@
name = "db"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/db.rs"
@@ -12,16 +13,20 @@ test-support = []
[dependencies]
collections = { path = "../collections" }
gpui = { path = "../gpui" }
sqlez = { path = "../sqlez" }
sqlez_macros = { path = "../sqlez_macros" }
util = { path = "../util" }
anyhow = "1.0.57"
indoc = "1.0.4"
async-trait = "0.1"
lazy_static = "1.4.0"
log = { version = "0.4.16", features = ["kv_unstable_serde"] }
parking_lot = "0.11.1"
rusqlite = { version = "0.28.0", features = ["bundled", "serde_json"] }
rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" }
serde = { workspace = true }
serde_rusqlite = "0.31.0"
serde = { version = "1.0", features = ["derive"] }
smol = "1.2"
[dev-dependencies]
gpui = { path = "../gpui", features = ["test-support"] }
env_logger = "0.9.1"
tempdir = { version = "0.3.7" }

5
crates/db/README.md Normal file
View File

@@ -0,0 +1,5 @@
# Building Queries
First, craft your test data. The examples folder shows a template for building a test DB, which can be run with `cargo run --example [your-example]`.
To actually use and test your queries, import the generated DB file into https://sqliteonline.com/
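For illustration, a minimal sketch of what such an example binary might look like (the file name examples/kvp_example.rs is hypothetical; it assumes the db crate's re-exported smol and the kvp::KEY_VALUE_STORE connection defined later in this diff, and note it writes into the default Zed database directory rather than a scratch path):

    // examples/kvp_example.rs (hypothetical): seed the key-value store so the
    // resulting db.sqlite file can be opened in an external SQLite viewer.
    use db::kvp::KEY_VALUE_STORE;

    fn main() {
        db::smol::block_on(async {
            KEY_VALUE_STORE
                .write_kvp("example-key".to_string(), "example-value".to_string())
                .await
                .expect("failed to write example data");
        });
    }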

View File

@@ -1,119 +1,394 @@
mod kvp;
mod migrations;
pub mod kvp;
pub mod query;
use std::fs;
// Re-export
pub use anyhow;
use anyhow::Context;
pub use indoc::indoc;
pub use lazy_static;
use parking_lot::{Mutex, RwLock};
pub use smol;
pub use sqlez;
pub use sqlez_macros;
pub use util::channel::{RELEASE_CHANNEL, RELEASE_CHANNEL_NAME};
pub use util::paths::DB_DIR;
use sqlez::domain::Migrator;
use sqlez::thread_safe_connection::ThreadSafeConnection;
use sqlez_macros::sql;
use std::fs::create_dir_all;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};
use util::channel::ReleaseChannel;
use util::{async_iife, ResultExt};
use anyhow::Result;
use log::error;
use parking_lot::Mutex;
use rusqlite::Connection;
const CONNECTION_INITIALIZE_QUERY: &'static str = sql!(
PRAGMA foreign_keys=TRUE;
);
use migrations::MIGRATIONS;
const DB_INITIALIZE_QUERY: &'static str = sql!(
PRAGMA journal_mode=WAL;
PRAGMA busy_timeout=1;
PRAGMA case_sensitive_like=TRUE;
PRAGMA synchronous=NORMAL;
);
#[derive(Clone)]
pub enum Db {
Real(Arc<RealDb>),
Null,
const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB";
const DB_FILE_NAME: &'static str = "db.sqlite";
lazy_static::lazy_static! {
static ref ZED_STATELESS: bool = std::env::var("ZED_STATELESS").map_or(false, |v| !v.is_empty());
static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(());
pub static ref BACKUP_DB_PATH: RwLock<Option<PathBuf>> = RwLock::new(None);
pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false);
}
pub struct RealDb {
connection: Mutex<Connection>,
path: Option<PathBuf>,
}
impl Db {
/// Open or create a database at the given directory path.
pub fn open(db_dir: &Path, channel: &'static str) -> Self {
// Use 0 for now. Will implement incrementing and clearing of old db files soon TM
let current_db_dir = db_dir.join(Path::new(&format!("0-{}", channel)));
fs::create_dir_all(&current_db_dir)
.expect("Should be able to create the database directory");
let db_path = current_db_dir.join(Path::new("db.sqlite"));
Connection::open(db_path)
.map_err(Into::into)
.and_then(|connection| Self::initialize(connection))
.map(|connection| {
Db::Real(Arc::new(RealDb {
connection,
path: Some(db_dir.to_path_buf()),
}))
})
.unwrap_or_else(|e| {
error!(
"Connecting to file backed db failed. Reverting to null db. {}",
e
);
Self::Null
})
/// Open or create a database at the given directory path.
/// This will retry a couple times if there are failures. If opening fails once, the db directory
/// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created.
/// In either case, static variables are set so that the user can be notified.
pub async fn open_db<M: Migrator + 'static>(
db_dir: &Path,
release_channel: &ReleaseChannel,
) -> ThreadSafeConnection<M> {
if *ZED_STATELESS {
return open_fallback_db().await;
}
/// Open a in memory database for testing and as a fallback.
#[cfg(any(test, feature = "test-support"))]
pub fn open_in_memory() -> Self {
Connection::open_in_memory()
.map_err(Into::into)
.and_then(|connection| Self::initialize(connection))
.map(|connection| {
Db::Real(Arc::new(RealDb {
connection,
path: None,
}))
})
.unwrap_or_else(|e| {
error!(
"Connecting to in memory db failed. Reverting to null db. {}",
e
);
Self::Null
})
}
let release_channel_name = release_channel.dev_name();
let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel_name)));
fn initialize(mut conn: Connection) -> Result<Mutex<Connection>> {
MIGRATIONS.to_latest(&mut conn)?;
let connection = async_iife!({
// Note: This still has a race condition where 1 set of migrations succeeds
// (e.g. (Workspace, Editor)) and another fails (e.g. (Workspace, Terminal))
// This will cause the first connection to have the database taken out
// from under it. This *should* be fine though. The second database failure will
// cause errors in the log and so should be observed by developers while writing
// soon-to-be good migrations. If user databases are corrupted, we toss them out
// and try again from a blank. As long as running all migrations from start to end
// on a blank database is ok, this race condition will never be triggered.
//
// Basically: Don't ever push invalid migrations to stable or everyone will have
// a bad time.
conn.pragma_update(None, "journal_mode", "WAL")?;
conn.pragma_update(None, "synchronous", "NORMAL")?;
conn.pragma_update(None, "foreign_keys", true)?;
conn.pragma_update(None, "case_sensitive_like", true)?;
// If no db folder, create one at 0-{channel}
create_dir_all(&main_db_dir).context("Could not create db directory")?;
let db_path = main_db_dir.join(Path::new(DB_FILE_NAME));
Ok(Mutex::new(conn))
}
pub fn persisting(&self) -> bool {
self.real().and_then(|db| db.path.as_ref()).is_some()
}
pub fn real(&self) -> Option<&RealDb> {
match self {
Db::Real(db) => Some(&db),
_ => None,
// Optimistically open databases in parallel
if !DB_FILE_OPERATIONS.is_locked() {
// Try building a connection
if let Some(connection) = open_main_db(&db_path).await {
return Ok(connection)
};
}
// Take a lock in the failure case so that we move the db once per process instead
// of potentially multiple times from different threads. This shouldn't happen in the
// normal path
let _lock = DB_FILE_OPERATIONS.lock();
if let Some(connection) = open_main_db(&db_path).await {
return Ok(connection)
};
let backup_timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("System clock is set before the unix timestamp, Zed does not support this region of spacetime")
.as_millis();
// If failed, move 0-{channel} to {current unix timestamp}-{channel}
let backup_db_dir = db_dir.join(Path::new(&format!(
"{}-{}",
backup_timestamp,
release_channel_name,
)));
std::fs::rename(&main_db_dir, &backup_db_dir)
.context("Failed clean up corrupted database, panicking.")?;
// Set a static ref with the failed timestamp and error so we can notify the user
{
let mut guard = BACKUP_DB_PATH.write();
*guard = Some(backup_db_dir);
}
// Create a new 0-{channel}
create_dir_all(&main_db_dir).context("Should be able to create the database directory")?;
let db_path = main_db_dir.join(Path::new(DB_FILE_NAME));
// Try again
open_main_db(&db_path).await.context("Could not open newly created db")
}).await.log_err();
if let Some(connection) = connection {
return connection;
}
// Set another static ref so that we can escalate the notification
ALL_FILE_DB_FAILED.store(true, Ordering::Release);
// If still failed, create an in memory db with a known name
open_fallback_db().await
}
impl Drop for Db {
fn drop(&mut self) {
match self {
Db::Real(real_db) => {
let lock = real_db.connection.lock();
async fn open_main_db<M: Migrator>(db_path: &PathBuf) -> Option<ThreadSafeConnection<M>> {
log::info!("Opening main db");
ThreadSafeConnection::<M>::builder(db_path.to_string_lossy().as_ref(), true)
.with_db_initialization_query(DB_INITIALIZE_QUERY)
.with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
.build()
.await
.log_err()
}
let _ = lock.pragma_update(None, "analysis_limit", "500");
let _ = lock.pragma_update(None, "optimize", "");
async fn open_fallback_db<M: Migrator>() -> ThreadSafeConnection<M> {
log::info!("Opening fallback db");
ThreadSafeConnection::<M>::builder(FALLBACK_DB_NAME, false)
.with_db_initialization_query(DB_INITIALIZE_QUERY)
.with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
.build()
.await
.expect(
"Fallback in memory database failed. Likely initialization queries or migrations have fundamental errors",
)
}
#[cfg(any(test, feature = "test-support"))]
pub async fn open_test_db<M: Migrator>(db_name: &str) -> ThreadSafeConnection<M> {
use sqlez::thread_safe_connection::locking_queue;
ThreadSafeConnection::<M>::builder(db_name, false)
.with_db_initialization_query(DB_INITIALIZE_QUERY)
.with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
// Serialize queued writes via a mutex and run them synchronously
.with_write_queue_constructor(locking_queue())
.build()
.await
.unwrap()
}
/// Implements a basic DB wrapper for a given domain
#[macro_export]
macro_rules! define_connection {
(pub static ref $id:ident: $t:ident<()> = $migrations:expr;) => {
pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<$t>);
impl ::std::ops::Deref for $t {
type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$t>;
fn deref(&self) -> &Self::Target {
&self.0
}
Db::Null => {}
}
}
impl $crate::sqlez::domain::Domain for $t {
fn name() -> &'static str {
stringify!($t)
}
fn migrations() -> &'static [&'static str] {
$migrations
}
}
#[cfg(any(test, feature = "test-support"))]
$crate::lazy_static::lazy_static! {
pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id))));
}
#[cfg(not(any(test, feature = "test-support")))]
$crate::lazy_static::lazy_static! {
pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(&$crate::DB_DIR, &$crate::RELEASE_CHANNEL)));
}
};
(pub static ref $id:ident: $t:ident<$($d:ty),+> = $migrations:expr;) => {
pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<( $($d),+, $t )>);
impl ::std::ops::Deref for $t {
type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<($($d),+, $t)>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl $crate::sqlez::domain::Domain for $t {
fn name() -> &'static str {
stringify!($t)
}
fn migrations() -> &'static [&'static str] {
$migrations
}
}
#[cfg(any(test, feature = "test-support"))]
$crate::lazy_static::lazy_static! {
pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id))));
}
#[cfg(not(any(test, feature = "test-support")))]
$crate::lazy_static::lazy_static! {
pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(&$crate::DB_DIR, &$crate::RELEASE_CHANNEL)));
}
};
}
#[cfg(test)]
mod tests {
use crate::migrations::MIGRATIONS;
use std::{fs, thread};
#[test]
fn test_migrations() {
assert!(MIGRATIONS.validate().is_ok());
use sqlez::{connection::Connection, domain::Domain};
use sqlez_macros::sql;
use tempdir::TempDir;
use crate::{open_db, DB_FILE_NAME};
// Test bad migration panics
#[gpui::test]
#[should_panic]
async fn test_bad_migration_panics() {
enum BadDB {}
impl Domain for BadDB {
fn name() -> &'static str {
"db_tests"
}
fn migrations() -> &'static [&'static str] {
&[
sql!(CREATE TABLE test(value);),
// failure because test already exists
sql!(CREATE TABLE test(value);),
]
}
}
let tempdir = TempDir::new("DbTests").unwrap();
let _bad_db = open_db::<BadDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
}
/// Test that a DB which exists but is corrupted gets backed up and recreated
#[gpui::test]
async fn test_db_corruption() {
enum CorruptedDB {}
impl Domain for CorruptedDB {
fn name() -> &'static str {
"db_tests"
}
fn migrations() -> &'static [&'static str] {
&[sql!(CREATE TABLE test(value);)]
}
}
enum GoodDB {}
impl Domain for GoodDB {
fn name() -> &'static str {
"db_tests" //Notice same name
}
fn migrations() -> &'static [&'static str] {
&[sql!(CREATE TABLE test2(value);)] //But different migration
}
}
let tempdir = TempDir::new("DbTests").unwrap();
{
let corrupt_db =
open_db::<CorruptedDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
assert!(corrupt_db.persistent());
}
let good_db = open_db::<GoodDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
assert!(
good_db.select_row::<usize>("SELECT * FROM test2").unwrap()()
.unwrap()
.is_none()
);
let mut corrupted_backup_dir = fs::read_dir(tempdir.path())
.unwrap()
.find(|entry| {
!entry
.as_ref()
.unwrap()
.file_name()
.to_str()
.unwrap()
.starts_with("0")
})
.unwrap()
.unwrap()
.path();
corrupted_backup_dir.push(DB_FILE_NAME);
let backup = Connection::open_file(&corrupted_backup_dir.to_string_lossy());
assert!(backup.select_row::<usize>("SELECT * FROM test").unwrap()()
.unwrap()
.is_none());
}
/// Test that a corrupted DB is recreated correctly even when many connections open it simultaneously
#[gpui::test]
async fn test_simultaneous_db_corruption() {
enum CorruptedDB {}
impl Domain for CorruptedDB {
fn name() -> &'static str {
"db_tests"
}
fn migrations() -> &'static [&'static str] {
&[sql!(CREATE TABLE test(value);)]
}
}
enum GoodDB {}
impl Domain for GoodDB {
fn name() -> &'static str {
"db_tests" //Notice same name
}
fn migrations() -> &'static [&'static str] {
&[sql!(CREATE TABLE test2(value);)] //But different migration
}
}
let tempdir = TempDir::new("DbTests").unwrap();
{
// Setup the bad database
let corrupt_db =
open_db::<CorruptedDB>(tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
assert!(corrupt_db.persistent());
}
// Try to connect to it a bunch of times at once
let mut guards = vec![];
for _ in 0..10 {
let tmp_path = tempdir.path().to_path_buf();
let guard = thread::spawn(move || {
let good_db = smol::block_on(open_db::<GoodDB>(
tmp_path.as_path(),
&util::channel::ReleaseChannel::Dev,
));
assert!(
good_db.select_row::<usize>("SELECT * FROM test2").unwrap()()
.unwrap()
.is_none()
);
});
guards.push(guard);
}
for guard in guards.into_iter() {
assert!(guard.join().is_ok());
}
}
}
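As a usage note for the statics above, a caller is expected to inspect BACKUP_DB_PATH and ALL_FILE_DB_FAILED after its connections have been opened and surface the result to the user. A minimal sketch, assuming only the items exported from this file (the notify callback is a stand-in, not a real Zed API):

    // Hypothetical caller-side health check, run once after the databases have
    // been opened. BACKUP_DB_PATH and ALL_FILE_DB_FAILED are the statics above;
    // the store side uses Ordering::Release, so we load with Acquire here.
    use std::sync::atomic::Ordering;

    use db::{ALL_FILE_DB_FAILED, BACKUP_DB_PATH};

    fn report_db_health(notify: impl Fn(String)) {
        if ALL_FILE_DB_FAILED.load(Ordering::Acquire) {
            notify("No file-backed database could be opened; state will not persist this session.".to_string());
        } else {
            // Clone out of the RwLock guard so the lock is not held while notifying.
            let backup_dir = BACKUP_DB_PATH.read().clone();
            if let Some(backup_dir) = backup_dir {
                notify(format!(
                    "A corrupted database was moved aside to {}.",
                    backup_dir.display()
                ));
            }
        }
    }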

View File

@@ -1,311 +0,0 @@
use std::{ffi::OsStr, fmt::Display, hash::Hash, os::unix::prelude::OsStrExt, path::PathBuf};
use anyhow::Result;
use collections::HashSet;
use rusqlite::{named_params, params};
use super::Db;
pub(crate) const ITEMS_M_1: &str = "
CREATE TABLE items(
id INTEGER PRIMARY KEY,
kind TEXT
) STRICT;
CREATE TABLE item_path(
item_id INTEGER PRIMARY KEY,
path BLOB
) STRICT;
CREATE TABLE item_query(
item_id INTEGER PRIMARY KEY,
query TEXT
) STRICT;
";
#[derive(PartialEq, Eq, Hash, Debug)]
pub enum SerializedItemKind {
Editor,
Terminal,
ProjectSearch,
Diagnostics,
}
impl Display for SerializedItemKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&format!("{:?}", self))
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum SerializedItem {
Editor(usize, PathBuf),
Terminal(usize),
ProjectSearch(usize, String),
Diagnostics(usize),
}
impl SerializedItem {
fn kind(&self) -> SerializedItemKind {
match self {
SerializedItem::Editor(_, _) => SerializedItemKind::Editor,
SerializedItem::Terminal(_) => SerializedItemKind::Terminal,
SerializedItem::ProjectSearch(_, _) => SerializedItemKind::ProjectSearch,
SerializedItem::Diagnostics(_) => SerializedItemKind::Diagnostics,
}
}
fn id(&self) -> usize {
match self {
SerializedItem::Editor(id, _)
| SerializedItem::Terminal(id)
| SerializedItem::ProjectSearch(id, _)
| SerializedItem::Diagnostics(id) => *id,
}
}
}
impl Db {
fn write_item(&self, serialized_item: SerializedItem) -> Result<()> {
self.real()
.map(|db| {
let mut lock = db.connection.lock();
let tx = lock.transaction()?;
// Serialize the item
let id = serialized_item.id();
{
let mut stmt = tx.prepare_cached(
"INSERT OR REPLACE INTO items(id, kind) VALUES ((?), (?))",
)?;
dbg!("inserting item");
stmt.execute(params![id, serialized_item.kind().to_string()])?;
}
// Serialize item data
match &serialized_item {
SerializedItem::Editor(_, path) => {
dbg!("inserting path");
let mut stmt = tx.prepare_cached(
"INSERT OR REPLACE INTO item_path(item_id, path) VALUES ((?), (?))",
)?;
let path_bytes = path.as_os_str().as_bytes();
stmt.execute(params![id, path_bytes])?;
}
SerializedItem::ProjectSearch(_, query) => {
dbg!("inserting query");
let mut stmt = tx.prepare_cached(
"INSERT OR REPLACE INTO item_query(item_id, query) VALUES ((?), (?))",
)?;
stmt.execute(params![id, query])?;
}
_ => {}
}
tx.commit()?;
let mut stmt = lock.prepare_cached("SELECT id, kind FROM items")?;
let _ = stmt
.query_map([], |row| {
let zero: usize = row.get(0)?;
let one: String = row.get(1)?;
dbg!(zero, one);
Ok(())
})?
.collect::<Vec<Result<(), _>>>();
Ok(())
})
.unwrap_or(Ok(()))
}
fn delete_item(&self, item_id: usize) -> Result<()> {
self.real()
.map(|db| {
let lock = db.connection.lock();
let mut stmt = lock.prepare_cached(
r#"
DELETE FROM items WHERE id = (:id);
DELETE FROM item_path WHERE id = (:id);
DELETE FROM item_query WHERE id = (:id);
"#,
)?;
stmt.execute(named_params! {":id": item_id})?;
Ok(())
})
.unwrap_or(Ok(()))
}
fn take_items(&self) -> Result<HashSet<SerializedItem>> {
self.real()
.map(|db| {
let mut lock = db.connection.lock();
let tx = lock.transaction()?;
// When working with transactions in rusqlite, need to make this kind of scope
// To make the borrow stuff work correctly. Don't know why, rust is wild.
let result = {
let mut editors_stmt = tx.prepare_cached(
r#"
SELECT items.id, item_path.path
FROM items
LEFT JOIN item_path
ON items.id = item_path.item_id
WHERE items.kind = ?;
"#,
)?;
let editors_iter = editors_stmt.query_map(
[SerializedItemKind::Editor.to_string()],
|row| {
let id: usize = row.get(0)?;
let buf: Vec<u8> = row.get(1)?;
let path: PathBuf = OsStr::from_bytes(&buf).into();
Ok(SerializedItem::Editor(id, path))
},
)?;
let mut terminals_stmt = tx.prepare_cached(
r#"
SELECT items.id
FROM items
WHERE items.kind = ?;
"#,
)?;
let terminals_iter = terminals_stmt.query_map(
[SerializedItemKind::Terminal.to_string()],
|row| {
let id: usize = row.get(0)?;
Ok(SerializedItem::Terminal(id))
},
)?;
let mut search_stmt = tx.prepare_cached(
r#"
SELECT items.id, item_query.query
FROM items
LEFT JOIN item_query
ON items.id = item_query.item_id
WHERE items.kind = ?;
"#,
)?;
let searches_iter = search_stmt.query_map(
[SerializedItemKind::ProjectSearch.to_string()],
|row| {
let id: usize = row.get(0)?;
let query = row.get(1)?;
Ok(SerializedItem::ProjectSearch(id, query))
},
)?;
#[cfg(debug_assertions)]
let tmp =
searches_iter.collect::<Vec<Result<SerializedItem, rusqlite::Error>>>();
#[cfg(debug_assertions)]
debug_assert!(tmp.len() == 0 || tmp.len() == 1);
#[cfg(debug_assertions)]
let searches_iter = tmp.into_iter();
let mut diagnostic_stmt = tx.prepare_cached(
r#"
SELECT items.id
FROM items
WHERE items.kind = ?;
"#,
)?;
let diagnostics_iter = diagnostic_stmt.query_map(
[SerializedItemKind::Diagnostics.to_string()],
|row| {
let id: usize = row.get(0)?;
Ok(SerializedItem::Diagnostics(id))
},
)?;
#[cfg(debug_assertions)]
let tmp =
diagnostics_iter.collect::<Vec<Result<SerializedItem, rusqlite::Error>>>();
#[cfg(debug_assertions)]
debug_assert!(tmp.len() == 0 || tmp.len() == 1);
#[cfg(debug_assertions)]
let diagnostics_iter = tmp.into_iter();
let res = editors_iter
.chain(terminals_iter)
.chain(diagnostics_iter)
.chain(searches_iter)
.collect::<Result<HashSet<SerializedItem>, rusqlite::Error>>()?;
let mut delete_stmt = tx.prepare_cached(
r#"
DELETE FROM items;
DELETE FROM item_path;
DELETE FROM item_query;
"#,
)?;
delete_stmt.execute([])?;
res
};
tx.commit()?;
Ok(result)
})
.unwrap_or(Ok(HashSet::default()))
}
}
#[cfg(test)]
mod test {
use anyhow::Result;
use super::*;
#[test]
fn test_items_round_trip() -> Result<()> {
let db = Db::open_in_memory();
let mut items = vec![
SerializedItem::Editor(0, PathBuf::from("/tmp/test.txt")),
SerializedItem::Terminal(1),
SerializedItem::ProjectSearch(2, "Test query!".to_string()),
SerializedItem::Diagnostics(3),
]
.into_iter()
.collect::<HashSet<_>>();
for item in items.iter() {
dbg!("Inserting... ");
db.write_item(item.clone())?;
}
assert_eq!(items, db.take_items()?);
// Check that it's empty, as expected
assert_eq!(HashSet::default(), db.take_items()?);
for item in items.iter() {
db.write_item(item.clone())?;
}
items.remove(&SerializedItem::ProjectSearch(2, "Test query!".to_string()));
db.delete_item(2)?;
assert_eq!(items, db.take_items()?);
Ok(())
}
}

View File

@@ -1,82 +1,62 @@
use anyhow::Result;
use rusqlite::OptionalExtension;
use sqlez_macros::sql;
use super::Db;
use crate::{define_connection, query};
pub(crate) const KVP_M_1_UP: &str = "
CREATE TABLE kv_store(
key TEXT PRIMARY KEY,
value TEXT NOT NULL
) STRICT;
";
define_connection!(pub static ref KEY_VALUE_STORE: KeyValueStore<()> =
&[sql!(
CREATE TABLE IF NOT EXISTS kv_store(
key TEXT PRIMARY KEY,
value TEXT NOT NULL
) STRICT;
)];
);
impl Db {
pub fn read_kvp(&self, key: &str) -> Result<Option<String>> {
self.real()
.map(|db| {
let lock = db.connection.lock();
let mut stmt = lock.prepare_cached("SELECT value FROM kv_store WHERE key = (?)")?;
Ok(stmt.query_row([key], |row| row.get(0)).optional()?)
})
.unwrap_or(Ok(None))
impl KeyValueStore {
query! {
pub fn read_kvp(key: &str) -> Result<Option<String>> {
SELECT value FROM kv_store WHERE key = (?)
}
}
pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> {
self.real()
.map(|db| {
let lock = db.connection.lock();
let mut stmt = lock.prepare_cached(
"INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))",
)?;
stmt.execute([key, value])?;
Ok(())
})
.unwrap_or(Ok(()))
query! {
pub async fn write_kvp(key: String, value: String) -> Result<()> {
INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))
}
}
pub fn delete_kvp(&self, key: &str) -> Result<()> {
self.real()
.map(|db| {
let lock = db.connection.lock();
let mut stmt = lock.prepare_cached("DELETE FROM kv_store WHERE key = (?)")?;
stmt.execute([key])?;
Ok(())
})
.unwrap_or(Ok(()))
query! {
pub async fn delete_kvp(key: String) -> Result<()> {
DELETE FROM kv_store WHERE key = (?)
}
}
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use crate::kvp::KeyValueStore;
use super::*;
#[gpui::test]
async fn test_kvp() {
let db = KeyValueStore(crate::open_test_db("test_kvp").await);
#[test]
fn test_kvp() -> Result<()> {
let db = Db::open_in_memory();
assert_eq!(db.read_kvp("key-1").unwrap(), None);
assert_eq!(db.read_kvp("key-1")?, None);
db.write_kvp("key-1".to_string(), "one".to_string())
.await
.unwrap();
assert_eq!(db.read_kvp("key-1").unwrap(), Some("one".to_string()));
db.write_kvp("key-1", "one")?;
assert_eq!(db.read_kvp("key-1")?, Some("one".to_string()));
db.write_kvp("key-1".to_string(), "one-2".to_string())
.await
.unwrap();
assert_eq!(db.read_kvp("key-1").unwrap(), Some("one-2".to_string()));
db.write_kvp("key-1", "one-2")?;
assert_eq!(db.read_kvp("key-1")?, Some("one-2".to_string()));
db.write_kvp("key-2".to_string(), "two".to_string())
.await
.unwrap();
assert_eq!(db.read_kvp("key-2").unwrap(), Some("two".to_string()));
db.write_kvp("key-2", "two")?;
assert_eq!(db.read_kvp("key-2")?, Some("two".to_string()));
db.delete_kvp("key-1")?;
assert_eq!(db.read_kvp("key-1")?, None);
Ok(())
db.delete_kvp("key-1".to_string()).await.unwrap();
assert_eq!(db.read_kvp("key-1").unwrap(), None);
}
}

View File

@@ -1,15 +0,0 @@
use rusqlite_migration::{Migrations, M};
// use crate::items::ITEMS_M_1;
use crate::kvp::KVP_M_1_UP;
// This must be ordered by development time! Only ever add new migrations to the end!!
// Bad things will probably happen if you don't monotonically edit this vec!!!!
// And no re-ordering ever!!!!!!!!!! The results of these migrations are on the user's
// file system and so everything we do here is locked in _f_o_r_e_v_e_r_.
lazy_static::lazy_static! {
pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
M::up(KVP_M_1_UP),
// M::up(ITEMS_M_1),
]);
}

314
crates/db/src/query.rs Normal file
View File

@@ -0,0 +1,314 @@
#[macro_export]
macro_rules! query {
($vis:vis fn $id:ident() -> Result<()> { $($sql:tt)+ }) => {
$vis fn $id(&self) -> $crate::anyhow::Result<()> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.exec(sql_stmt)?().context(::std::format!(
"Error in {}, exec failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt,
))
}
};
($vis:vis async fn $id:ident() -> Result<()> { $($sql:tt)+ }) => {
$vis async fn $id(&self) -> $crate::anyhow::Result<()> {
use $crate::anyhow::Context;
self.write(|connection| {
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
connection.exec(sql_stmt)?().context(::std::format!(
"Error in {}, exec failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}).await
}
};
($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => {
$vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+))
.context(::std::format!(
"Error in {}, exec_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}
};
($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $($sql:tt)+ }) => {
$vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> {
use $crate::anyhow::Context;
self.write(move |connection| {
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
connection.exec_bound::<$arg_type>(sql_stmt)?($arg)
.context(::std::format!(
"Error in {}, exec_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}).await
}
};
($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => {
$vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> {
use $crate::anyhow::Context;
self.write(move |connection| {
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
connection.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+))
.context(::std::format!(
"Error in {}, exec_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}).await
}
};
($vis:vis fn $id:ident() -> Result<Vec<$return_type:ty>> { $($sql:tt)+ }) => {
$vis fn $id(&self) -> $crate::anyhow::Result<Vec<$return_type>> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.select::<$return_type>(sql_stmt)?()
.context(::std::format!(
"Error in {}, select_row failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}
};
($vis:vis async fn $id:ident() -> Result<Vec<$return_type:ty>> { $($sql:tt)+ }) => {
$vis async fn $id(&self) -> $crate::anyhow::Result<Vec<$return_type>> {
use $crate::anyhow::Context;
self.write(|connection| {
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
connection.select::<$return_type>(sql_stmt)?()
.context(::std::format!(
"Error in {}, select_row failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}).await
}
};
($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>> { $($sql:tt)+ }) => {
$vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Vec<$return_type>> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
.context(::std::format!(
"Error in {}, exec_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}
};
($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Vec<$return_type:ty>> { $($sql:tt)+ }) => {
$vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Vec<$return_type>> {
use $crate::anyhow::Context;
self.write(|connection| {
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
connection.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
.context(::std::format!(
"Error in {}, exec_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}).await
}
};
($vis:vis fn $id:ident() -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
$vis fn $id(&self) -> $crate::anyhow::Result<Option<$return_type>> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.select_row::<$return_type>(sql_stmt)?()
.context(::std::format!(
"Error in {}, select_row failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}
};
($vis:vis async fn $id:ident() -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
$vis async fn $id(&self) -> $crate::anyhow::Result<Option<$return_type>> {
use $crate::anyhow::Context;
self.write(|connection| {
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
connection.select_row::<$return_type>(sql_stmt)?()
.context(::std::format!(
"Error in {}, select_row failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}).await
}
};
($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
$vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<Option<$return_type>> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg)
.context(::std::format!(
"Error in {}, select_row_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}
};
($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
$vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Option<$return_type>> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
.context(::std::format!(
"Error in {}, select_row_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}
};
($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<Option<$return_type:ty>> { $($sql:tt)+ }) => {
$vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<Option<$return_type>> {
use $crate::anyhow::Context;
self.write(move |connection| {
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
connection.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
.context(::std::format!(
"Error in {}, select_row_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))
}).await
}
};
($vis:vis fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => {
$vis fn $id(&self) -> $crate::anyhow::Result<$return_type> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.select_row::<$return_type>(sql_stmt)?()
.context(::std::format!(
"Error in {}, select_row_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))?
.context(::std::format!(
"Error in {}, select_row_bound expected single row result but found none for: {}",
::std::stringify!($id),
sql_stmt
))
}
};
($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => {
$vis async fn $id(&self) -> $crate::anyhow::Result<$return_type> {
use $crate::anyhow::Context;
self.write(|connection| {
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
connection.select_row::<$return_type>(sql_stmt)?()
.context(::std::format!(
"Error in {}, select_row_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))?
.context(::std::format!(
"Error in {}, select_row_bound expected single row result but found none for: {}",
::std::stringify!($id),
sql_stmt
))
}).await
}
};
($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $($sql:tt)+ }) => {
$vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<$return_type> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg)
.context(::std::format!(
"Error in {}, select_row_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))?
.context(::std::format!(
"Error in {}, select_row_bound expected single row result but found none for: {}",
::std::stringify!($id),
sql_stmt
))
}
};
($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => {
$vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
use $crate::anyhow::Context;
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
.context(::std::format!(
"Error in {}, select_row_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))?
.context(::std::format!(
"Error in {}, select_row_bound expected single row result but found none for: {}",
::std::stringify!($id),
sql_stmt
))
}
};
($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => {
$vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> {
use $crate::anyhow::Context;
self.write(|connection| {
let sql_stmt = $crate::sqlez_macros::sql!($($sql)+);
connection.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+))
.context(::std::format!(
"Error in {}, select_row_bound failed to execute or parse for: {}",
::std::stringify!($id),
sql_stmt
))?
.context(::std::format!(
"Error in {}, select_row_bound expected single row result but found none for: {}",
::std::stringify!($id),
sql_stmt
))
}).await
}
};
}
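Taken together with define_connection! from lib.rs, the macro above is meant to be invoked roughly as in the following sketch. The ExampleStore domain and its table are hypothetical; the shape mirrors the kvp module shown earlier in this diff and only exercises macro arms defined above:

    // Hypothetical consumer of define_connection! and query!: one migration,
    // one async write arm, and one synchronous Option-returning select arm.
    use db::{define_connection, query, sqlez_macros::sql};

    define_connection!(pub static ref EXAMPLE_STORE: ExampleStore<()> =
        &[sql!(
            CREATE TABLE IF NOT EXISTS examples(
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL
            ) STRICT;
        )];
    );

    impl ExampleStore {
        query! {
            pub async fn write_example(key: String, value: String) -> Result<()> {
                INSERT OR REPLACE INTO examples(key, value) VALUES ((?), (?))
            }
        }

        query! {
            pub fn read_example(key: &str) -> Result<Option<String>> {
                SELECT value FROM examples WHERE key = (?)
            }
        }
    }

The async arms route through the connection's serialized write queue, while the synchronous select arms read directly; that split matches how kvp.rs uses the same macros.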

View File

@@ -2,6 +2,7 @@
name = "diagnostics"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/diagnostics.rs"

View File

@@ -5,8 +5,9 @@ use collections::{BTreeMap, HashSet};
use editor::{
diagnostic_block_renderer,
display_map::{BlockDisposition, BlockId, BlockProperties, BlockStyle, RenderBlock},
highlight_diagnostic_message, Autoscroll, Editor, ExcerptId, ExcerptRange, MultiBuffer,
ToOffset,
highlight_diagnostic_message,
scroll::autoscroll::Autoscroll,
Editor, ExcerptId, ExcerptRange, MultiBuffer, ToOffset,
};
use gpui::{
actions, elements::*, fonts::TextStyle, impl_internal_actions, serde_json, AnyViewHandle,
@@ -20,7 +21,6 @@ use language::{
use project::{DiagnosticSummary, Project, ProjectPath};
use serde_json::json;
use settings::Settings;
use smallvec::SmallVec;
use std::{
any::{Any, TypeId},
cmp::Ordering,
@@ -29,7 +29,10 @@ use std::{
sync::Arc,
};
use util::TryFutureExt;
use workspace::{ItemHandle as _, ItemNavHistory, Workspace};
use workspace::{
item::{Item, ItemEvent, ItemHandle},
ItemNavHistory, Pane, Workspace,
};
actions!(diagnostics, [Deploy]);
@@ -160,7 +163,7 @@ impl ProjectDiagnosticsEditor {
editor.set_vertical_scroll_margin(5, cx);
editor
});
cx.subscribe(&editor, |_, _, event, cx| cx.emit(*event))
cx.subscribe(&editor, |_, _, event, cx| cx.emit(event.clone()))
.detach();
let project = project_handle.read(cx);
@@ -322,7 +325,7 @@ impl ProjectDiagnosticsEditor {
);
let excerpt_id = excerpts
.insert_excerpts_after(
&prev_excerpt_id,
prev_excerpt_id,
buffer.clone(),
[ExcerptRange {
context: excerpt_start..excerpt_end,
@@ -384,7 +387,7 @@ impl ProjectDiagnosticsEditor {
groups_to_add.push(group_state);
} else if let Some((group_ix, group_state)) = to_remove {
excerpts.remove_excerpts(group_state.excerpts.iter(), excerpts_cx);
excerpts.remove_excerpts(group_state.excerpts.iter().copied(), excerpts_cx);
group_ixs_to_remove.push(group_ix);
blocks_to_remove.extend(group_state.blocks.iter().copied());
} else if let Some((_, group)) = to_keep {
@@ -452,15 +455,20 @@ impl ProjectDiagnosticsEditor {
} else {
groups = self.path_states.get(path_ix)?.diagnostic_groups.as_slice();
new_excerpt_ids_by_selection_id =
editor.change_selections(Some(Autoscroll::Fit), cx, |s| s.refresh());
editor.change_selections(Some(Autoscroll::fit()), cx, |s| s.refresh());
selections = editor.selections.all::<usize>(cx);
}
// If any selection has lost its position, move it to start of the next primary diagnostic.
let snapshot = editor.snapshot(cx);
for selection in &mut selections {
if let Some(new_excerpt_id) = new_excerpt_ids_by_selection_id.get(&selection.id) {
let group_ix = match groups.binary_search_by(|probe| {
probe.excerpts.last().unwrap().cmp(new_excerpt_id)
probe
.excerpts
.last()
.unwrap()
.cmp(new_excerpt_id, &snapshot.buffer_snapshot)
}) {
Ok(ix) | Err(ix) => ix,
};
@@ -498,7 +506,7 @@ impl ProjectDiagnosticsEditor {
}
}
impl workspace::Item for ProjectDiagnosticsEditor {
impl Item for ProjectDiagnosticsEditor {
fn tab_content(
&self,
_detail: Option<usize>,
@@ -512,12 +520,8 @@ impl workspace::Item for ProjectDiagnosticsEditor {
)
}
fn project_path(&self, _: &AppContext) -> Option<project::ProjectPath> {
None
}
fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[project::ProjectEntryId; 3]> {
self.editor.project_entry_ids(cx)
fn for_each_project_item(&self, cx: &AppContext, f: &mut dyn FnMut(usize, &dyn project::Item)) {
self.editor.for_each_project_item(cx, f)
}
fn is_singleton(&self, _: &AppContext) -> bool {
@@ -566,7 +570,16 @@ impl workspace::Item for ProjectDiagnosticsEditor {
unreachable!()
}
fn to_item_events(event: &Self::Event) -> Vec<workspace::ItemEvent> {
fn git_diff_recalc(
&mut self,
project: ModelHandle<Project>,
cx: &mut ViewContext<Self>,
) -> Task<Result<()>> {
self.editor
.update(cx, |editor, cx| editor.git_diff_recalc(project, cx))
}
fn to_item_events(event: &Self::Event) -> Vec<ItemEvent> {
Editor::to_item_events(event)
}
@@ -576,7 +589,11 @@ impl workspace::Item for ProjectDiagnosticsEditor {
});
}
fn clone_on_split(&self, cx: &mut ViewContext<Self>) -> Option<Self>
fn clone_on_split(
&self,
_workspace_id: workspace::WorkspaceId,
cx: &mut ViewContext<Self>,
) -> Option<Self>
where
Self: Sized,
{
@@ -605,6 +622,20 @@ impl workspace::Item for ProjectDiagnosticsEditor {
fn deactivated(&mut self, cx: &mut ViewContext<Self>) {
self.editor.update(cx, |editor, cx| editor.deactivated(cx));
}
fn serialized_item_kind() -> Option<&'static str> {
Some("diagnostics")
}
fn deserialize(
project: ModelHandle<Project>,
workspace: WeakViewHandle<Workspace>,
_workspace_id: workspace::WorkspaceId,
_item_id: workspace::ItemId,
cx: &mut ViewContext<Pane>,
) -> Task<Result<ViewHandle<Self>>> {
Task::ready(Ok(cx.add_view(|cx| Self::new(project, workspace, cx))))
}
}
fn diagnostic_header_renderer(diagnostic: Diagnostic) -> RenderBlock {
@@ -738,7 +769,7 @@ mod tests {
DisplayPoint,
};
use gpui::TestAppContext;
use language::{Diagnostic, DiagnosticEntry, DiagnosticSeverity, PointUtf16};
use language::{Diagnostic, DiagnosticEntry, DiagnosticSeverity, PointUtf16, Unclipped};
use serde_json::json;
use unindent::Unindent as _;
use workspace::AppState;
@@ -776,8 +807,15 @@ mod tests {
.await;
let project = Project::test(app_state.fs.clone(), ["/test".as_ref()], cx).await;
let (_, workspace) =
cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx));
let (_, workspace) = cx.add_window(|cx| {
Workspace::new(
Default::default(),
0,
project.clone(),
|_, _| unimplemented!(),
cx,
)
});
// Create some diagnostics
project.update(cx, |project, cx| {
@@ -788,7 +826,7 @@ mod tests {
None,
vec![
DiagnosticEntry {
range: PointUtf16::new(1, 8)..PointUtf16::new(1, 9),
range: Unclipped(PointUtf16::new(1, 8))..Unclipped(PointUtf16::new(1, 9)),
diagnostic: Diagnostic {
message:
"move occurs because `x` has type `Vec<char>`, which does not implement the `Copy` trait"
@@ -801,7 +839,7 @@ mod tests {
},
},
DiagnosticEntry {
range: PointUtf16::new(2, 8)..PointUtf16::new(2, 9),
range: Unclipped(PointUtf16::new(2, 8))..Unclipped(PointUtf16::new(2, 9)),
diagnostic: Diagnostic {
message:
"move occurs because `y` has type `Vec<char>`, which does not implement the `Copy` trait"
@@ -814,7 +852,7 @@ mod tests {
},
},
DiagnosticEntry {
range: PointUtf16::new(3, 6)..PointUtf16::new(3, 7),
range: Unclipped(PointUtf16::new(3, 6))..Unclipped(PointUtf16::new(3, 7)),
diagnostic: Diagnostic {
message: "value moved here".to_string(),
severity: DiagnosticSeverity::INFORMATION,
@@ -825,7 +863,7 @@ mod tests {
},
},
DiagnosticEntry {
range: PointUtf16::new(4, 6)..PointUtf16::new(4, 7),
range: Unclipped(PointUtf16::new(4, 6))..Unclipped(PointUtf16::new(4, 7)),
diagnostic: Diagnostic {
message: "value moved here".to_string(),
severity: DiagnosticSeverity::INFORMATION,
@@ -836,7 +874,7 @@ mod tests {
},
},
DiagnosticEntry {
range: PointUtf16::new(7, 6)..PointUtf16::new(7, 7),
range: Unclipped(PointUtf16::new(7, 6))..Unclipped(PointUtf16::new(7, 7)),
diagnostic: Diagnostic {
message: "use of moved value\nvalue used here after move".to_string(),
severity: DiagnosticSeverity::ERROR,
@@ -847,7 +885,7 @@ mod tests {
},
},
DiagnosticEntry {
range: PointUtf16::new(8, 6)..PointUtf16::new(8, 7),
range: Unclipped(PointUtf16::new(8, 6))..Unclipped(PointUtf16::new(8, 7)),
diagnostic: Diagnostic {
message: "use of moved value\nvalue used here after move".to_string(),
severity: DiagnosticSeverity::ERROR,
@@ -939,7 +977,7 @@ mod tests {
PathBuf::from("/test/consts.rs"),
None,
vec![DiagnosticEntry {
range: PointUtf16::new(0, 15)..PointUtf16::new(0, 15),
range: Unclipped(PointUtf16::new(0, 15))..Unclipped(PointUtf16::new(0, 15)),
diagnostic: Diagnostic {
message: "mismatched types\nexpected `usize`, found `char`".to_string(),
severity: DiagnosticSeverity::ERROR,
@@ -1040,7 +1078,8 @@ mod tests {
None,
vec![
DiagnosticEntry {
range: PointUtf16::new(0, 15)..PointUtf16::new(0, 15),
range: Unclipped(PointUtf16::new(0, 15))
..Unclipped(PointUtf16::new(0, 15)),
diagnostic: Diagnostic {
message: "mismatched types\nexpected `usize`, found `char`"
.to_string(),
@@ -1052,7 +1091,8 @@ mod tests {
},
},
DiagnosticEntry {
range: PointUtf16::new(1, 15)..PointUtf16::new(1, 15),
range: Unclipped(PointUtf16::new(1, 15))
..Unclipped(PointUtf16::new(1, 15)),
diagnostic: Diagnostic {
message: "unresolved name `c`".to_string(),
severity: DiagnosticSeverity::ERROR,

View File

@@ -7,7 +7,7 @@ use gpui::{
use language::Diagnostic;
use project::Project;
use settings::Settings;
use workspace::StatusItemView;
use workspace::{item::ItemHandle, StatusItemView};
pub struct DiagnosticIndicator {
summary: project::DiagnosticSummary,
@@ -219,7 +219,7 @@ impl View for DiagnosticIndicator {
impl StatusItemView for DiagnosticIndicator {
fn set_active_pane_item(
&mut self,
active_pane_item: Option<&dyn workspace::ItemHandle>,
active_pane_item: Option<&dyn ItemHandle>,
cx: &mut ViewContext<Self>,
) {
if let Some(editor) = active_pane_item.and_then(|item| item.downcast::<Editor>()) {

View File

@@ -2,6 +2,7 @@
name = "drag_and_drop"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/drag_and_drop.rs"
@@ -12,4 +13,4 @@ collections = { path = "../collections" }
gpui = { path = "../gpui" }
[dev-dependencies]
gpui = { path = "../gpui", features = ["test-support"] }
gpui = { path = "../gpui", features = ["test-support"] }

View File

@@ -2,29 +2,68 @@ use std::{any::Any, rc::Rc};
use collections::HashSet;
use gpui::{
elements::{MouseEventHandler, Overlay},
geometry::vector::Vector2F,
scene::MouseDrag,
elements::{Empty, MouseEventHandler, Overlay},
geometry::{rect::RectF, vector::Vector2F},
scene::{MouseDown, MouseDrag},
CursorStyle, Element, ElementBox, EventContext, MouseButton, MutableAppContext, RenderContext,
View, WeakViewHandle,
};
struct State<V: View> {
window_id: usize,
position: Vector2F,
region_offset: Vector2F,
payload: Rc<dyn Any + 'static>,
render: Rc<dyn Fn(Rc<dyn Any>, &mut RenderContext<V>) -> ElementBox>,
const DEAD_ZONE: f32 = 4.;
enum State<V: View> {
Down {
region_offset: Vector2F,
region: RectF,
},
DeadZone {
region_offset: Vector2F,
region: RectF,
},
Dragging {
window_id: usize,
position: Vector2F,
region_offset: Vector2F,
region: RectF,
payload: Rc<dyn Any + 'static>,
render: Rc<dyn Fn(Rc<dyn Any>, &mut RenderContext<V>) -> ElementBox>,
},
Canceled,
}
impl<V: View> Clone for State<V> {
fn clone(&self) -> Self {
Self {
window_id: self.window_id.clone(),
position: self.position.clone(),
region_offset: self.region_offset.clone(),
payload: self.payload.clone(),
render: self.render.clone(),
match self {
&State::Down {
region_offset,
region,
} => State::Down {
region_offset,
region,
},
&State::DeadZone {
region_offset,
region,
} => State::DeadZone {
region_offset,
region,
},
State::Dragging {
window_id,
position,
region_offset,
region,
payload,
render,
} => Self::Dragging {
window_id: window_id.clone(),
position: position.clone(),
region_offset: region_offset.clone(),
region: region.clone(),
payload: payload.clone(),
render: render.clone(),
},
State::Canceled => State::Canceled,
}
}
}
@@ -49,24 +88,36 @@ impl<V: View> DragAndDrop<V> {
}
pub fn currently_dragged<T: Any>(&self, window_id: usize) -> Option<(Vector2F, Rc<T>)> {
self.currently_dragged.as_ref().and_then(
|State {
position,
payload,
window_id: window_dragged_from,
..
}| {
self.currently_dragged.as_ref().and_then(|state| {
if let State::Dragging {
position,
payload,
window_id: window_dragged_from,
..
} = state
{
if &window_id != window_dragged_from {
return None;
}
payload
.clone()
.downcast::<T>()
.ok()
.is::<T>()
.then(|| payload.clone().downcast::<T>().ok())
.flatten()
.map(|payload| (position.clone(), payload))
},
)
} else {
None
}
})
}
pub fn drag_started(event: MouseDown, cx: &mut EventContext) {
cx.update_global(|this: &mut Self, _| {
this.currently_dragged = Some(State::Down {
region_offset: event.position - event.region.origin(),
region: event.region,
});
})
}
pub fn dragging<T: Any>(
@@ -76,75 +127,155 @@ impl<V: View> DragAndDrop<V> {
render: Rc<impl 'static + Fn(&T, &mut RenderContext<V>) -> ElementBox>,
) {
let window_id = cx.window_id();
cx.update_global::<Self, _, _>(|this, cx| {
let region_offset = if let Some(previous_state) = this.currently_dragged.as_ref() {
previous_state.region_offset
} else {
event.region.origin() - event.prev_mouse_position
};
this.currently_dragged = Some(State {
window_id,
region_offset,
position: event.position,
payload,
render: Rc::new(move |payload, cx| {
render(payload.downcast_ref::<T>().unwrap(), cx)
}),
});
cx.update_global(|this: &mut Self, cx| {
this.notify_containers_for_window(window_id, cx);
match this.currently_dragged.as_ref() {
Some(&State::Down {
region_offset,
region,
})
| Some(&State::DeadZone {
region_offset,
region,
}) => {
if (event.position - (region.origin() + region_offset)).length() > DEAD_ZONE {
this.currently_dragged = Some(State::Dragging {
window_id,
region_offset,
region,
position: event.position,
payload,
render: Rc::new(move |payload, cx| {
render(payload.downcast_ref::<T>().unwrap(), cx)
}),
});
} else {
this.currently_dragged = Some(State::DeadZone {
region_offset,
region,
})
}
}
Some(&State::Dragging {
region_offset,
region,
..
}) => {
this.currently_dragged = Some(State::Dragging {
window_id,
region_offset,
region,
position: event.position,
payload,
render: Rc::new(move |payload, cx| {
render(payload.downcast_ref::<T>().unwrap(), cx)
}),
});
}
_ => {}
}
});
}
     pub fn render(cx: &mut RenderContext<V>) -> Option<ElementBox> {
-        let currently_dragged = cx.global::<Self>().currently_dragged.clone();
+        enum DraggedElementHandler {}
+        cx.global::<Self>()
+            .currently_dragged
+            .clone()
+            .and_then(|state| {
+                match state {
+                    State::Down { .. } => None,
+                    State::DeadZone { .. } => None,
+                    State::Dragging {
+                        window_id,
+                        region_offset,
+                        position,
+                        region,
+                        payload,
+                        render,
+                    } => {
+                        if cx.window_id() != window_id {
+                            return None;
+                        }

-        currently_dragged.and_then(
-            |State {
-                 window_id,
-                 region_offset,
-                 position,
-                 payload,
-                 render,
-             }| {
-                if cx.window_id() != window_id {
-                    return None;
-                }
+                        let position = position - region_offset;
+                        Some(
+                            Overlay::new(
+                                MouseEventHandler::<DraggedElementHandler>::new(0, cx, |_, cx| {
+                                    render(payload, cx)
+                                })
+                                .with_cursor_style(CursorStyle::Arrow)
+                                .on_up(MouseButton::Left, |_, cx| {
+                                    cx.defer(|cx| {
+                                        cx.update_global::<Self, _, _>(|this, cx| {
+                                            this.finish_dragging(cx)
+                                        });
+                                    });
+                                    cx.propagate_event();
+                                })
+                                .on_up_out(MouseButton::Left, |_, cx| {
+                                    cx.defer(|cx| {
+                                        cx.update_global::<Self, _, _>(|this, cx| {
+                                            this.finish_dragging(cx)
+                                        });
+                                    });
+                                })
+                                // Don't block hover events or invalidations
+                                .with_hoverable(false)
+                                .constrained()
+                                .with_width(region.width())
+                                .with_height(region.height())
+                                .boxed(),
+                            )
+                            .with_anchor_position(position)
+                            .boxed(),
+                        )
+                    }

-                let position = position + region_offset;
-                enum DraggedElementHandler {}
-                Some(
-                    Overlay::new(
-                        MouseEventHandler::<DraggedElementHandler>::new(0, cx, |_, cx| {
-                            render(payload, cx)
+                    State::Canceled => Some(
+                        MouseEventHandler::<DraggedElementHandler>::new(0, cx, |_, _| {
+                            Empty::new()
+                                .constrained()
+                                .with_width(0.)
+                                .with_height(0.)
+                                .boxed()
                         })
                         .with_cursor_style(CursorStyle::Arrow)
                         .on_up(MouseButton::Left, |_, cx| {
                             cx.defer(|cx| {
-                                cx.update_global::<Self, _, _>(|this, cx| this.stop_dragging(cx));
+                                cx.update_global::<Self, _, _>(|this, _| {
+                                    this.currently_dragged = None;
+                                });
                             });
                             cx.propagate_event();
                         })
                         .on_up_out(MouseButton::Left, |_, cx| {
                             cx.defer(|cx| {
-                                cx.update_global::<Self, _, _>(|this, cx| this.stop_dragging(cx));
+                                cx.update_global::<Self, _, _>(|this, _| {
+                                    this.currently_dragged = None;
+                                });
                             });
                         })
                         // Don't block hover events or invalidations
                         .with_hoverable(false)
                         .boxed(),
-                )
-                .with_anchor_position(position)
-                .boxed(),
-            )
-        },
-    )
+                    ),
+                }
+            })
     }
-    fn stop_dragging(&mut self, cx: &mut MutableAppContext) {
-        if let Some(State { window_id, .. }) = self.currently_dragged.take() {
+    pub fn cancel_dragging<P: Any>(&mut self, cx: &mut MutableAppContext) {
+        if let Some(State::Dragging {
+            payload, window_id, ..
+        }) = &self.currently_dragged
+        {
+            if payload.is::<P>() {
+                let window_id = *window_id;
+                self.currently_dragged = Some(State::Canceled);
+                self.notify_containers_for_window(window_id, cx);
+            }
+        }
+    }
+
+    fn finish_dragging(&mut self, cx: &mut MutableAppContext) {
+        if let Some(State::Dragging { window_id, .. }) = self.currently_dragged.take() {
             self.notify_containers_for_window(window_id, cx);
         }
     }
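
cancel_dragging is public and keyed by payload type, so a caller can abort an in-flight drag of one kind of payload without disturbing others. A hedged sketch of such a caller, mirroring the update_global pattern used in the handlers above; DraggedTab is a placeholder payload type, and DragAndDrop<Workspace> assumes the global was registered for the Workspace root view (neither detail is part of this diff):

use drag_and_drop::DragAndDrop;
use gpui::MutableAppContext;
use workspace::Workspace;

// Placeholder payload type used only for illustration.
struct DraggedTab;

// Could be bound to Escape: cancels the drag only if it carries a DraggedTab.
fn cancel_tab_drag(cx: &mut MutableAppContext) {
    cx.update_global::<DragAndDrop<Workspace>, _, _>(|drag_and_drop, cx| {
        drag_and_drop.cancel_dragging::<DraggedTab>(cx);
    });
}
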
@@ -184,7 +315,11 @@ impl<Tag> Draggable for MouseEventHandler<Tag> {
     {
         let payload = Rc::new(payload);
         let render = Rc::new(render);
-        self.on_drag(MouseButton::Left, move |e, cx| {
+        self.on_down(MouseButton::Left, move |e, cx| {
+            cx.propagate_event();
+            DragAndDrop::<V>::drag_started(e, cx);
+        })
+        .on_drag(MouseButton::Left, move |e, cx| {
             let payload = payload.clone();
             let render = render.clone();
             DragAndDrop::<V>::dragging(e, payload, cx, render)
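
This hunk rewires the Draggable impl so a mouse-down first records the press via drag_started and subsequent drag events feed dragging. From the consumer side the pattern looks roughly like the sketch below; the trait method name (as_draggable) sits outside this hunk and is an assumption, Tab and TabHandle are placeholder types, and the import paths are a best guess at gpui's layout rather than something taken from this diff:

use drag_and_drop::Draggable as _; // assumed export of the trait implemented above
use gpui::{
    elements::{Empty, MouseEventHandler},
    Element as _, ElementBox, RenderContext, View,
};

struct Tab;
enum TabHandle {}

fn tab_element<V: View>(cx: &mut RenderContext<V>) -> ElementBox {
    MouseEventHandler::<TabHandle>::new(0, cx, |_, _| Empty::new().boxed())
        .as_draggable(Tab, |_tab: &Tab, _cx: &mut RenderContext<V>| {
            // Rendered by DragAndDrop::render under the cursor while the drag is in flight.
            Empty::new()
                .constrained()
                .with_width(100.)
                .with_height(24.)
                .boxed()
        })
        .boxed()
}
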


@@ -2,6 +2,7 @@
name = "editor"
version = "0.1.0"
edition = "2021"
publish = false
[lib]
path = "src/editor.rs"
@@ -23,6 +24,7 @@ test-support = [
 drag_and_drop = { path = "../drag_and_drop" }
 text = { path = "../text" }
 clock = { path = "../clock" }
+db = { path = "../db" }
 collections = { path = "../collections" }
 context_menu = { path = "../context_menu" }
 fuzzy = { path = "../fuzzy" }
@@ -37,6 +39,7 @@ snippet = { path = "../snippet" }
 sum_tree = { path = "../sum_tree" }
 theme = { path = "../theme" }
 util = { path = "../util" }
+sqlez = { path = "../sqlez" }
 workspace = { path = "../workspace" }
 aho-corasick = "0.7"
 anyhow = "1.0"


@@ -93,6 +93,9 @@ impl BlinkManager {
     pub fn enable(&mut self, cx: &mut ModelContext<Self>) {
         self.enabled = true;
+        // Set cursors as invisible and start blinking: this causes cursors
+        // to be visible during the next render.
+        self.visible = false;
         self.blink_cursors(self.blink_epoch, cx);
     }
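
The added lines force visible to false before the blink loop is (re)started, so the very next toggle turns the cursor on instead of off. An illustrative toy model of that interaction (not the real BlinkManager, which schedules its toggles asynchronously through the ModelContext):

struct Blink {
    enabled: bool,
    visible: bool,
}

impl Blink {
    fn enable(&mut self) {
        self.enabled = true;
        self.visible = false; // mirrors the change above
        self.blink_once(); // first toggle -> the cursor becomes visible
    }

    fn blink_once(&mut self) {
        if self.enabled {
            self.visible = !self.visible;
        }
    }
}
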


@@ -2,7 +2,7 @@ use super::{
     wrap_map::{self, WrapEdit, WrapPoint, WrapSnapshot},
     TextHighlights,
 };
-use crate::{Anchor, ExcerptRange, ToPoint as _};
+use crate::{Anchor, ExcerptId, ExcerptRange, ToPoint as _};
 use collections::{Bound, HashMap, HashSet};
 use gpui::{ElementBox, RenderContext};
 use language::{BufferSnapshot, Chunk, Patch, Point};
@@ -107,7 +107,7 @@ struct Transform {
 pub enum TransformBlock {
     Custom(Arc<Block>),
     ExcerptHeader {
-        key: usize,
+        id: ExcerptId,
         buffer: BufferSnapshot,
         range: ExcerptRange<text::Anchor>,
         height: u8,
@@ -371,7 +371,7 @@ impl BlockMap {
                     .make_wrap_point(Point::new(excerpt_boundary.row, 0), Bias::Left)
                     .row(),
                 TransformBlock::ExcerptHeader {
-                    key: excerpt_boundary.key,
+                    id: excerpt_boundary.id,
                     buffer: excerpt_boundary.buffer,
                     range: excerpt_boundary.range,
                     height: if excerpt_boundary.starts_new_buffer {
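
With excerpt headers now carrying an ExcerptId instead of a bare usize key, code that consumes these blocks can recover the owning excerpt directly from the block itself. A hedged sketch written as if inside this module, against the TransformBlock enum shown above (the helper is hypothetical, not part of this diff):

fn header_excerpt_id(block: &TransformBlock) -> Option<&ExcerptId> {
    match block {
        TransformBlock::ExcerptHeader { id, .. } => Some(id),
        TransformBlock::Custom(_) => None,
    }
}
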

Some files were not shown because too many files have changed in this diff.