Compare commits

...

19 Commits

Author SHA1 Message Date
Cole Miller
da36317a98 Disable more 2025-03-17 11:38:01 -04:00
Cole Miller
ba25690ac0 fix? 2025-03-14 16:59:28 -04:00
Cole Miller
e99c289845 debug 2025-03-14 16:56:06 -04:00
Cole Miller
1a2120da9b fix 2025-03-14 16:13:23 -04:00
Cole Miller
eb698e158e scope scans_running to the whole background scanner 2025-03-14 16:11:29 -04:00
Cole Miller
b7ea7f9045 fix 2025-03-14 15:46:38 -04:00
Cole Miller
b31fe42ec1 address 2025-03-14 15:42:42 -04:00
Cole Miller
9bb472f308 debug more 2025-03-14 15:35:47 -04:00
Cole Miller
816a9fec7d many 2025-03-14 15:31:31 -04:00
Cole Miller
df5efb2d84 fix? 2025-03-14 15:24:18 -04:00
Cole Miller
029d360578 debug 2025-03-14 15:10:16 -04:00
Cole Miller
7739c608a1 debug 2025-03-14 15:05:25 -04:00
Cole Miller
9016b45928 debug 2025-03-14 15:01:39 -04:00
Cole Miller
8111f11c32 debug 2025-03-14 14:50:57 -04:00
Cole Miller
f2f60d9170 again 2025-03-14 14:37:02 -04:00
Cole Miller
e0090e2c09 Debug more 2025-03-14 14:33:36 -04:00
Cole Miller
bbd9b7f18e Put back the other test 2025-03-14 03:22:48 -04:00
Cole Miller
a88994a6c1 Merge remote-tracking branch 'origin/main' into cole/reinstate-tests 2025-03-14 03:21:41 -04:00
Cole Miller
931a44ba1f Fix the tests?? 2025-03-14 03:21:12 -04:00
5 changed files with 221 additions and 170 deletions

View File

@@ -20,4 +20,7 @@ runs:
- name: Run tests
shell: bash -euxo pipefail {0}
run: cargo nextest run --workspace --no-fail-fast
env:
RUST_LOG: worktree=trace
run: |
for i in $(seq 100); do cargo nextest run -p worktree --no-fail-fast; done

View File

@@ -169,32 +169,32 @@ jobs:
mkdir -p ./../.cargo
cp ./.cargo/ci-config.toml ./../.cargo/config.toml
- name: cargo clippy
run: ./script/clippy
# - name: cargo clippy
# run: ./script/clippy
- name: Install cargo-machete
uses: clechasseur/rs-cargo@8435b10f6e71c2e3d4d3b7573003a8ce4bfc6386 # v2
with:
command: install
args: cargo-machete@0.7.0
# - name: Install cargo-machete
# uses: clechasseur/rs-cargo@8435b10f6e71c2e3d4d3b7573003a8ce4bfc6386 # v2
# with:
# command: install
# args: cargo-machete@0.7.0
- name: Check unused dependencies
uses: clechasseur/rs-cargo@8435b10f6e71c2e3d4d3b7573003a8ce4bfc6386 # v2
with:
command: machete
# - name: Check unused dependencies
# uses: clechasseur/rs-cargo@8435b10f6e71c2e3d4d3b7573003a8ce4bfc6386 # v2
# with:
# command: machete
- name: Check licenses
run: |
script/check-licenses
if [[ "${{ needs.job_spec.outputs.run_license }}" == "true" ]]; then
script/generate-licenses /tmp/zed_licenses_output
fi
# - name: Check licenses
# run: |
# script/check-licenses
# if [[ "${{ needs.job_spec.outputs.run_license }}" == "true" ]]; then
# script/generate-licenses /tmp/zed_licenses_output
# fi
- name: Check for new vulnerable dependencies
if: github.event_name == 'pull_request'
uses: actions/dependency-review-action@3b139cfc5fae8b618d3eae3675e383bb1769c019 # v4
with:
license-check: false
# - name: Check for new vulnerable dependencies
# if: github.event_name == 'pull_request'
# uses: actions/dependency-review-action@3b139cfc5fae8b618d3eae3675e383bb1769c019 # v4
# with:
# license-check: false
- name: Run tests
uses: ./.github/actions/run_tests
@@ -222,7 +222,8 @@ jobs:
needs: [job_spec]
if: |
github.repository_owner == 'zed-industries' &&
needs.job_spec.outputs.run_tests == 'true'
needs.job_spec.outputs.run_tests == 'true' &&
false
runs-on:
- buildjet-16vcpu-ubuntu-2204
steps:
@@ -371,7 +372,8 @@ jobs:
needs: [job_spec]
if: |
github.repository_owner == 'zed-industries' &&
needs.job_spec.outputs.run_tests == 'true'
needs.job_spec.outputs.run_tests == 'true' &&
false
# Use bigger runners for PRs (speed); smaller for async (cost)
runs-on: ${{ github.event_name == 'pull_request' && 'windows-2025-32' || 'windows-2025-16' }}
steps:

View File

@@ -588,6 +588,7 @@ impl GitRepository for RealGitRepository {
let working_directory = self.working_directory();
let git_binary_path = self.git_binary_path.clone();
async move {
eprintln!("branches");
let fields = [
"%(HEAD)",
"%(objectname)",
@@ -607,6 +608,7 @@ impl GitRepository for RealGitRepository {
.output()
.await?;
eprintln!("got output");
if !output.status.success() {
return Err(anyhow!(
"Failed to git git branches:\n{}",
@@ -620,11 +622,13 @@ impl GitRepository for RealGitRepository {
if branches.is_empty() {
let args = vec!["symbolic-ref", "--quiet", "--short", "HEAD"];
eprintln!("no branches");
let output = new_smol_command(&git_binary_path)
.current_dir(&working_directory)
.args(args)
.output()
.await?;
eprintln!("symbolic-ref");
// git symbolic-ref returns a non-0 exit code if HEAD points
// to something other than a branch

View File

@@ -61,7 +61,7 @@ use std::{
path::{Component, Path, PathBuf},
pin::Pin,
sync::{
atomic::{self, AtomicU32, AtomicUsize, Ordering::SeqCst},
atomic::{self, AtomicI32, AtomicUsize, Ordering::SeqCst},
Arc,
},
time::{Duration, Instant},
@@ -1525,6 +1525,7 @@ impl LocalWorktree {
fs,
fs_case_sensitive,
status_updates_tx: scan_states_tx,
scans_running: Arc::new(AtomicI32::new(0)),
executor: background,
scan_requests_rx,
path_prefixes_to_scan_rx,
@@ -1565,6 +1566,7 @@ impl LocalWorktree {
barrier,
scanning,
} => {
eprintln!("set is scanning {scanning:?}");
*this.is_scanning.0.borrow_mut() = scanning;
this.set_snapshot(snapshot, changes, cx);
drop(barrier);
@@ -4249,11 +4251,6 @@ struct PathEntry {
scan_id: usize,
}
#[derive(Debug, Default)]
struct FsScanned {
status_scans: Arc<AtomicU32>,
}
impl sum_tree::Item for PathEntry {
type Summary = PathEntrySummary;
@@ -4321,6 +4318,7 @@ struct BackgroundScanner {
fs: Arc<dyn Fs>,
fs_case_sensitive: bool,
status_updates_tx: UnboundedSender<ScanState>,
scans_running: Arc<AtomicI32>,
executor: BackgroundExecutor,
scan_requests_rx: channel::Receiver<ScanRequest>,
path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
@@ -4339,6 +4337,17 @@ enum BackgroundScannerPhase {
}
impl BackgroundScanner {
/// Record that one more scan (initial fs scan or a per-repository git-status
/// scan) is in flight, by bumping the shared `scans_running` counter.
/// Other code loads this counter to decide whether status updates should
/// report `scanning == true`.
fn inc_scans_running(&self) {
let old = self.scans_running.fetch_add(1, atomic::Ordering::SeqCst);
log::trace!("inc scans running old={} new={}", old, old + 1);
}
/// Record that `by` scans have finished by decrementing the shared
/// `scans_running` counter. `by` can be > 1 because a batch of git-status
/// scans may complete together.
///
/// Panics (via the assert) if the counter would go negative — that would
/// mean inc/dec calls are unbalanced, which is exactly the bug this
/// counter refactor is trying to surface.
fn dec_scans_running(&self, by: i32) {
let old = self.scans_running.fetch_sub(by, atomic::Ordering::SeqCst);
assert!(old >= by);
log::trace!("dec scans running old={} new={}", old, old - by);
}
async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>) {
// If the worktree root does not contain a git repository, then find
// the git repository in an ancestor directory. Find any gitignore files
@@ -4428,13 +4437,14 @@ impl BackgroundScanner {
// Perform an initial scan of the directory.
drop(scan_job_tx);
let scans_running = self.scan_dirs(true, scan_job_rx).await;
self.scan_dirs(true, scan_job_rx).await;
{
let mut state = self.state.lock();
state.snapshot.completed_scan_id = state.snapshot.scan_id;
}
let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
let scanning = self.scans_running.load(atomic::Ordering::SeqCst) > 0;
log::trace!("new");
self.send_status_update(scanning, SmallVec::new());
// Process any any FS events that occurred while performing the initial scan.
@@ -4461,7 +4471,7 @@ impl BackgroundScanner {
// these before handling changes reported by the filesystem.
request = self.next_scan_request().fuse() => {
let Ok(request) = request else { break };
let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
let scanning = self.scans_running.load(atomic::Ordering::SeqCst) > 0;
if !self.process_scan_request(request, scanning).await {
return;
}
@@ -4484,7 +4494,8 @@ impl BackgroundScanner {
self.process_events(vec![abs_path]).await;
}
}
let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
let scanning = self.scans_running.load(atomic::Ordering::SeqCst) > 0;
log::trace!("path prefix request");
self.send_status_update(scanning, request.done);
}
@@ -4543,6 +4554,7 @@ impl BackgroundScanner {
)
.await;
log::trace!("process scan request");
self.send_status_update(scanning, request.done)
}
@@ -4678,7 +4690,7 @@ impl BackgroundScanner {
.await;
self.update_ignore_statuses(scan_job_tx).await;
let scans_running = self.scan_dirs(false, scan_job_rx).await;
self.scan_dirs(false, scan_job_rx).await;
let status_update = if !dot_git_abs_paths.is_empty() {
Some(self.update_git_repositories(dot_git_abs_paths))
@@ -4689,6 +4701,7 @@ impl BackgroundScanner {
let phase = self.phase;
let status_update_tx = self.status_updates_tx.clone();
let state = self.state.clone();
let scans_running = self.scans_running.clone();
self.executor
.spawn(async move {
if let Some(status_update) = status_update {
@@ -4704,7 +4717,8 @@ impl BackgroundScanner {
#[cfg(test)]
state.snapshot.check_git_invariants();
}
let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
let scanning = scans_running.load(atomic::Ordering::SeqCst) > 0;
log::trace!("process events");
send_status_update_inner(phase, state, status_update_tx, scanning, SmallVec::new());
})
.detach();
@@ -4729,9 +4743,8 @@ impl BackgroundScanner {
}
drop(scan_job_tx);
}
let scans_running = Arc::new(AtomicU32::new(0));
while let Ok(job) = scan_job_rx.recv().await {
self.scan_dir(&scans_running, &job).await.log_err();
self.scan_dir(&job).await.log_err();
}
!mem::take(&mut self.state.lock().paths_to_scan).is_empty()
@@ -4741,16 +4754,17 @@ impl BackgroundScanner {
&self,
enable_progress_updates: bool,
scan_jobs_rx: channel::Receiver<ScanJob>,
) -> FsScanned {
) {
if self
.status_updates_tx
.unbounded_send(ScanState::Started)
.is_err()
{
return FsScanned::default();
return;
}
let scans_running = Arc::new(AtomicU32::new(1));
log::trace!("start fs scan scans_running += 1");
self.inc_scans_running();
let progress_update_count = AtomicUsize::new(0);
self.executor
.scoped(|scope| {
@@ -4783,6 +4797,7 @@ impl BackgroundScanner {
) {
Ok(_) => {
last_progress_update_count += 1;
log::trace!("periodic");
self.send_status_update(true, SmallVec::new());
}
Err(count) => {
@@ -4795,7 +4810,7 @@ impl BackgroundScanner {
// Recursively load directories from the file system.
job = scan_jobs_rx.recv().fuse() => {
let Ok(job) = job else { break };
if let Err(err) = self.scan_dir(&scans_running, &job).await {
if let Err(err) = self.scan_dir(&job).await {
if job.path.as_ref() != Path::new("") {
log::error!("error scanning directory {:?}: {}", job.abs_path, err);
}
@@ -4808,13 +4823,12 @@ impl BackgroundScanner {
})
.await;
scans_running.fetch_sub(1, atomic::Ordering::Release);
FsScanned {
status_scans: scans_running,
}
log::trace!("fs scanned: {:p} scans_running -= 1", self.scans_running);
self.dec_scans_running(1);
}
fn send_status_update(&self, scanning: bool, barrier: SmallVec<[barrier::Sender; 1]>) -> bool {
eprintln!("status update outer");
send_status_update_inner(
self.phase,
self.state.clone(),
@@ -4824,7 +4838,7 @@ impl BackgroundScanner {
)
}
async fn scan_dir(&self, scans_running: &Arc<AtomicU32>, job: &ScanJob) -> Result<()> {
async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
let root_abs_path;
let root_char_bag;
{
@@ -4864,6 +4878,7 @@ impl BackgroundScanner {
swap_to_front(&mut child_paths, *GITIGNORE);
swap_to_front(&mut child_paths, *DOT_GIT);
//let mut git_status_update_jobs = HashMap::default();
let mut git_status_update_jobs = Vec::new();
for child_abs_path in child_paths {
let child_abs_path: Arc<Path> = child_abs_path.into();
@@ -4879,9 +4894,22 @@ impl BackgroundScanner {
self.watcher.as_ref(),
);
if let Some(local_repo) = repo {
scans_running.fetch_add(1, atomic::Ordering::Release);
git_status_update_jobs
.push(self.schedule_git_statuses_update(&mut state, local_repo));
let _path_key = local_repo.work_directory.path_key();
log::trace!(
"schedule {:p} scans_running += 1",
Arc::as_ptr(&self.scans_running)
);
self.inc_scans_running();
let (_old, rx) = self.schedule_git_statuses_update(&mut state, local_repo);
//if old.is_some() {
// log::trace!(
// "schedule {:p} scans_running -= 1",
// Arc::as_ptr(&self.scans_running)
// );
// self.dec_scans_running(1);
//}
//git_status_update_jobs.insert(path_key, rx);
git_status_update_jobs.push(rx);
}
}
} else if child_name == *GITIGNORE {
@@ -5002,7 +5030,7 @@ impl BackgroundScanner {
let task_state = self.state.clone();
let phase = self.phase;
let status_updates_tx = self.status_updates_tx.clone();
let scans_running = scans_running.clone();
let scans_running = self.scans_running.clone();
self.executor
.spawn(async move {
if !git_status_update_jobs.is_empty() {
@@ -5010,9 +5038,19 @@ impl BackgroundScanner {
let status_updated = status_updates
.iter()
.any(|update_result| update_result.is_ok());
scans_running.fetch_sub(status_updates.len() as u32, atomic::Ordering::Release);
log::trace!(
"status updates: {scans_running:p} scans_running -= {}",
status_updates.len()
);
{
let by = status_updates.len() as i32;
let old = scans_running.fetch_sub(by, atomic::Ordering::SeqCst);
assert!(old >= by);
log::trace!("dec scans running old={} new={}", old, old - by);
};
if status_updated {
let scanning = scans_running.load(atomic::Ordering::Acquire) > 0;
let scanning = scans_running.load(atomic::Ordering::SeqCst) > 0;
log::trace!("scan dir");
send_status_update_inner(
phase,
task_state,
@@ -5470,8 +5508,10 @@ impl BackgroundScanner {
}
};
status_updates
.push(self.schedule_git_statuses_update(&mut state, local_repository));
status_updates.push(
self.schedule_git_statuses_update(&mut state, local_repository)
.1,
);
}
// Remove any git repositories whose .git entry no longer exists.
@@ -5512,108 +5552,17 @@ impl BackgroundScanner {
fn schedule_git_statuses_update(
&self,
state: &mut BackgroundScannerState,
mut local_repository: LocalRepositoryEntry,
) -> oneshot::Receiver<()> {
let repository_name = local_repository.work_directory.display_name();
let path_key = local_repository.work_directory.path_key();
local_repository: LocalRepositoryEntry,
) -> (Option<Task<()>>, oneshot::Receiver<()>) {
let job_state = self.state.clone();
let (tx, rx) = oneshot::channel();
state.repository_scans.insert(
path_key.clone(),
self.executor.spawn(async move {
update_branches(&job_state, &mut local_repository)
.await
.log_err();
log::trace!("updating git statuses for repo {repository_name}",);
let t0 = Instant::now();
let Some(statuses) = local_repository
.repo()
.status(&[git::WORK_DIRECTORY_REPO_PATH.clone()])
.log_err()
else {
return;
};
log::trace!(
"computed git statuses for repo {repository_name} in {:?}",
t0.elapsed()
);
let t0 = Instant::now();
let mut changed_paths = Vec::new();
let snapshot = job_state.lock().snapshot.snapshot.clone();
let Some(mut repository) = snapshot
.repository(path_key)
.context(
"Tried to update git statuses for a repository that isn't in the snapshot",
)
.log_err()
else {
return;
};
let merge_head_shas = local_repository.repo().merge_head_shas();
if merge_head_shas != local_repository.current_merge_head_shas {
mem::take(&mut repository.current_merge_conflicts);
}
let mut new_entries_by_path = SumTree::new(&());
for (repo_path, status) in statuses.entries.iter() {
let project_path = repository.work_directory.try_unrelativize(repo_path);
new_entries_by_path.insert_or_replace(
StatusEntry {
repo_path: repo_path.clone(),
status: *status,
},
&(),
);
if status.is_conflicted() {
repository.current_merge_conflicts.insert(repo_path.clone());
}
if let Some(path) = project_path {
changed_paths.push(path);
}
}
repository.statuses_by_path = new_entries_by_path;
let mut state = job_state.lock();
state
.snapshot
.repositories
.insert_or_replace(repository, &());
state.snapshot.git_repositories.update(
&local_repository.work_directory_id,
|entry| {
entry.current_merge_head_shas = merge_head_shas;
entry.merge_message = std::fs::read_to_string(
local_repository.dot_git_dir_abs_path.join("MERGE_MSG"),
)
.ok()
.and_then(|merge_msg| Some(merge_msg.lines().next()?.to_owned()));
entry.status_scan_id += 1;
},
);
util::extend_sorted(
&mut state.changed_paths,
changed_paths,
usize::MAX,
Ord::cmp,
);
log::trace!(
"applied git status updates for repo {repository_name} in {:?}",
t0.elapsed(),
);
tx.send(()).ok();
}),
let old = state.repository_scans.insert(
local_repository.work_directory.path_key(),
self.executor
.spawn(do_git_status_update(job_state, local_repository, tx)),
);
rx
(old, rx)
}
async fn progress_timer(&self, running: bool) {
@@ -5650,6 +5599,7 @@ fn send_status_update_inner(
scanning: bool,
barrier: SmallVec<[barrier::Sender; 1]>,
) -> bool {
log::trace!("status update inner {scanning:?}");
let mut state = state.lock();
if state.changed_paths.is_empty() && scanning {
return true;
@@ -5675,6 +5625,7 @@ async fn update_branches(
repository: &mut LocalRepositoryEntry,
) -> Result<()> {
let branches = repository.repo().branches().await?;
log::trace!("retrieved branches");
let snapshot = state.lock().snapshot.snapshot.clone();
let mut repository = snapshot
.repository(repository.work_directory.path_key())
@@ -5690,6 +5641,109 @@ async fn update_branches(
Ok(())
}
/// Debugging aid: a zero-sized guard whose only effect is to emit a trace
/// log entry when it is dropped. Placing one at the top of a scope makes it
/// visible in the logs when that scope is torn down (e.g. when an async
/// task completes or is cancelled).
struct NoisyDrop;
impl Drop for NoisyDrop {
fn drop(&mut self) {
log::trace!("dropped a noisydrop");
}
}
/// Refresh branch info and git file statuses for one repository, then write
/// the results into the shared background-scanner snapshot.
///
/// Steps: update branches, run `status` for the whole work directory, rebuild
/// the repository's `statuses_by_path` tree (tracking merge conflicts and
/// which project paths changed), merge the results into the snapshot under
/// the state lock, and finally signal completion on `tx`.
///
/// `tx` is the completion signal; the receiver end is what callers (e.g.
/// `schedule_git_statuses_update`) await to know this scan finished. If this
/// future is dropped early, the sender is dropped and the receiver errors out.
async fn do_git_status_update(
job_state: Arc<Mutex<BackgroundScannerState>>,
mut local_repository: LocalRepositoryEntry,
tx: oneshot::Sender<()>,
) {
// Logs on drop, so cancellation vs. completion of this task shows up in traces.
let _noisy = NoisyDrop;
let repository_name = local_repository.work_directory.display_name();
log::trace!("updating git branches for repo {repository_name}");
// Branch update failures are logged and swallowed; status work continues.
update_branches(&job_state, &mut local_repository)
.await
.log_err();
let t0 = Instant::now();
log::trace!("updating git statuses for repo {repository_name}");
// Bail out (without sending on `tx`) if the status query fails.
let Some(statuses) = local_repository
.repo()
.status(&[git::WORK_DIRECTORY_REPO_PATH.clone()])
.log_err()
else {
return;
};
log::trace!(
"computed git statuses for repo {repository_name} in {:?}",
t0.elapsed()
);
let t0 = Instant::now();
let mut changed_paths = Vec::new();
// Clone the snapshot so the lock is held only briefly; the mutated copy is
// written back under the lock at the end.
let snapshot = job_state.lock().snapshot.snapshot.clone();
let Some(mut repository) = snapshot
.repository(local_repository.work_directory.path_key())
.context("Tried to update git statuses for a repository that isn't in the snapshot")
.log_err()
else {
return;
};
// If the merge heads changed since the last scan, the recorded conflicts
// are stale — reset them before re-deriving from the fresh statuses.
let merge_head_shas = local_repository.repo().merge_head_shas();
if merge_head_shas != local_repository.current_merge_head_shas {
mem::take(&mut repository.current_merge_conflicts);
}
// Rebuild the per-path status tree from scratch for this repository.
let mut new_entries_by_path = SumTree::new(&());
for (repo_path, status) in statuses.entries.iter() {
let project_path = repository.work_directory.try_unrelativize(repo_path);
new_entries_by_path.insert_or_replace(
StatusEntry {
repo_path: repo_path.clone(),
status: *status,
},
&(),
);
if status.is_conflicted() {
repository.current_merge_conflicts.insert(repo_path.clone());
}
// Only paths that map back into the project contribute to change
// notifications.
if let Some(path) = project_path {
changed_paths.push(path);
}
}
repository.statuses_by_path = new_entries_by_path;
// Publish results: hold the state lock for the whole write-back so the
// snapshot is updated atomically with respect to other scanner work.
let mut state = job_state.lock();
state
.snapshot
.repositories
.insert_or_replace(repository, &());
state
.snapshot
.git_repositories
.update(&local_repository.work_directory_id, |entry| {
entry.current_merge_head_shas = merge_head_shas;
// First line of MERGE_MSG (if any) becomes the merge message;
// blocking fs read here — NOTE(review): assumes this task runs on
// a background executor where blocking is acceptable.
entry.merge_message =
std::fs::read_to_string(local_repository.dot_git_dir_abs_path.join("MERGE_MSG"))
.ok()
.and_then(|merge_msg| Some(merge_msg.lines().next()?.to_owned()));
entry.status_scan_id += 1;
});
util::extend_sorted(
&mut state.changed_paths,
changed_paths,
usize::MAX,
Ord::cmp,
);
log::trace!(
"applied git status updates for repo {repository_name} in {:?}",
t0.elapsed(),
);
// Signal completion; the receiver may already be gone, which is fine.
tx.send(()).ok();
}
fn build_diff(
phase: BackgroundScannerPhase,
old_snapshot: &Snapshot,

View File

@@ -845,9 +845,7 @@ async fn test_update_gitignore(cx: &mut TestAppContext) {
});
}
// TODO: Fix flaky test.
// #[gpui::test]
#[allow(unused)]
#[gpui::test]
async fn test_write_file(cx: &mut TestAppContext) {
init_test(cx);
cx.executor().allow_parking();
@@ -2432,9 +2430,7 @@ async fn test_git_repository_for_path(cx: &mut TestAppContext) {
// you can't rename a directory which some program has already open. This is a
// limitation of the Windows. See:
// https://stackoverflow.com/questions/41365318/access-is-denied-when-renaming-folder
// TODO: Fix flaky test.
// #[gpui::test]
#[allow(unused)]
#[gpui::test]
#[cfg_attr(target_os = "windows", ignore)]
async fn test_file_status(cx: &mut TestAppContext) {
init_test(cx);
@@ -2627,9 +2623,7 @@ async fn test_file_status(cx: &mut TestAppContext) {
});
}
// TODO: Fix flaky test.
// #[gpui::test]
#[allow(unused)]
#[gpui::test]
async fn test_git_repository_status(cx: &mut TestAppContext) {
init_test(cx);
cx.executor().allow_parking();
@@ -2743,9 +2737,7 @@ async fn test_git_repository_status(cx: &mut TestAppContext) {
});
}
// TODO: Fix flaky test.
// #[gpui::test]
#[allow(unused)]
#[gpui::test]
async fn test_git_status_postprocessing(cx: &mut TestAppContext) {
init_test(cx);
cx.executor().allow_parking();
@@ -3541,8 +3533,6 @@ fn git_cherry_pick(commit: &git2::Commit<'_>, repo: &git2::Repository) {
repo.cherrypick(commit, None).expect("Failed to cherrypick");
}
// TODO: Remove allow(unused) once flaky tests are reinstated
#[allow(unused)]
#[track_caller]
fn git_stash(repo: &mut git2::Repository) {
use git2::Signature;
@@ -3552,8 +3542,6 @@ fn git_stash(repo: &mut git2::Repository) {
.expect("Failed to stash");
}
// TODO: Remove allow(unused) once flaky tests are reinstated
#[allow(unused)]
#[track_caller]
fn git_reset(offset: usize, repo: &git2::Repository) {
let head = repo.head().expect("Couldn't get repo head");