remove local-vfs

Aleksey Kladov 2019-02-18 16:43:30 +03:00
parent 1bdd935e91
commit a6897a837c
6 changed files with 4 additions and 934 deletions

Cargo.lock (generated)

@@ -928,7 +928,7 @@ dependencies = [
"ra_hir 0.1.0",
"ra_project_model 0.1.0",
"ra_syntax 0.1.0",
"ra_vfs 0.1.0",
"ra_vfs 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"test_utils 0.1.0",
]
@@ -1050,7 +1050,7 @@ dependencies = [
"ra_project_model 0.1.0",
"ra_syntax 0.1.0",
"ra_text_edit 0.1.0",
"ra_vfs 0.1.0",
"ra_vfs 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"relative-path 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1121,15 +1121,14 @@ dependencies = [
[[package]]
name = "ra_vfs"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"crossbeam-channel 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"flexi_logger 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"notify 4.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"relative-path 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
"walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1986,6 +1985,7 @@ dependencies = [
"checksum proptest 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea66c78d75f2c6e9f304269eaef90899798daecc69f1a625d5a3dd793ff3522"
"checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0"
"checksum quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "cdd8e04bd9c52e0342b406469d494fcb033be4bdbe5c606016defbb1681411e1"
"checksum ra_vfs 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d191ef0680eea419a302e0d09a00c00dfed1ec320406813bc100f93d1abe28dc"
"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"


@@ -1,18 +0,0 @@
[package]
edition = "2018"
name = "ra_vfs"
version = "0.1.0"
authors = ["rust-analyzer developers"]
[dependencies]
walkdir = "2.2.7"
relative-path = "0.4.0"
rustc-hash = "1.0"
crossbeam-channel = "0.3.5"
log = "0.4.6"
notify = "4.0.9"
parking_lot = "0.7.0"
[dev-dependencies]
tempfile = "3"
flexi_logger = "0.10.0"


@@ -1,286 +0,0 @@
use std::{
fs,
path::{Path, PathBuf},
sync::{mpsc, Arc},
time::Duration,
thread,
};
use crossbeam_channel::{Sender, Receiver, unbounded, RecvError, select};
use relative_path::RelativePathBuf;
use walkdir::WalkDir;
use notify::{DebouncedEvent, RecommendedWatcher, RecursiveMode, Watcher as _Watcher};
use crate::{Roots, VfsRoot, VfsTask};
pub(crate) enum Task {
AddRoot { root: VfsRoot },
}
/// `TaskResult` transfers files read on the IO thread to the VFS on the main
/// thread.
#[derive(Debug)]
pub(crate) enum TaskResult {
/// Emitted when we've recursively scanned a source root during the initial
/// load.
BulkLoadRoot { root: VfsRoot, files: Vec<(RelativePathBuf, String)> },
/// Emitted when we've noticed that a single file has changed.
///
/// Note that this by design does not distinguish between
/// create/delete/write events, and instead specifies the *current* state of
/// the file. The idea is to guarantee that in the quiescent state the sum
/// of all results equals the current state of the file system, while
/// allowing us to skip intermediate events in non-quiescent states.
SingleFile { root: VfsRoot, path: RelativePathBuf, text: Option<String> },
}
/// The kind of raw notification we've received from the notify library.
///
/// Note that these are not necessarily 100% precise (for example we might receive
/// `Create` instead of `Write`, see #734), but we try to distinguish `Create`s
/// to implement recursive watching of directories.
#[derive(Debug)]
enum ChangeKind {
Create,
Write,
Remove,
}
const WATCHER_DELAY: Duration = Duration::from_millis(250);
// Like thread::JoinHandle, but joins the thread on drop.
//
// This is useful because it guarantees the absence of runaway threads, even if
// code panics. This is important because we might see panics in tests, and
// we might be used in an IDE context, where a failed component is just
// restarted.
//
// Because all threads are joined, care must be taken to avoid deadlocks. That
// typically means ensuring that channels are dropped before the threads.
struct ScopedThread(Option<thread::JoinHandle<()>>);
impl ScopedThread {
fn spawn(name: String, f: impl FnOnce() + Send + 'static) -> ScopedThread {
let handle = thread::Builder::new().name(name).spawn(f).unwrap();
ScopedThread(Some(handle))
}
}
impl Drop for ScopedThread {
fn drop(&mut self) {
let res = self.0.take().unwrap().join();
if !thread::panicking() {
res.unwrap();
}
}
}
pub(crate) struct Worker {
// XXX: field order is significant here.
//
// In Rust, fields are dropped in the declaration order, and we rely on this
// here. We must close the sender first, so that the `thread` (which holds the
// opposite side of the channel) notices shutdown. Then, we must join the
// thread, but we must keep receiver alive so that the thread does not
// panic.
pub(crate) sender: Sender<Task>,
_thread: ScopedThread,
pub(crate) receiver: Receiver<VfsTask>,
}
pub(crate) fn start(roots: Arc<Roots>) -> Worker {
// This is a pretty elaborate setup of threads & channels! It is
// explained by the following concerns:
// * we need to burn a thread translating from notify's mpsc to
// crossbeam_channel.
// * we want to read all files from a single thread, to guarantee that
// we always get fresher versions and never go back in time.
// * we want to tear down everything neatly during shutdown.
let _thread;
// These are the channels we use to communicate with the outside world.
// If `input_receiver` is closed we need to tear ourselves down.
// `output_sender` should not be closed unless the parent died.
let (input_sender, input_receiver) = unbounded();
let (output_sender, output_receiver) = unbounded();
_thread = ScopedThread::spawn("vfs".to_string(), move || {
// Make sure that the destruction order is
//
// * notify_sender
// * _thread
// * watcher_sender
//
// this is required to avoid deadlocks.
// These are the corresponding crossbeam channels
let (watcher_sender, watcher_receiver) = unbounded();
let _notify_thread;
{
// These are `std` channels notify will send events to
let (notify_sender, notify_receiver) = mpsc::channel();
let mut watcher = notify::watcher(notify_sender, WATCHER_DELAY)
.map_err(|e| log::error!("failed to spawn notify {}", e))
.ok();
// Start a silly thread to transform between two channels
_notify_thread = ScopedThread::spawn("notify-convertor".to_string(), move || {
notify_receiver
.into_iter()
.for_each(|event| convert_notify_event(event, &watcher_sender))
});
// Process requests from the caller or notifications from the
// watcher until the caller says stop.
loop {
select! {
// Received request from the caller. If this channel is
// closed, we should shut down everything.
recv(input_receiver) -> t => match t {
Err(RecvError) => {
drop(input_receiver);
break
},
Ok(Task::AddRoot { root }) => {
watch_root(watcher.as_mut(), &output_sender, &*roots, root);
}
},
// The watcher sends us changes. If **this** channel is
// closed, the watcher has died, which indicates a bug
// -- escalate!
recv(watcher_receiver) -> event => match event {
Err(RecvError) => panic!("watcher is dead"),
Ok((path, change)) => {
handle_change(watcher.as_mut(), &output_sender, &*roots, path, change);
}
},
}
}
}
// Drain pending events: we are not interested in them anyway!
watcher_receiver.into_iter().for_each(|_| ());
});
Worker { sender: input_sender, _thread, receiver: output_receiver }
}
fn watch_root(
watcher: Option<&mut RecommendedWatcher>,
sender: &Sender<VfsTask>,
roots: &Roots,
root: VfsRoot,
) {
let root_path = roots.path(root);
log::debug!("loading {} ...", root_path.display());
let files = watch_recursive(watcher, root_path, roots, root)
.into_iter()
.filter_map(|path| {
let abs_path = path.to_path(&root_path);
let text = read_to_string(&abs_path)?;
Some((path, text))
})
.collect();
let res = TaskResult::BulkLoadRoot { root, files };
sender.send(VfsTask(res)).unwrap();
log::debug!("... loaded {}", root_path.display());
}
fn convert_notify_event(event: DebouncedEvent, sender: &Sender<(PathBuf, ChangeKind)>) {
// forward relevant events only
match event {
DebouncedEvent::NoticeWrite(_)
| DebouncedEvent::NoticeRemove(_)
| DebouncedEvent::Chmod(_) => {
// ignore
}
DebouncedEvent::Rescan => {
// TODO: rescan all roots
}
DebouncedEvent::Create(path) => {
sender.send((path, ChangeKind::Create)).unwrap();
}
DebouncedEvent::Write(path) => {
sender.send((path, ChangeKind::Write)).unwrap();
}
DebouncedEvent::Remove(path) => {
sender.send((path, ChangeKind::Remove)).unwrap();
}
DebouncedEvent::Rename(src, dst) => {
sender.send((src, ChangeKind::Remove)).unwrap();
sender.send((dst, ChangeKind::Create)).unwrap();
}
DebouncedEvent::Error(err, path) => {
// TODO: should we reload the file contents?
log::warn!("watcher error \"{}\", {:?}", err, path);
}
}
}
fn handle_change(
watcher: Option<&mut RecommendedWatcher>,
sender: &Sender<VfsTask>,
roots: &Roots,
path: PathBuf,
kind: ChangeKind,
) {
let (root, rel_path) = match roots.find(&path) {
None => return,
Some(it) => it,
};
match kind {
ChangeKind::Create => {
let mut paths = Vec::new();
if path.is_dir() {
paths.extend(watch_recursive(watcher, &path, roots, root));
} else {
paths.push(rel_path);
}
paths
.into_iter()
.try_for_each(|rel_path| {
let abs_path = rel_path.to_path(&roots.path(root));
let text = read_to_string(&abs_path);
let res = TaskResult::SingleFile { root, path: rel_path, text };
sender.send(VfsTask(res))
})
.unwrap()
}
ChangeKind::Write | ChangeKind::Remove => {
let text = read_to_string(&path);
let res = TaskResult::SingleFile { root, path: rel_path, text };
sender.send(VfsTask(res)).unwrap();
}
}
}
fn watch_recursive(
mut watcher: Option<&mut RecommendedWatcher>,
dir: &Path,
roots: &Roots,
root: VfsRoot,
) -> Vec<RelativePathBuf> {
let mut files = Vec::new();
for entry in WalkDir::new(dir)
.into_iter()
.filter_entry(|it| roots.contains(root, it.path()).is_some())
.filter_map(|it| it.map_err(|e| log::warn!("watcher error: {}", e)).ok())
{
if entry.file_type().is_dir() {
if let Some(watcher) = &mut watcher {
watch_one(watcher, entry.path());
}
} else {
let path = roots.contains(root, entry.path()).unwrap();
files.push(path.to_owned());
}
}
files
}
fn watch_one(watcher: &mut RecommendedWatcher, dir: &Path) {
match watcher.watch(dir, RecursiveMode::NonRecursive) {
Ok(()) => log::debug!("watching \"{}\"", dir.display()),
Err(e) => log::warn!("could not watch \"{}\": {}", dir.display(), e),
}
}
fn read_to_string(path: &Path) -> Option<String> {
fs::read_to_string(&path).map_err(|e| log::warn!("failed to read file {}", e)).ok()
}
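
The `Worker` above encodes its shutdown protocol in drop order: the caller closes the sender first so the worker's loop ends, the thread is then joined, and the receiver stays alive until the very end so the thread's final sends cannot panic. A minimal standalone sketch of that pattern (not part of this diff; the channel and variable names are invented):

use std::thread;
use crossbeam_channel::unbounded;

fn main() {
    // Request channel: caller -> worker; response channel: worker -> caller.
    let (req_tx, req_rx) = unbounded::<&'static str>();
    let (resp_tx, resp_rx) = unbounded::<&'static str>();
    let worker = thread::spawn(move || {
        // The worker loops until the request channel is closed and drained.
        for req in req_rx {
            // The caller keeps `resp_rx` alive, so this send cannot fail.
            resp_tx.send(req).unwrap();
        }
    });
    drop(req_tx);           // 1. close the sender: the worker's loop terminates
    worker.join().unwrap(); // 2. join the worker thread
    drop(resp_rx);          // 3. only now drop the receiver
}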


@@ -1,296 +0,0 @@
//! VFS stands for Virtual File System.
//!
//! When doing analysis, we don't want to do any IO, we want to keep all source
//! code in memory. However, the actual source code is stored on disk, so you
//! need to get it into the memory in the first place somehow. VFS is the
//! component which does this.
//!
//! It is also responsible for watching the disk for changes, and for merging
//! editor state (modified, unsaved files) with disk state.
//!
//! TODO: Some LSP clients support watching the disk, so this crate should
//! support custom watcher events (related to
//! <https://github.com/rust-analyzer/rust-analyzer/issues/131>)
//!
//! VFS is based on a concept of roots: a set of directories on the file system
//! which are watched for changes. Typically, there will be a root for each
//! Cargo package.
mod roots;
mod io;
use std::{
fmt, fs, mem,
path::{Path, PathBuf},
sync::Arc,
};
use crossbeam_channel::Receiver;
use relative_path::{RelativePath, RelativePathBuf};
use rustc_hash::{FxHashMap, FxHashSet};
use crate::{
io::{TaskResult, Worker},
roots::Roots,
};
pub use crate::roots::VfsRoot;
/// Opaque wrapper around file-system event.
///
/// Calling code is expected to just pass `VfsTask` to the `handle_task` method. It
/// is exposed as a public API so that the caller can plug vfs events into the
/// main event loop and be notified when changes happen.
pub struct VfsTask(TaskResult);
impl fmt::Debug for VfsTask {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("VfsTask { ... }")
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct VfsFile(pub u32);
struct VfsFileData {
root: VfsRoot,
path: RelativePathBuf,
is_overlayed: bool,
text: Arc<String>,
}
pub struct Vfs {
roots: Arc<Roots>,
files: Vec<VfsFileData>,
root2files: FxHashMap<VfsRoot, FxHashSet<VfsFile>>,
pending_changes: Vec<VfsChange>,
worker: Worker,
}
impl fmt::Debug for Vfs {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Vfs")
.field("n_roots", &self.roots.len())
.field("n_files", &self.files.len())
.field("n_pending_changes", &self.pending_changes.len())
.finish()
}
}
#[derive(Debug, Clone)]
pub enum VfsChange {
AddRoot { root: VfsRoot, files: Vec<(VfsFile, RelativePathBuf, Arc<String>)> },
AddFile { root: VfsRoot, file: VfsFile, path: RelativePathBuf, text: Arc<String> },
RemoveFile { root: VfsRoot, file: VfsFile, path: RelativePathBuf },
ChangeFile { file: VfsFile, text: Arc<String> },
}
impl Vfs {
pub fn new(roots: Vec<PathBuf>) -> (Vfs, Vec<VfsRoot>) {
let roots = Arc::new(Roots::new(roots));
let worker = io::start(Arc::clone(&roots));
let mut root2files = FxHashMap::default();
for root in roots.iter() {
root2files.insert(root, Default::default());
worker.sender.send(io::Task::AddRoot { root }).unwrap();
}
let res = Vfs { roots, files: Vec::new(), root2files, worker, pending_changes: Vec::new() };
let vfs_roots = res.roots.iter().collect();
(res, vfs_roots)
}
pub fn root2path(&self, root: VfsRoot) -> PathBuf {
self.roots.path(root).to_path_buf()
}
pub fn path2file(&self, path: &Path) -> Option<VfsFile> {
if let Some((_root, _path, Some(file))) = self.find_root(path) {
return Some(file);
}
None
}
pub fn file2path(&self, file: VfsFile) -> PathBuf {
let rel_path = &self.file(file).path;
let root_path = &self.roots.path(self.file(file).root);
rel_path.to_path(root_path)
}
pub fn n_roots(&self) -> usize {
self.roots.len()
}
pub fn load(&mut self, path: &Path) -> Option<VfsFile> {
if let Some((root, rel_path, file)) = self.find_root(path) {
return if let Some(file) = file {
Some(file)
} else {
let text = fs::read_to_string(path).unwrap_or_default();
let text = Arc::new(text);
let file = self.raw_add_file(root, rel_path.clone(), Arc::clone(&text), false);
let change = VfsChange::AddFile { file, text, root, path: rel_path };
self.pending_changes.push(change);
Some(file)
};
}
None
}
pub fn add_file_overlay(&mut self, path: &Path, text: String) -> Option<VfsFile> {
let (root, rel_path, file) = self.find_root(path)?;
if let Some(file) = file {
self.change_file_event(file, text, true);
Some(file)
} else {
self.add_file_event(root, rel_path, text, true)
}
}
pub fn change_file_overlay(&mut self, path: &Path, new_text: String) {
if let Some((_root, _path, file)) = self.find_root(path) {
let file = file.expect("can't change a file which wasn't added");
self.change_file_event(file, new_text, true);
}
}
pub fn remove_file_overlay(&mut self, path: &Path) -> Option<VfsFile> {
let (root, rel_path, file) = self.find_root(path)?;
let file = file.expect("can't remove a file which wasn't added");
let full_path = rel_path.to_path(&self.roots.path(root));
if let Ok(text) = fs::read_to_string(&full_path) {
self.change_file_event(file, text, false);
} else {
self.remove_file_event(root, rel_path, file);
}
Some(file)
}
pub fn commit_changes(&mut self) -> Vec<VfsChange> {
mem::replace(&mut self.pending_changes, Vec::new())
}
pub fn task_receiver(&self) -> &Receiver<VfsTask> {
&self.worker.receiver
}
pub fn handle_task(&mut self, task: VfsTask) {
match task.0 {
TaskResult::BulkLoadRoot { root, files } => {
let mut cur_files = Vec::new();
// While we were scanning the root in the background, a file might have
// been open in the editor, so we need to account for that.
let existing = self.root2files[&root]
.iter()
.map(|&file| (self.file(file).path.clone(), file))
.collect::<FxHashMap<_, _>>();
for (path, text) in files {
if let Some(&file) = existing.get(&path) {
let text = Arc::clone(&self.file(file).text);
cur_files.push((file, path, text));
continue;
}
let text = Arc::new(text);
let file = self.raw_add_file(root, path.clone(), Arc::clone(&text), false);
cur_files.push((file, path, text));
}
let change = VfsChange::AddRoot { root, files: cur_files };
self.pending_changes.push(change);
}
TaskResult::SingleFile { root, path, text } => {
let existing_file = self.find_file(root, &path);
if existing_file.map(|file| self.file(file).is_overlayed) == Some(true) {
return;
}
match (existing_file, text) {
(Some(file), None) => {
self.remove_file_event(root, path, file);
}
(None, Some(text)) => {
self.add_file_event(root, path, text, false);
}
(Some(file), Some(text)) => {
self.change_file_event(file, text, false);
}
(None, None) => (),
}
}
}
}
// *_event calls change the state of VFS and push a change onto the pending
// changes array.
fn add_file_event(
&mut self,
root: VfsRoot,
path: RelativePathBuf,
text: String,
is_overlay: bool,
) -> Option<VfsFile> {
let text = Arc::new(text);
let file = self.raw_add_file(root, path.clone(), text.clone(), is_overlay);
self.pending_changes.push(VfsChange::AddFile { file, root, path, text });
Some(file)
}
fn change_file_event(&mut self, file: VfsFile, text: String, is_overlay: bool) {
let text = Arc::new(text);
self.raw_change_file(file, text.clone(), is_overlay);
self.pending_changes.push(VfsChange::ChangeFile { file, text });
}
fn remove_file_event(&mut self, root: VfsRoot, path: RelativePathBuf, file: VfsFile) {
self.raw_remove_file(file);
self.pending_changes.push(VfsChange::RemoveFile { root, path, file });
}
// raw_* calls change the state of VFS, but **do not** emit events.
fn raw_add_file(
&mut self,
root: VfsRoot,
path: RelativePathBuf,
text: Arc<String>,
is_overlayed: bool,
) -> VfsFile {
let data = VfsFileData { root, path, text, is_overlayed };
let file = VfsFile(self.files.len() as u32);
self.files.push(data);
self.root2files.get_mut(&root).unwrap().insert(file);
file
}
fn raw_change_file(&mut self, file: VfsFile, new_text: Arc<String>, is_overlayed: bool) {
let file_data = self.file_mut(file);
file_data.text = new_text;
file_data.is_overlayed = is_overlayed;
}
fn raw_remove_file(&mut self, file: VfsFile) {
// FIXME: use arena with removal
self.file_mut(file).text = Default::default();
self.file_mut(file).path = Default::default();
let root = self.file(file).root;
let removed = self.root2files.get_mut(&root).unwrap().remove(&file);
assert!(removed);
}
fn find_root(&self, path: &Path) -> Option<(VfsRoot, RelativePathBuf, Option<VfsFile>)> {
let (root, path) = self.roots.find(&path)?;
let file = self.find_file(root, &path);
Some((root, path, file))
}
fn find_file(&self, root: VfsRoot, path: &RelativePath) -> Option<VfsFile> {
self.root2files[&root].iter().map(|&it| it).find(|&file| self.file(file).path == path)
}
fn file(&self, file: VfsFile) -> &VfsFileData {
&self.files[file.0 as usize]
}
fn file_mut(&mut self, file: VfsFile) -> &mut VfsFileData {
&mut self.files[file.0 as usize]
}
}
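
`Vfs::new`, `task_receiver`, `handle_task`, and `commit_changes` are essentially the whole public surface of this crate. A rough sketch of how a consumer might drive them from an event loop (illustrative only, not taken from this repository; the root paths, the bare `recv` loop, and the `println!` handling are placeholders):

use std::path::PathBuf;
use ra_vfs::{Vfs, VfsChange};

fn main() {
    // Watch two (hypothetical) nested source roots.
    let (mut vfs, _roots) = Vfs::new(vec![PathBuf::from("./foo"), PathBuf::from("./foo/bar")]);
    // A real server would poll this receiver inside a select! over all its event sources.
    while let Ok(task) = vfs.task_receiver().recv() {
        vfs.handle_task(task);
        for change in vfs.commit_changes() {
            match change {
                VfsChange::AddRoot { files, .. } => println!("loaded {} files", files.len()),
                VfsChange::AddFile { path, .. } => println!("added {}", path.display()),
                VfsChange::ChangeFile { file, .. } => println!("changed {:?}", file),
                VfsChange::RemoveFile { path, .. } => println!("removed {}", path.display()),
            }
        }
    }
}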


@@ -1,108 +0,0 @@
use std::{
iter,
path::{Path, PathBuf},
};
use relative_path::{RelativePath, RelativePathBuf};
/// VfsRoot identifies a watched directory on the file system.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct VfsRoot(pub u32);
/// Describes the contents of a single source root.
///
/// `RootData` can be thought of as a glob pattern like `src/**.rs` which
/// specifies the source root, or as a function which takes a `PathBuf` and
/// returns `true` iff the path belongs to the source root.
struct RootData {
path: PathBuf,
// result of `root.canonicalize()` if that differs from `root`; `None` otherwise.
canonical_path: Option<PathBuf>,
excluded_dirs: Vec<RelativePathBuf>,
}
pub(crate) struct Roots {
roots: Vec<RootData>,
}
impl Roots {
pub(crate) fn new(mut paths: Vec<PathBuf>) -> Roots {
let mut roots = Vec::new();
// A hack to make nesting work.
paths.sort_by_key(|it| std::cmp::Reverse(it.as_os_str().len()));
paths.dedup();
for (i, path) in paths.iter().enumerate() {
let nested_roots =
paths[..i].iter().filter_map(|it| rel_path(path, it)).collect::<Vec<_>>();
roots.push(RootData::new(path.clone(), nested_roots));
}
Roots { roots }
}
pub(crate) fn find(&self, path: &Path) -> Option<(VfsRoot, RelativePathBuf)> {
self.iter().find_map(|root| {
let rel_path = self.contains(root, path)?;
Some((root, rel_path))
})
}
pub(crate) fn len(&self) -> usize {
self.roots.len()
}
pub(crate) fn iter<'a>(&'a self) -> impl Iterator<Item = VfsRoot> + 'a {
(0..self.roots.len()).map(|idx| VfsRoot(idx as u32))
}
pub(crate) fn path(&self, root: VfsRoot) -> &Path {
self.root(root).path.as_path()
}
/// Checks if root contains a path and returns a root-relative path.
pub(crate) fn contains(&self, root: VfsRoot, path: &Path) -> Option<RelativePathBuf> {
let data = self.root(root);
iter::once(&data.path)
.chain(data.canonical_path.as_ref().into_iter())
.find_map(|base| rel_path(base, path))
.filter(|path| !data.excluded_dirs.contains(path))
.filter(|path| !data.is_excluded(path))
}
fn root(&self, root: VfsRoot) -> &RootData {
&self.roots[root.0 as usize]
}
}
impl RootData {
fn new(path: PathBuf, excluded_dirs: Vec<RelativePathBuf>) -> RootData {
let mut canonical_path = path.canonicalize().ok();
if Some(&path) == canonical_path.as_ref() {
canonical_path = None;
}
RootData { path, canonical_path, excluded_dirs }
}
fn is_excluded(&self, path: &RelativePath) -> bool {
if self.excluded_dirs.iter().any(|it| it == path) {
return true;
}
// Ignore some common directories.
//
// FIXME: don't hard-code, specify at source-root creation time using
// gitignore
for (i, c) in path.components().enumerate() {
if let relative_path::Component::Normal(c) = c {
if (i == 0 && c == "target") || c == ".git" || c == "node_modules" {
return true;
}
}
}
match path.extension() {
None | Some("rs") => false,
_ => true,
}
}
}
fn rel_path(base: &Path, path: &Path) -> Option<RelativePathBuf> {
let path = path.strip_prefix(base).ok()?;
let path = RelativePathBuf::from_path(path).unwrap();
Some(path)
}
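
The longest-path-first sort in `Roots::new`, together with `excluded_dirs`, is what makes nested roots work: a file under the inner root is attributed to that root rather than to the enclosing one, and `is_excluded` filters out `target`, `.git`, and `node_modules`. A hypothetical in-crate test sketching that behaviour (not present in this diff; the paths are invented):

#[cfg(test)]
mod nesting_sketch {
    use std::path::{Path, PathBuf};
    use super::Roots;

    #[test]
    fn nested_root_wins() {
        let roots = Roots::new(vec![PathBuf::from("/proj/a"), PathBuf::from("/proj/a/b")]);
        // A file inside the nested root is attributed to `a/b`, not to `a`.
        let (root, rel) = roots.find(Path::new("/proj/a/b/baz.rs")).unwrap();
        assert_eq!(roots.path(root), Path::new("/proj/a/b"));
        assert_eq!(rel.display().to_string(), "baz.rs");
        // A sibling file is still found via the outer `a` root.
        let (root, rel) = roots.find(Path::new("/proj/a/foo.rs")).unwrap();
        assert_eq!(roots.path(root), Path::new("/proj/a"));
        assert_eq!(rel.display().to_string(), "foo.rs");
        // Common build/VCS directories are filtered out by `is_excluded`.
        assert!(roots.find(Path::new("/proj/a/target/debug/foo.rs")).is_none());
    }
}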


@@ -1,222 +0,0 @@
use std::{collections::HashSet, fs, time::Duration};
// use flexi_logger::Logger;
use crossbeam_channel::RecvTimeoutError;
use ra_vfs::{Vfs, VfsChange};
use tempfile::tempdir;
/// Processes exactly `num_tasks` events waiting in the `vfs` message queue.
///
/// Panics if there are not exactly that many tasks enqueued for processing.
fn process_tasks(vfs: &mut Vfs, num_tasks: u32) {
process_tasks_in_range(vfs, num_tasks, num_tasks);
}
/// Processes up to `max_count` events waiting in the `vfs` message queue.
///
/// Panics if it cannot process at least `min_count` events.
/// Panics if more than `max_count` events are enqueued for processing.
fn process_tasks_in_range(vfs: &mut Vfs, min_count: u32, max_count: u32) {
for i in 0..max_count {
let task = match vfs.task_receiver().recv_timeout(Duration::from_secs(3)) {
Err(RecvTimeoutError::Timeout) if i >= min_count => return,
otherwise => otherwise.unwrap(),
};
log::debug!("{:?}", task);
vfs.handle_task(task);
}
assert!(vfs.task_receiver().is_empty());
}
macro_rules! assert_match {
($x:expr, $pat:pat) => {
assert_match!($x, $pat, ())
};
($x:expr, $pat:pat, $assert:expr) => {
match $x {
$pat => $assert,
x => assert!(false, "Expected {}, got {:?}", stringify!($pat), x),
};
};
}
#[test]
fn test_vfs_works() -> std::io::Result<()> {
// Logger::with_str("vfs=debug,ra_vfs=debug").start().unwrap();
let files = [("a/foo.rs", "hello"), ("a/bar.rs", "world"), ("a/b/baz.rs", "nested hello")];
let dir = tempdir().unwrap();
for (path, text) in files.iter() {
let file_path = dir.path().join(path);
fs::create_dir_all(file_path.parent().unwrap()).unwrap();
fs::write(file_path, text)?
}
let a_root = dir.path().join("a");
let b_root = dir.path().join("a/b");
let (mut vfs, _) = Vfs::new(vec![a_root, b_root]);
process_tasks(&mut vfs, 2);
{
let files = vfs
.commit_changes()
.into_iter()
.flat_map(|change| {
let files = match change {
VfsChange::AddRoot { files, .. } => files,
_ => panic!("unexpected change"),
};
files.into_iter().map(|(_id, path, text)| {
let text: String = (&*text).clone();
(format!("{}", path.display()), text)
})
})
.collect::<HashSet<_>>();
let expected_files = [("foo.rs", "hello"), ("bar.rs", "world"), ("baz.rs", "nested hello")]
.iter()
.map(|(path, text)| (path.to_string(), text.to_string()))
.collect::<HashSet<_>>();
assert_eq!(files, expected_files);
}
// rust-analyzer#734: fsevents has a bunch of events still sitting around.
process_tasks_in_range(&mut vfs, 0, if cfg!(target_os = "macos") { 7 } else { 0 });
match vfs.commit_changes().as_slice() {
[] => {}
// This arises on fsevents (unless we wait 30 seconds before
// calling `Vfs::new` above). We need to churn through these
// events so that we can focus on the event that arises from
// the `fs::write` below.
[VfsChange::ChangeFile { .. }, // hello
VfsChange::ChangeFile { .. }, // world
VfsChange::AddFile { .. }, // b/baz.rs, nested hello
VfsChange::ChangeFile { .. }, // hello
VfsChange::ChangeFile { .. }, // world
VfsChange::ChangeFile { .. }, // nested hello
VfsChange::ChangeFile { .. }, // nested hello
] => {}
changes => panic!("Expected events for setting up initial files, got: {GOT:?}",
GOT=changes),
}
fs::write(&dir.path().join("a/b/baz.rs"), "quux").unwrap();
process_tasks(&mut vfs, 1);
assert_match!(
vfs.commit_changes().as_slice(),
[VfsChange::ChangeFile { text, .. }],
assert_eq!(text.as_str(), "quux")
);
vfs.add_file_overlay(&dir.path().join("a/b/baz.rs"), "m".to_string());
assert_match!(
vfs.commit_changes().as_slice(),
[VfsChange::ChangeFile { text, .. }],
assert_eq!(text.as_str(), "m")
);
// changing file on disk while overlayed doesn't generate a VfsChange
fs::write(&dir.path().join("a/b/baz.rs"), "corge").unwrap();
process_tasks(&mut vfs, 1);
assert_match!(vfs.commit_changes().as_slice(), []);
// removing overlay restores data on disk
vfs.remove_file_overlay(&dir.path().join("a/b/baz.rs"));
assert_match!(
vfs.commit_changes().as_slice(),
[VfsChange::ChangeFile { text, .. }],
assert_eq!(text.as_str(), "corge")
);
vfs.add_file_overlay(&dir.path().join("a/b/spam.rs"), "spam".to_string());
assert_match!(vfs.commit_changes().as_slice(), [VfsChange::AddFile { text, path, .. }], {
assert_eq!(text.as_str(), "spam");
assert_eq!(path, "spam.rs");
});
vfs.remove_file_overlay(&dir.path().join("a/b/spam.rs"));
assert_match!(
vfs.commit_changes().as_slice(),
[VfsChange::RemoveFile { path, .. }],
assert_eq!(path, "spam.rs")
);
fs::create_dir_all(dir.path().join("a/sub1/sub2")).unwrap();
fs::write(dir.path().join("a/sub1/sub2/new.rs"), "new hello").unwrap();
process_tasks(&mut vfs, 1);
assert_match!(vfs.commit_changes().as_slice(), [VfsChange::AddFile { text, path, .. }], {
assert_eq!(text.as_str(), "new hello");
assert_eq!(path, "sub1/sub2/new.rs");
});
fs::rename(&dir.path().join("a/sub1/sub2/new.rs"), &dir.path().join("a/sub1/sub2/new1.rs"))
.unwrap();
// rust-analyzer#734: For testing purposes, work around
// passcod/notify#181 by processing either 1 or 2 events. (In
// particular, Mac can hand back either 1 or 2 events in a
// timing-dependent fashion.)
//
// rust-analyzer#827: Windows generates extra `Write` events when
// renaming, meaning we have extra tasks to process.
process_tasks_in_range(&mut vfs, 1, if cfg!(windows) { 4 } else { 2 });
match vfs.commit_changes().as_slice() {
[VfsChange::RemoveFile { path: removed_path, .. }, VfsChange::AddFile { text, path: added_path, .. }] =>
{
assert_eq!(removed_path, "sub1/sub2/new.rs");
assert_eq!(added_path, "sub1/sub2/new1.rs");
assert_eq!(text.as_str(), "new hello");
}
// Hopefully passcod/notify#181 will be addressed in some
// manner that will reliably emit an event mentioning
// `sub1/sub2/new.rs`. But until then, we must accept that
// debouncing loses information unrecoverably.
[VfsChange::AddFile { text, path: added_path, .. }] => {
assert_eq!(added_path, "sub1/sub2/new1.rs");
assert_eq!(text.as_str(), "new hello");
}
changes => panic!(
"Expected events for rename of {OLD} to {NEW}, got: {GOT:?}",
OLD = "sub1/sub2/new.rs",
NEW = "sub1/sub2/new1.rs",
GOT = changes
),
}
fs::remove_file(&dir.path().join("a/sub1/sub2/new1.rs")).unwrap();
process_tasks(&mut vfs, 1);
assert_match!(
vfs.commit_changes().as_slice(),
[VfsChange::RemoveFile { path, .. }],
assert_eq!(path, "sub1/sub2/new1.rs")
);
{
vfs.add_file_overlay(&dir.path().join("a/memfile.rs"), "memfile".to_string());
assert_match!(
vfs.commit_changes().as_slice(),
[VfsChange::AddFile { text, .. }],
assert_eq!(text.as_str(), "memfile")
);
fs::write(&dir.path().join("a/memfile.rs"), "ignore me").unwrap();
process_tasks(&mut vfs, 1);
assert_match!(vfs.commit_changes().as_slice(), []);
}
// should be ignored
fs::create_dir_all(dir.path().join("a/target")).unwrap();
fs::write(&dir.path().join("a/target/new.rs"), "ignore me").unwrap();
assert_match!(
vfs.task_receiver().recv_timeout(Duration::from_millis(300)), // slightly more than watcher debounce delay
Err(RecvTimeoutError::Timeout)
);
Ok(())
}