Make DepGraph::previous_work_products immutable #50524

Merged · 2 commits · May 11, 2018
Changes from 1 commit
26 changes: 7 additions & 19 deletions src/librustc/dep_graph/graph.rs
@@ -77,7 +77,7 @@ struct DepGraphData {
/// things available to us. If we find that they are not dirty, we
/// load the path to the file storing those work-products here into
/// this map. We can later look for and extract that data.
previous_work_products: RwLock<FxHashMap<WorkProductId, WorkProduct>>,
previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

/// Work-products that we generate in this run.
work_products: RwLock<FxHashMap<WorkProductId, WorkProduct>>,
@@ -90,7 +90,8 @@ struct DepGraphData {

impl DepGraph {

pub fn new(prev_graph: PreviousDepGraph) -> DepGraph {
pub fn new(prev_graph: PreviousDepGraph,
prev_work_products: FxHashMap<WorkProductId, WorkProduct>) -> DepGraph {
// Pre-allocate the fingerprints array. We over-allocate a little so
// that we hopefully don't have to re-allocate during this compilation
// session.
@@ -100,7 +101,7 @@ impl DepGraph {
(prev_graph_node_count * 115) / 100);
DepGraph {
data: Some(Lrc::new(DepGraphData {
previous_work_products: RwLock::new(FxHashMap()),
previous_work_products: prev_work_products,
work_products: RwLock::new(FxHashMap()),
dep_node_debug: Lock::new(FxHashMap()),
current: Lock::new(CurrentDepGraph::new()),
@@ -460,19 +461,6 @@ impl DepGraph {
self.data.as_ref().unwrap().previous.node_to_index(dep_node)
}

/// Indicates that a previous work product exists for `v`. This is
/// invoked during initial start-up based on what nodes are clean
/// (and what files exist in the incr. directory).
pub fn insert_previous_work_product(&self, v: &WorkProductId, data: WorkProduct) {
debug!("insert_previous_work_product({:?}, {:?})", v, data);
self.data
.as_ref()
.unwrap()
.previous_work_products
.borrow_mut()
.insert(v.clone(), data);
}

/// Indicates that we created the given work-product in this run
/// for `v`. This record will be preserved and loaded in the next
/// run.
@@ -492,7 +480,7 @@ impl DepGraph {
self.data
.as_ref()
.and_then(|data| {
data.previous_work_products.borrow().get(v).cloned()
data.previous_work_products.get(v).cloned()
})
}

@@ -504,8 +492,8 @@

/// Access the map of work-products created during the cached run. Only
/// used during saving of the dep-graph.
pub fn previous_work_products(&self) -> ReadGuard<FxHashMap<WorkProductId, WorkProduct>> {
self.data.as_ref().unwrap().previous_work_products.borrow()
pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
&self.data.as_ref().unwrap().previous_work_products
}

#[inline(always)]
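For context, a minimal sketch of how a caller reads the map after this change (the caller function is hypothetical and assumes the rustc-internal types `DepGraph`, `WorkProductId`, `WorkProduct`, and `FxHashMap` in scope; only `previous_work_products` and `previous_work_product` come from this diff). The accessor now hands back a plain shared reference, so no `ReadGuard` has to be held while iterating:

// Hypothetical consumer of the now lock-free accessor.
fn log_previous_work_products(dep_graph: &DepGraph, id: &WorkProductId) {
    // `previous_work_products()` now yields `&FxHashMap<WorkProductId, WorkProduct>`.
    for (wp_id, wp) in dep_graph.previous_work_products() {
        debug!("previous work product {:?} -> {:?}", wp_id, wp);
    }

    // Point lookups still go through `previous_work_product`, which clones the value.
    let _cached: Option<WorkProduct> = dep_graph.previous_work_product(id);
}
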
19 changes: 10 additions & 9 deletions src/librustc_driver/driver.rs
@@ -980,15 +980,16 @@ where
let dep_graph = match future_dep_graph {
None => DepGraph::new_disabled(),
Some(future) => {
let prev_graph = time(sess, "blocked while dep-graph loading finishes", || {
future
.open()
.unwrap_or_else(|e| rustc_incremental::LoadResult::Error {
message: format!("could not decode incremental cache: {:?}", e),
})
.open(sess)
});
DepGraph::new(prev_graph)
let (prev_graph, prev_work_products) =
time(sess, "blocked while dep-graph loading finishes", || {
future
.open()
.unwrap_or_else(|e| rustc_incremental::LoadResult::Error {
message: format!("could not decode incremental cache: {:?}", e),
})
.open(sess)
});
DepGraph::new(prev_graph, prev_work_products)
}
};
let hir_forest = time(sess, "lowering ast -> hir", || {
112 changes: 57 additions & 55 deletions src/librustc_incremental/persist/load.rs
@@ -10,7 +10,8 @@

//! Code to save/load the dep-graph from files.

use rustc::dep_graph::{PreviousDepGraph, SerializedDepGraph};
use rustc_data_structures::fx::FxHashMap;
use rustc::dep_graph::{PreviousDepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc::ty::maps::OnDiskCache;
@@ -32,73 +33,30 @@ pub fn dep_graph_tcx_init<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {

tcx.allocate_metadata_dep_nodes();
tcx.precompute_in_scope_traits_hashes();

if tcx.sess.incr_comp_session_dir_opt().is_none() {
// If we are only building with -Zquery-dep-graph but without an actual
// incr. comp. session directory, we exit here. Otherwise we'd fail
// when trying to load work products.
return
}

let work_products_path = work_products_path(tcx.sess);
let load_result = load_data(tcx.sess.opts.debugging_opts.incremental_info, &work_products_path);

if let LoadResult::Ok { data: (work_products_data, start_pos) } = load_result {
// Decode the list of work_products
let mut work_product_decoder = Decoder::new(&work_products_data[..], start_pos);
let work_products: Vec<SerializedWorkProduct> =
RustcDecodable::decode(&mut work_product_decoder).unwrap_or_else(|e| {
let msg = format!("Error decoding `work-products` from incremental \
compilation session directory: {}", e);
tcx.sess.fatal(&msg[..])
});

for swp in work_products {
let mut all_files_exist = true;
for &(_, ref file_name) in swp.work_product.saved_files.iter() {
let path = in_incr_comp_dir_sess(tcx.sess, file_name);
if !path.exists() {
all_files_exist = false;

if tcx.sess.opts.debugging_opts.incremental_info {
eprintln!("incremental: could not find file for work \
product: {}", path.display());
}
}
}

if all_files_exist {
debug!("reconcile_work_products: all files for {:?} exist", swp);
tcx.dep_graph.insert_previous_work_product(&swp.id, swp.work_product);
} else {
debug!("reconcile_work_products: some file for {:?} does not exist", swp);
delete_dirty_work_product(tcx, swp);
}
}
}
}

type WorkProductMap = FxHashMap<WorkProductId, WorkProduct>;

pub enum LoadResult<T> {
Ok { data: T },
DataOutOfDate,
Error { message: String },
}


impl LoadResult<PreviousDepGraph> {
pub fn open(self, sess: &Session) -> PreviousDepGraph {
impl LoadResult<(PreviousDepGraph, WorkProductMap)> {
pub fn open(self, sess: &Session) -> (PreviousDepGraph, WorkProductMap) {
match self {
LoadResult::Error { message } => {
sess.warn(&message);
PreviousDepGraph::new(SerializedDepGraph::new())
(PreviousDepGraph::new(SerializedDepGraph::new()), FxHashMap())
},
LoadResult::DataOutOfDate => {
if let Err(err) = delete_all_session_dir_contents(sess) {
sess.err(&format!("Failed to delete invalidated or incompatible \
incremental compilation session directory contents `{}`: {}.",
dep_graph_path(sess).display(), err));
}
PreviousDepGraph::new(SerializedDepGraph::new())
(PreviousDepGraph::new(SerializedDepGraph::new()), FxHashMap())
}
LoadResult::Ok { data } => data
}
@@ -125,10 +83,10 @@ fn load_data(report_incremental_info: bool, path: &Path) -> LoadResult<(Vec<u8>,
}
}

fn delete_dirty_work_product(tcx: TyCtxt,
fn delete_dirty_work_product(sess: &Session,
swp: SerializedWorkProduct) {
debug!("delete_dirty_work_product({:?})", swp);
work_product::delete_workproduct_files(tcx.sess, &swp.work_product);
work_product::delete_workproduct_files(sess, &swp.work_product);
}

/// Either a result that has already been computed or a
@@ -149,7 +107,7 @@ impl<T> MaybeAsync<T> {

/// Launch a thread and load the dependency graph in the background.
pub fn load_dep_graph(sess: &Session) ->
MaybeAsync<LoadResult<PreviousDepGraph>>
MaybeAsync<LoadResult<(PreviousDepGraph, WorkProductMap)>>
{
// Since `sess` isn't `Sync`, we perform all accesses to `sess`
// before we fire the background thread.
@@ -159,7 +117,7 @@ pub fn load_dep_graph(sess: &Session) ->
if sess.opts.incremental.is_none() {
// No incremental compilation.
return MaybeAsync::Sync(LoadResult::Ok {
data: PreviousDepGraph::new(SerializedDepGraph::new())
data: (PreviousDepGraph::new(SerializedDepGraph::new()), FxHashMap())
});
}

@@ -169,6 +127,50 @@ pub fn load_dep_graph(sess: &Session) ->
let report_incremental_info = sess.opts.debugging_opts.incremental_info;
let expected_hash = sess.opts.dep_tracking_hash();

let mut prev_work_products = FxHashMap();

// If we are only building with -Zquery-dep-graph but without an actual
// incr. comp. session directory, we exit here. Otherwise we'd fail
// when trying to load work products.
Review comment (Member): I'd reword this comment since we don't really exit the function if there is no cache directory. It could just say something like "If there is an incr. comp. cache directory, load the set of previous work products".
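A possible replacement for the three comment lines above, along the lines of that suggestion (hypothetical wording, not necessarily what was committed):

// If there is an incr. comp. cache directory, load the set of work
// products produced by the previous compilation session. Checking for
// the directory first keeps -Zquery-dep-graph builds without an
// incremental session directory from failing while trying to load
// work products.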

if sess.incr_comp_session_dir_opt().is_some() {
let work_products_path = work_products_path(sess);
let load_result = load_data(report_incremental_info, &work_products_path);

if let LoadResult::Ok { data: (work_products_data, start_pos) } = load_result {
// Decode the list of work_products
let mut work_product_decoder = Decoder::new(&work_products_data[..], start_pos);
let work_products: Vec<SerializedWorkProduct> =
RustcDecodable::decode(&mut work_product_decoder).unwrap_or_else(|e| {
let msg = format!("Error decoding `work-products` from incremental \
compilation session directory: {}", e);
sess.fatal(&msg[..])
});

for swp in work_products {
let mut all_files_exist = true;
for &(_, ref file_name) in swp.work_product.saved_files.iter() {
let path = in_incr_comp_dir_sess(sess, file_name);
if !path.exists() {
all_files_exist = false;

if sess.opts.debugging_opts.incremental_info {
eprintln!("incremental: could not find file for work \
product: {}", path.display());
}
}
}

if all_files_exist {
debug!("reconcile_work_products: all files for {:?} exist", swp);
prev_work_products.insert(swp.id, swp.work_product);
} else {
debug!("reconcile_work_products: some file for {:?} does not exist", swp);
delete_dirty_work_product(sess, swp);
}
}
}
}

MaybeAsync::Async(std::thread::spawn(move || {
time_ext(time_passes, None, "background load prev dep-graph", move || {
match load_data(report_incremental_info, &path) {
@@ -195,7 +197,7 @@ pub fn load_dep_graph(sess: &Session) ->
let dep_graph = SerializedDepGraph::decode(&mut decoder)
.expect("Error reading cached dep-graph");

LoadResult::Ok { data: PreviousDepGraph::new(dep_graph) }
LoadResult::Ok { data: (PreviousDepGraph::new(dep_graph), prev_work_products) }
}
}
})
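To summarize the design, here is a small, self-contained sketch of the pattern this PR moves to (simplified stand-in types, not rustc's actual definitions): the loader builds the whole map before the graph is constructed, so the field needs no RwLock and readers simply borrow it.

use std::collections::HashMap;

// Illustrative stand-ins for rustc's WorkProductId / WorkProduct.
type WorkProductId = String;

#[derive(Clone, Debug)]
struct WorkProduct {
    saved_files: Vec<String>,
}

struct DepGraphData {
    // Fully populated before construction; immutable afterwards.
    previous_work_products: HashMap<WorkProductId, WorkProduct>,
}

impl DepGraphData {
    fn new(prev_work_products: HashMap<WorkProductId, WorkProduct>) -> Self {
        DepGraphData { previous_work_products: prev_work_products }
    }

    // Plain shared reference, mirroring the new accessor above; no lock guard.
    fn previous_work_products(&self) -> &HashMap<WorkProductId, WorkProduct> {
        &self.previous_work_products
    }
}

fn main() {
    // The loader (cf. load_dep_graph) reconciles work products up front ...
    let mut prev = HashMap::new();
    prev.insert(
        "wp-0".to_string(),
        WorkProduct { saved_files: vec!["crate.o".to_string()] },
    );

    // ... and hands the finished map to the constructor.
    let data = DepGraphData::new(prev);
    println!("{:?}", data.previous_work_products().get("wp-0"));
}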