diff --git a/src/tools/miri/.gitignore b/src/tools/miri/.gitignore
index 03c5591b78751..ed2d0ba7ba079 100644
--- a/src/tools/miri/.gitignore
+++ b/src/tools/miri/.gitignore
@@ -9,6 +9,9 @@ tex/*/out
 *.mm_profdata
 perf.data
 perf.data.old
-flamegraph.svg
+flamegraph*.svg
+rustc-ice*.txt
 tests/native-lib/libtestlib.so
 .auto-*
+
+/genmc/
diff --git a/src/tools/miri/CONTRIBUTING.md b/src/tools/miri/CONTRIBUTING.md
index 0d77ca06e1b48..739f0702252ab 100644
--- a/src/tools/miri/CONTRIBUTING.md
+++ b/src/tools/miri/CONTRIBUTING.md
@@ -19,12 +19,10 @@ When you get a review, please take care of the requested changes in new commits.
 existing commits. Generally avoid force-pushing. The only time you should force push is when there
 is a conflict with the master branch (in that case you should rebase across master, not merge), and
 all the way at the end of the review process when the reviewer tells you that the PR is done and you
-should squash the commits. For the latter case, use `git rebase --keep-base ...` to squash without
-changing the base commit your PR branches off of. Use your own judgment and the reviewer's guidance
-to decide whether the PR should be squashed into a single commit or multiple logically separate
-commits. (All this is to work around the fact that Github is quite bad at dealing with force pushes
-and does not support `git range-diff`. Maybe one day Github will be good at git and then life can
-become easier.)
+should squash the commits. If you are unsure how to use `git rebase` to squash commits, use `./miri
+squash` which automates the process but leaves little room for customization. (All this is to work
+around the fact that Github is quite bad at dealing with force pushes and does not support `git
+range-diff`. Maybe one day Github will be good at git and then life can become easier.)
 
 Most PRs bounce back and forth between the reviewer and the author several times, so it is good to
 keep track of who is expected to take the next step. We are using the `S-waiting-for-review` and
diff --git a/src/tools/miri/Cargo.toml b/src/tools/miri/Cargo.toml
index bb24e700e7388..7b7be97aa518f 100644
--- a/src/tools/miri/Cargo.toml
+++ b/src/tools/miri/Cargo.toml
@@ -65,6 +65,7 @@ harness = false
 
 [features]
 default = ["stack-cache"]
+genmc = []
 stack-cache = []
 stack-cache-consistency-check = ["stack-cache"]
 
diff --git a/src/tools/miri/README.md b/src/tools/miri/README.md
index 95e1770aa7bad..a78cc9d931980 100644
--- a/src/tools/miri/README.md
+++ b/src/tools/miri/README.md
@@ -277,22 +277,15 @@ Try running `cargo miri clean`.
 Miri adds its own set of `-Z` flags, which are usually set via the `MIRIFLAGS`
 environment variable. We first document the most relevant and most commonly used flags:
 
-* `-Zmiri-address-reuse-rate=<rate>` changes the probability that a freed *non-stack* allocation
-  will be added to the pool for address reuse, and the probability that a new *non-stack* allocation
-  will be taken from the pool. Stack allocations never get added to or taken from the pool. The
-  default is `0.5`.
-* `-Zmiri-address-reuse-cross-thread-rate=<rate>` changes the probability that an allocation which
-  attempts to reuse a previously freed block of memory will also consider blocks freed by *other
-  threads*. The default is `0.1`, which means by default, in 90% of the cases where an address reuse
-  attempt is made, only addresses from the same thread will be considered. Reusing an address from
-  another thread induces synchronization between those threads, which can mask data races and weak
-  memory bugs.
-* `-Zmiri-compare-exchange-weak-failure-rate=<rate>` changes the failure rate of
-  `compare_exchange_weak` operations. The default is `0.8` (so 4 out of 5 weak ops will fail).
-  You can change it to any value between `0.0` and `1.0`, where `1.0` means it
-  will always fail and `0.0` means it will never fail. Note that setting it to
-  `1.0` will likely cause hangs, since it means programs using
-  `compare_exchange_weak` cannot make progress.
+* `-Zmiri-deterministic-concurrency` makes Miri's concurrency-related behavior fully deterministic.
+  Strictly speaking, Miri is always fully deterministic when isolation is enabled (the default
+  mode), but this determinism is achieved by using an RNG with a fixed seed. Seemingly harmless
+  changes to the program, or just running it for a different target architecture, can thus lead to
+  completely different program behavior down the line. This flag disables the use of an RNG for
+  concurrency-related decisions. Therefore, Miri cannot find bugs that only occur under some
+  specific circumstances, but Miri's behavior will also be more stable across versions and targets.
+  This is equivalent to `-Zmiri-fixed-schedule -Zmiri-compare-exchange-weak-failure-rate=0.0
+  -Zmiri-address-reuse-cross-thread-rate=0.0 -Zmiri-disable-weak-memory-emulation`.
 * `-Zmiri-disable-isolation` disables host isolation. As a consequence,
   the program has access to host resources such as environment variables, file
   systems, and randomness.
@@ -334,9 +327,6 @@ environment variable. We first document the most relevant and most commonly used
   This will necessarily miss some bugs as those operations are not efficiently and accurately
   implementable in a sanitizer, but it will only miss bugs that concern memory/pointers which
   is subject to these operations.
-* `-Zmiri-preemption-rate` configures the probability that at the end of a basic block, the active
-  thread will be preempted. The default is `0.01` (i.e., 1%). Setting this to `0` disables
-  preemption.
 * `-Zmiri-report-progress` makes Miri print the current stacktrace every now and then, so you can
   tell what it is doing when a program just keeps running. You can customize how frequently the
   report is printed via `-Zmiri-report-progress=<blocks>`, which prints the report every N basic
@@ -365,6 +355,22 @@ The remaining flags are for advanced use only, and more likely to change or be r
 Some of these are **unsound**, which means they can
 lead to Miri failing to detect cases of undefined behavior in a program.
 
+* `-Zmiri-address-reuse-rate=<rate>` changes the probability that a freed *non-stack* allocation
+  will be added to the pool for address reuse, and the probability that a new *non-stack* allocation
+  will be taken from the pool. Stack allocations never get added to or taken from the pool. The
+  default is `0.5`.
+* `-Zmiri-address-reuse-cross-thread-rate=<rate>` changes the probability that an allocation which
+  attempts to reuse a previously freed block of memory will also consider blocks freed by *other
+  threads*. The default is `0.1`, which means by default, in 90% of the cases where an address reuse
+  attempt is made, only addresses from the same thread will be considered. Reusing an address from
+  another thread induces synchronization between those threads, which can mask data races and weak
+  memory bugs.
+* `-Zmiri-compare-exchange-weak-failure-rate=<rate>` changes the failure rate of
+  `compare_exchange_weak` operations. The default is `0.8` (so 4 out of 5 weak ops will fail).
+  You can change it to any value between `0.0` and `1.0`, where `1.0` means it
+  will always fail and `0.0` means it will never fail. Note that setting it to
+  `1.0` will likely cause hangs, since it means programs using
+  `compare_exchange_weak` cannot make progress.
 * `-Zmiri-disable-alignment-check` disables checking pointer alignment, so you can focus on
   other failures, but it means Miri can miss bugs in your program. Using this flag is **unsound**.
@@ -383,6 +389,10 @@ to Miri failing to detect cases of undefined behavior in a program.
   this flag is **unsound**.
 * `-Zmiri-disable-weak-memory-emulation` disables the emulation of some C++11 weak memory effects.
+* `-Zmiri-fixed-schedule` disables preemption (like `-Zmiri-preemption-rate=0.0`) and furthermore
+  disables the randomization of the next thread to be picked, instead fixing a round-robin schedule.
+  Note however that other aspects of Miri's concurrency behavior are still randomized; use
+  `-Zmiri-deterministic-concurrency` to disable them all.
 * `-Zmiri-native-lib=<path to a shared object file>` is an experimental flag for providing support
   for calling native functions from inside the interpreter via FFI. The flag is supported only on
   Unix systems. Functions not provided by that file are still executed via the usual Miri shims.
@@ -412,6 +422,10 @@ to Miri failing to detect cases of undefined behavior in a program.
   without an explicit value), `none` means it never recurses, `scalar` means it only recurses for
   types where we would also emit `noalias` annotations in the generated LLVM IR (types passed as
   individual scalars or pairs of scalars). Setting this to `none` is **unsound**.
+* `-Zmiri-preemption-rate` configures the probability that at the end of a basic block, the active
+  thread will be preempted. The default is `0.01` (i.e., 1%). Setting this to `0` disables
+  preemption. Note that even without preemption, the schedule is still non-deterministic:
+  if a thread blocks or yields, the next thread is chosen randomly.
 * `-Zmiri-provenance-gc=<blocks>` configures how often the pointer provenance garbage collector runs.
   The default is to search for and remove unreachable provenance once every `10000` basic blocks. Setting
   this to `0` disables the garbage collector, which causes some programs to have explosive memory
@@ -443,9 +457,6 @@ to Miri failing to detect cases of undefined behavior in a program.
   casts are not supported in this mode, but that may change in the future.
 * `-Zmiri-force-page-size=<num>` overrides the default page size for an architecture, in multiples of 1k.
   `4` is default for most targets. This value should always be a power of 2 and nonzero.
-* `-Zmiri-unique-is-unique` performs additional aliasing checks for `core::ptr::Unique` to ensure
-  that it could theoretically be considered `noalias`. This flag is experimental and has
-  an effect only when used with `-Zmiri-tree-borrows`.
 
 [function ABI]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier
 
diff --git a/src/tools/miri/bench-cargo-miri/big-allocs/src/main.rs b/src/tools/miri/bench-cargo-miri/big-allocs/src/main.rs
index 5b807ac3df1fb..89797c4982067 100644
--- a/src/tools/miri/bench-cargo-miri/big-allocs/src/main.rs
+++ b/src/tools/miri/bench-cargo-miri/big-allocs/src/main.rs
@@ -7,10 +7,7 @@ fn main() {
     // We can't use too big of an allocation or this code will encounter an allocation failure in
     // CI. Since the allocation can't be huge, we need to do a few iterations so that the effect
     // we're trying to measure is clearly visible above the interpreter's startup time.
-    // FIXME (https://github.com/rust-lang/miri/issues/4253): On 32bit targets, we can run out of
-    // usable addresses if we don't reuse, leading to random test failures.
-    let count = if cfg!(target_pointer_width = "32") { 8 } else { 12 };
-    for _ in 0..count {
+    for _ in 0..20 {
         drop(Vec::<u8>::with_capacity(512 * 1024 * 1024));
     }
 }
diff --git a/src/tools/miri/miri-script/src/commands.rs b/src/tools/miri/miri-script/src/commands.rs
index 17a7c06b52535..1c9750e2cbdc1 100644
--- a/src/tools/miri/miri-script/src/commands.rs
+++ b/src/tools/miri/miri-script/src/commands.rs
@@ -1,7 +1,8 @@
 use std::collections::HashMap;
 use std::ffi::{OsStr, OsString};
-use std::fs::File;
-use std::io::{BufReader, BufWriter, Write};
+use std::fmt::Write as _;
+use std::fs::{self, File};
+use std::io::{self, BufRead, BufReader, BufWriter, Write as _};
 use std::ops::Not;
 use std::path::PathBuf;
 use std::time::Duration;
@@ -169,7 +170,8 @@ impl Command {
             | Command::Toolchain { .. }
             | Command::Bench { .. }
             | Command::RustcPull { .. }
-            | Command::RustcPush { .. } => {}
+            | Command::RustcPush { .. }
+            | Command::Squash => {}
         }
         // Then run the actual command.
         match self {
@@ -188,6 +190,7 @@
             Command::Toolchain { flags } => Self::toolchain(flags),
             Command::RustcPull { commit } => Self::rustc_pull(commit.clone()),
             Command::RustcPush { github_user, branch } => Self::rustc_push(github_user, branch),
+            Command::Squash => Self::squash(),
         }
     }
 
@@ -383,6 +386,72 @@ impl Command {
         Ok(())
     }
 
+    fn squash() -> Result<()> {
+        let sh = Shell::new()?;
+        sh.change_dir(miri_dir()?);
+        // Figure out base wrt latest upstream master.
+        // (We can't trust any of the local ones, they can all be outdated.)
+        let origin_master = {
+            cmd!(sh, "git fetch https://github.com/rust-lang/miri/")
+                .quiet()
+                .ignore_stdout()
+                .ignore_stderr()
+                .run()?;
+            cmd!(sh, "git rev-parse FETCH_HEAD").read()?
+        };
+        let base = cmd!(sh, "git merge-base HEAD {origin_master}").read()?;
+        // Rebase onto that, setting ourselves as the sequence editor so that we can edit the sequence programmatically.
+        // We want to forward the host stdin so apparently we cannot use `cmd!`.
+        let mut cmd = process::Command::new("git");
+        cmd.arg("rebase").arg(&base).arg("--interactive");
+        cmd.env("GIT_SEQUENCE_EDITOR", env::current_exe()?);
+        cmd.env("MIRI_SCRIPT_IS_GIT_SEQUENCE_EDITOR", "1");
+        cmd.current_dir(sh.current_dir());
+        let result = cmd.status()?;
+        if !result.success() {
+            bail!("`git rebase` failed");
+        }
+        Ok(())
+    }
+
+    pub fn squash_sequence_editor() -> Result<()> {
+        let sequence_file = env::args().nth(1).expect("git should pass us a filename");
+        if sequence_file == "fmt" {
+            // This is probably us being called as a git hook as part of the rebase. Let's just
+            // ignore this. Sadly `git rebase` does not have a flag to skip running hooks.
+            return Ok(());
+        }
+        // Read the provided sequence and adjust it.
+        let rebase_sequence = {
+            let mut rebase_sequence = String::new();
+            let file = fs::File::open(&sequence_file).with_context(|| {
+                format!("failed to read rebase sequence from {sequence_file:?}")
+            })?;
+            let file = io::BufReader::new(file);
+            for line in file.lines() {
+                let line = line?;
+                // The first line is left unchanged.
+                if rebase_sequence.is_empty() {
+                    writeln!(rebase_sequence, "{line}").unwrap();
+                    continue;
+                }
+                // If this is a "pick" line, make it "squash".
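+                // For example, git hands us a sequence like this (hashes illustrative):
+                //     pick 1a2b3c4 Implement feature
+                //     pick 5d6e7f8 Fix tests
+                // and we rewrite it to:
+                //     pick 1a2b3c4 Implement feature
+                //     squash 5d6e7f8 Fix tests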
+ if let Some(rest) = line.strip_prefix("pick ") { + writeln!(rebase_sequence, "squash {rest}").unwrap(); + continue; + } + // We've reached the end of the relevant part of the sequence, and we can stop. + break; + } + rebase_sequence + }; + // Write out the adjusted sequence. + fs::write(&sequence_file, rebase_sequence).with_context(|| { + format!("failed to write adjusted rebase sequence to {sequence_file:?}") + })?; + Ok(()) + } + fn bench( target: Option, no_install: bool, diff --git a/src/tools/miri/miri-script/src/main.rs b/src/tools/miri/miri-script/src/main.rs index 279bdf8cc3f48..6aab2f79bd78c 100644 --- a/src/tools/miri/miri-script/src/main.rs +++ b/src/tools/miri/miri-script/src/main.rs @@ -133,6 +133,8 @@ pub enum Command { #[arg(default_value = "miri-sync")] branch: String, }, + /// Squash the commits of the current feature branch into one. + Squash, } impl Command { @@ -154,7 +156,7 @@ impl Command { flags.extend(remainder); Ok(()) } - Self::Bench { .. } | Self::RustcPull { .. } | Self::RustcPush { .. } => + Self::Bench { .. } | Self::RustcPull { .. } | Self::RustcPush { .. } | Self::Squash => bail!("unexpected \"--\" found in arguments"), } } @@ -170,6 +172,11 @@ pub struct Cli { } fn main() -> Result<()> { + // If we are invoked as the git sequence editor, jump to that logic. + if !std::env::var_os("MIRI_SCRIPT_IS_GIT_SEQUENCE_EDITOR").unwrap_or_default().is_empty() { + return Command::squash_sequence_editor(); + } + // Split the arguments into the part before the `--` and the part after. // The `--` itself ends up in the second part. let miri_args: Vec<_> = std::env::args().take_while(|x| *x != "--").collect(); diff --git a/src/tools/miri/rust-version b/src/tools/miri/rust-version index 59d53891217d0..97bc826b57a40 100644 --- a/src/tools/miri/rust-version +++ b/src/tools/miri/rust-version @@ -1 +1 @@ -1b8ab72680f36e783af84c1a3c4f8508572bd9f9 +2ad5f8607d0e192b60b130e5cc416b477b351c18 diff --git a/src/tools/miri/src/alloc_addresses/mod.rs b/src/tools/miri/src/alloc_addresses/mod.rs index 335e8d76999cf..dd389d97cdce0 100644 --- a/src/tools/miri/src/alloc_addresses/mod.rs +++ b/src/tools/miri/src/alloc_addresses/mod.rs @@ -114,8 +114,16 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> { memory_kind: MemoryKind, ) -> InterpResult<'tcx, u64> { let this = self.eval_context_ref(); - let mut rng = this.machine.rng.borrow_mut(); let info = this.get_alloc_info(alloc_id); + + // Miri's address assignment leaks state across thread boundaries, which is incompatible + // with GenMC execution. So we instead let GenMC assign addresses to allocations. + if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { + let addr = genmc_ctx.handle_alloc(&this.machine, info.size, info.align, memory_kind)?; + return interp_ok(addr); + } + + let mut rng = this.machine.rng.borrow_mut(); // This is either called immediately after allocation (and then cached), or when // adjusting `tcx` pointers (which never get freed). So assert that we are looking // at a live allocation. This also ensures that we never re-assign an address to an @@ -197,6 +205,11 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> { if global_state.next_base_addr > this.target_usize_max() { throw_exhaust!(AddressSpaceFull); } + // If we filled up more than half the address space, start aggressively reusing + // addresses to avoid running out. 
+            if global_state.next_base_addr > u64::try_from(this.target_isize_max()).unwrap() {
+                global_state.reuse.address_space_shortage();
+            }
             interp_ok(base_addr)
         }
 
@@ -490,7 +503,7 @@ impl<'tcx> MiriMachine<'tcx> {
         // Also remember this address for future reuse.
         let thread = self.threads.active_thread();
         global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
-            if let Some(data_race) = &self.data_race {
+            if let Some(data_race) = self.data_race.as_vclocks_ref() {
                 data_race.release_clock(&self.threads, |clock| clock.clone())
             } else {
                 VClock::default()
diff --git a/src/tools/miri/src/alloc_addresses/reuse_pool.rs b/src/tools/miri/src/alloc_addresses/reuse_pool.rs
index c0d24a9fbbcf9..29d4f2bb7b0f3 100644
--- a/src/tools/miri/src/alloc_addresses/reuse_pool.rs
+++ b/src/tools/miri/src/alloc_addresses/reuse_pool.rs
@@ -20,7 +20,7 @@ pub struct ReusePool {
     /// allocations as address-size pairs, the list must be sorted by the size and then the thread ID.
     ///
     /// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to
-    /// less than 64 different possible value, that bounds the overall size of the pool.
+    /// less than 64 different possible values, that bounds the overall size of the pool.
     ///
     /// We also store the ID and the data-race clock of the thread that donated this pool element,
     /// to ensure synchronization with the thread that picks up this address.
@@ -36,6 +36,15 @@ impl ReusePool {
         }
     }
 
+    /// Call this when we are using up a lot of the address space: if memory reuse is enabled at all,
+    /// this will bump the intra-thread reuse rate to 100% so that we can keep running this program as
+    /// long as possible.
+    pub fn address_space_shortage(&mut self) {
+        if self.address_reuse_rate > 0.0 {
+            self.address_reuse_rate = 1.0;
+        }
+    }
+
     fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, ThreadId, VClock)> {
         let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
         if self.pool.len() <= pool_idx {
@@ -55,9 +64,7 @@
         clock: impl FnOnce() -> VClock,
     ) {
         // Let's see if we even want to remember this address.
-        // We don't remember stack addresses: there's a lot of them (so the perf impact is big),
-        // and we only want to reuse stack slots within the same thread or else we'll add a lot of
-        // undesired synchronization.
+        // We don't remember stack addresses since there's so many of them (so the perf impact is big).
         if kind == MemoryKind::Stack || !rng.random_bool(self.address_reuse_rate) {
             return;
         }
diff --git a/src/tools/miri/src/bin/miri.rs b/src/tools/miri/src/bin/miri.rs
index 8ff1c9d6ff022..69aa035fdc3df 100644
--- a/src/tools/miri/src/bin/miri.rs
+++ b/src/tools/miri/src/bin/miri.rs
@@ -28,13 +28,14 @@ use std::env::{self, VarError};
 use std::num::NonZero;
 use std::ops::Range;
 use std::path::PathBuf;
+use std::rc::Rc;
 use std::str::FromStr;
 use std::sync::atomic::{AtomicI32, AtomicU32, Ordering};
 use std::sync::{Arc, Once};
 
 use miri::{
-    BacktraceStyle, BorrowTrackerMethod, MiriConfig, MiriEntryFnType, ProvenanceMode, RetagFields,
-    ValidationMode,
+    BacktraceStyle, BorrowTrackerMethod, GenmcConfig, GenmcCtx, MiriConfig, MiriEntryFnType,
+    ProvenanceMode, RetagFields, ValidationMode,
 };
 use rustc_abi::ExternAbi;
 use rustc_data_structures::sync;
@@ -60,6 +61,8 @@ use tracing::debug;
 struct MiriCompilerCalls {
     miri_config: Option<MiriConfig>,
     many_seeds: Option<ManySeedsConfig>,
+    /// Settings for using GenMC with Miri.
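+    /// This is `None` unless a `-Zmiri-genmc` flag was passed on the command line.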
+    genmc_config: Option<GenmcConfig>,
 }
 
 struct ManySeedsConfig {
@@ -68,8 +71,12 @@ impl MiriCompilerCalls {
-    fn new(miri_config: MiriConfig, many_seeds: Option<ManySeedsConfig>) -> Self {
-        Self { miri_config: Some(miri_config), many_seeds }
+    fn new(
+        miri_config: MiriConfig,
+        many_seeds: Option<ManySeedsConfig>,
+        genmc_config: Option<GenmcConfig>,
+    ) -> Self {
+        Self { miri_config: Some(miri_config), many_seeds, genmc_config }
     }
 }
 
@@ -179,6 +186,12 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
                 optimizations is usually marginal at best.");
         }
 
+        if let Some(genmc_config) = &self.genmc_config {
+            let _genmc_ctx = Rc::new(GenmcCtx::new(&config, genmc_config));
+
+            todo!("GenMC mode not yet implemented");
+        };
+
         if let Some(many_seeds) = self.many_seeds.take() {
             assert!(config.seed.is_none());
             let exit_code = sync::IntoDynSyncSend(AtomicI32::new(rustc_driver::EXIT_SUCCESS));
@@ -187,8 +200,14 @@
                 let mut config = config.clone();
                 config.seed = Some((*seed).into());
                 eprintln!("Trying seed: {seed}");
-                let return_code = miri::eval_entry(tcx, entry_def_id, entry_type, config)
-                    .unwrap_or(rustc_driver::EXIT_FAILURE);
+                let return_code = miri::eval_entry(
+                    tcx,
+                    entry_def_id,
+                    entry_type,
+                    &config,
+                    /* genmc_ctx */ None,
+                )
+                .unwrap_or(rustc_driver::EXIT_FAILURE);
                 if return_code != rustc_driver::EXIT_SUCCESS {
                     eprintln!("FAILING SEED: {seed}");
                     if !many_seeds.keep_going {
@@ -206,11 +225,12 @@
             }
             std::process::exit(exit_code.0.into_inner());
         } else {
-            let return_code = miri::eval_entry(tcx, entry_def_id, entry_type, config)
+            let return_code = miri::eval_entry(tcx, entry_def_id, entry_type, &config, None)
                 .unwrap_or_else(|| {
                     tcx.dcx().abort_if_errors();
                     rustc_driver::EXIT_FAILURE
                 });
+
             std::process::exit(return_code);
         }
 
@@ -506,6 +526,7 @@ fn main() {
     let mut many_seeds_keep_going = false;
     let mut miri_config = MiriConfig::default();
     miri_config.env = env_snapshot;
+    let mut genmc_config = None;
 
     let mut rustc_args = vec![];
     let mut after_dashdash = false;
@@ -533,8 +554,6 @@
         } else if arg == "-Zmiri-tree-borrows" {
             miri_config.borrow_tracker = Some(BorrowTrackerMethod::TreeBorrows);
             miri_config.provenance_mode = ProvenanceMode::Strict;
-        } else if arg == "-Zmiri-unique-is-unique" {
-            miri_config.unique_is_unique = true;
         } else if arg == "-Zmiri-disable-data-race-detector" {
             miri_config.data_race_detector = false;
             miri_config.weak_memory_emulation = false;
@@ -573,6 +592,13 @@
             miri_config.mute_stdout_stderr = true;
         } else if arg == "-Zmiri-retag-fields" {
             miri_config.retag_fields = RetagFields::Yes;
+        } else if arg == "-Zmiri-fixed-schedule" {
+            miri_config.fixed_scheduling = true;
+        } else if arg == "-Zmiri-deterministic-concurrency" {
+            miri_config.fixed_scheduling = true;
+            miri_config.address_reuse_cross_thread_rate = 0.0;
+            miri_config.cmpxchg_weak_failure_rate = 0.0;
+            miri_config.weak_memory_emulation = false;
         } else if let Some(retag_fields) = arg.strip_prefix("-Zmiri-retag-fields=") {
             miri_config.retag_fields = match retag_fields {
                 "all" => RetagFields::Yes,
@@ -596,6 +622,10 @@
             many_seeds = Some(0..64);
         } else if arg == "-Zmiri-many-seeds-keep-going" {
             many_seeds_keep_going = true;
+        } else if let Some(trimmed_arg) = arg.strip_prefix("-Zmiri-genmc") {
+            // FIXME(GenMC): Currently, GenMC mode is incompatible with aliasing model checking.
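+            // Until that is resolved, requesting GenMC mode simply turns the borrow tracker off.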
+            miri_config.borrow_tracker = None;
+            GenmcConfig::parse_arg(&mut genmc_config, trimmed_arg);
         } else if let Some(param) = arg.strip_prefix("-Zmiri-env-forward=") {
             miri_config.forwarded_env_vars.push(param.to_owned());
         } else if let Some(param) = arg.strip_prefix("-Zmiri-env-set=") {
@@ -690,14 +720,6 @@
             rustc_args.push(arg);
         }
     }
-    // `-Zmiri-unique-is-unique` should only be used with `-Zmiri-tree-borrows`
-    if miri_config.unique_is_unique
-        && !matches!(miri_config.borrow_tracker, Some(BorrowTrackerMethod::TreeBorrows))
-    {
-        show_error!(
-            "-Zmiri-unique-is-unique only has an effect when -Zmiri-tree-borrows is also used"
-        );
-    }
     // Tree Borrows implies strict provenance, and is not compatible with native calls.
     if matches!(miri_config.borrow_tracker, Some(BorrowTrackerMethod::TreeBorrows)) {
         if miri_config.provenance_mode != ProvenanceMode::Strict {
@@ -727,7 +749,24 @@
     let many_seeds =
         many_seeds.map(|seeds| ManySeedsConfig { seeds, keep_going: many_seeds_keep_going });
 
+    // Validate settings for data race detection and GenMC mode.
+    assert_eq!(genmc_config.is_some(), miri_config.genmc_mode);
+    if genmc_config.is_some() {
+        if !miri_config.data_race_detector {
+            show_error!("Cannot disable data race detection in GenMC mode (currently)");
+        } else if !miri_config.weak_memory_emulation {
+            show_error!("Cannot disable weak memory emulation in GenMC mode");
+        }
+    } else if miri_config.weak_memory_emulation && !miri_config.data_race_detector {
+        show_error!(
+            "Weak memory emulation cannot be enabled when the data race detector is disabled"
+        );
+    };
+
     debug!("rustc arguments: {:?}", rustc_args);
     debug!("crate arguments: {:?}", miri_config.args);
-    run_compiler_and_exit(&rustc_args, &mut MiriCompilerCalls::new(miri_config, many_seeds))
+    run_compiler_and_exit(
+        &rustc_args,
+        &mut MiriCompilerCalls::new(miri_config, many_seeds, genmc_config),
+    )
 }
diff --git a/src/tools/miri/src/borrow_tracker/mod.rs b/src/tools/miri/src/borrow_tracker/mod.rs
index 9808102f4ba66..b66c561d2b8ad 100644
--- a/src/tools/miri/src/borrow_tracker/mod.rs
+++ b/src/tools/miri/src/borrow_tracker/mod.rs
@@ -102,8 +102,6 @@ pub struct GlobalStateInner {
     tracked_pointer_tags: FxHashSet<BorTag>,
     /// Whether to recurse into datatypes when searching for pointers to retag.
     retag_fields: RetagFields,
-    /// Whether `core::ptr::Unique` gets special (`Box`-like) handling.
-    unique_is_unique: bool,
 }
 
 impl VisitProvenance for GlobalStateInner {
@@ -164,7 +162,6 @@ impl GlobalStateInner {
         borrow_tracker_method: BorrowTrackerMethod,
         tracked_pointer_tags: FxHashSet<BorTag>,
         retag_fields: RetagFields,
-        unique_is_unique: bool,
     ) -> Self {
         GlobalStateInner {
             borrow_tracker_method,
@@ -173,7 +170,6 @@
             protected_tags: FxHashMap::default(),
             tracked_pointer_tags,
             retag_fields,
-            unique_is_unique,
         }
     }
 
@@ -239,7 +235,6 @@ impl BorrowTrackerMethod {
             self,
             config.tracked_pointer_tags.clone(),
             config.retag_fields,
-            config.unique_is_unique,
         ))
     }
 }
diff --git a/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs b/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs
index 0704ed199dbb3..bc57ba697b382 100644
--- a/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs
+++ b/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs
@@ -740,7 +740,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
         if let Some(access) = access {
             assert_eq!(access, AccessKind::Write);
             // Make sure the data race model also knows about this.
-            if let Some(data_race) = alloc_extra.data_race.as_mut() {
+            if let Some(data_race) = alloc_extra.data_race.as_vclocks_mut() {
                 data_race.write(
                     alloc_id,
                     range,
@@ -789,7 +789,7 @@
         if let Some(access) = access {
             assert_eq!(access, AccessKind::Read);
             // Make sure the data race model also knows about this.
-            if let Some(data_race) = alloc_extra.data_race.as_ref() {
+            if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
                 data_race.read(
                     alloc_id,
                     range,
diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs
index 65042f2668081..f3e32e75f2f2c 100644
--- a/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs
+++ b/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs
@@ -2,7 +2,6 @@ use rustc_abi::{BackendRepr, Size};
 use rustc_middle::mir::{Mutability, RetagKind};
 use rustc_middle::ty::layout::HasTypingEnv;
 use rustc_middle::ty::{self, Ty};
-use rustc_span::def_id::DefId;
 
 use crate::borrow_tracker::{GlobalState, GlobalStateInner, ProtectorKind};
 use crate::concurrency::data_race::NaReadType;
@@ -115,14 +114,15 @@ impl<'tcx> Tree {
 /// Policy for a new borrow.
 #[derive(Debug, Clone, Copy)]
 struct NewPermission {
-    /// Optionally ignore the actual size to do a zero-size reborrow.
-    /// If this is set then `dereferenceable` is not enforced.
-    zero_size: bool,
     /// Which permission should the pointer start with.
     initial_state: Permission,
     /// Whether this pointer is part of the arguments of a function call.
     /// `protector` is `Some(_)` for all pointers marked `noalias`.
     protector: Option<ProtectorKind>,
+    /// Whether a read should be performed on a retag. This should be `false`
+    /// for `Cell` because this could cause data races when using thread-safe
+    /// data types like `Mutex`.
+    initial_read: bool,
 }
 
 impl<'tcx> NewPermission {
@@ -141,18 +141,19 @@
         // To eliminate the case of Protected Reserved IM we override interior mutability
         // in the case of a protected reference: protected references are always considered
         // "freeze" in their reservation phase.
-        let initial_state = match mutability {
-            Mutability::Mut if ty_is_unpin => Permission::new_reserved(ty_is_freeze, is_protected),
-            Mutability::Not if ty_is_freeze => Permission::new_frozen(),
+        let (initial_state, initial_read) = match mutability {
+            Mutability::Mut if ty_is_unpin =>
+                (Permission::new_reserved(ty_is_freeze, is_protected), true),
+            Mutability::Not if ty_is_freeze => (Permission::new_frozen(), true),
+            Mutability::Not if !ty_is_freeze => (Permission::new_cell(), false),
             // Raw pointers never enter this function so they are not handled.
             // However raw pointers are not the only pointers that take the parent
-            // tag, this also happens for `!Unpin` `&mut`s and interior mutable
-            // `&`s, which are excluded above.
+            // tag, this also happens for `!Unpin` `&mut`s, which are excluded above.
             _ => return None,
         };
 
         let protector = is_protected.then_some(ProtectorKind::StrongProtector);
-        Some(Self { zero_size: false, initial_state, protector })
+        Some(Self { initial_state, protector, initial_read })
     }
 
     /// Compute permission for `Box`-like type (`Box` always, and also `Unique` if enabled).
@@ -162,19 +163,18 @@
         ty: Ty<'tcx>,
         kind: RetagKind,
         cx: &crate::MiriInterpCx<'tcx>,
-        zero_size: bool,
     ) -> Option<Self> {
         let pointee = ty.builtin_deref(true).unwrap();
         pointee.is_unpin(*cx.tcx, cx.typing_env()).then_some(()).map(|()| {
             // Regular `Unpin` box, give it `noalias` but only a weak protector
             // because it is valid to deallocate it within the function.
-            let ty_is_freeze = ty.is_freeze(*cx.tcx, cx.typing_env());
+            let ty_is_freeze = pointee.is_freeze(*cx.tcx, cx.typing_env());
             let protected = kind == RetagKind::FnEntry;
             let initial_state = Permission::new_reserved(ty_is_freeze, protected);
             Self {
-                zero_size,
                 initial_state,
                 protector: protected.then_some(ProtectorKind::WeakProtector),
+                initial_read: true,
             }
         })
     }
@@ -289,13 +289,15 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let mut tree_borrows = alloc_extra.borrow_tracker_tb().borrow_mut();
 
         // All reborrows incur a (possibly zero-sized) read access to the parent
-        tree_borrows.perform_access(
-            orig_tag,
-            Some((range, AccessKind::Read, diagnostics::AccessCause::Reborrow)),
-            this.machine.borrow_tracker.as_ref().unwrap(),
-            alloc_id,
-            this.machine.current_span(),
-        )?;
+        if new_perm.initial_read {
+            tree_borrows.perform_access(
+                orig_tag,
+                Some((range, AccessKind::Read, diagnostics::AccessCause::Reborrow)),
+                this.machine.borrow_tracker.as_ref().unwrap(),
+                alloc_id,
+                this.machine.current_span(),
+            )?;
+        }
         // Record the parent-child pair in the tree.
         tree_borrows.new_child(
             orig_tag,
@@ -308,8 +310,8 @@
         drop(tree_borrows);
 
         // Also inform the data race model (but only if any bytes are actually affected).
-        if range.size.bytes() > 0 {
-            if let Some(data_race) = alloc_extra.data_race.as_ref() {
+        if range.size.bytes() > 0 && new_perm.initial_read {
+            if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
                 data_race.read(
                     alloc_id,
                     range,
@@ -333,15 +335,12 @@
         // Determine the size of the reborrow.
         // For most types this is the entire size of the place, however
         // - when `extern type` is involved we use the size of the known prefix,
-        // - if the pointer is not reborrowed (raw pointer) or if `zero_size` is set
-        //   then we override the size to do a zero-length reborrow.
-        let reborrow_size = match new_perm {
-            NewPermission { zero_size: false, .. } =>
-                this.size_and_align_of_mplace(place)?
-                    .map(|(size, _)| size)
-                    .unwrap_or(place.layout.size),
-            _ => Size::from_bytes(0),
-        };
+        // - if the pointer is not reborrowed (raw pointer) then we override the size
+        //   to do a zero-length reborrow.
+        let reborrow_size = this
+            .size_and_align_of_mplace(place)?
+            .map(|(size, _)| size)
+            .unwrap_or(place.layout.size);
         trace!("Creating new permission: {:?} with size {:?}", new_perm, reborrow_size);
 
         // This new tag is not guaranteed to actually be used.
@@ -405,9 +404,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let this = self.eval_context_mut();
         let options = this.machine.borrow_tracker.as_mut().unwrap().get_mut();
         let retag_fields = options.retag_fields;
-        let unique_did =
-            options.unique_is_unique.then(|| this.tcx.lang_items().ptr_unique()).flatten();
-        let mut visitor = RetagVisitor { ecx: this, kind, retag_fields, unique_did };
+        let mut visitor = RetagVisitor { ecx: this, kind, retag_fields };
         return visitor.visit_value(place);
 
         // The actual visitor.
@@ -415,7 +412,6 @@
             ecx: &'ecx mut MiriInterpCx<'tcx>,
             kind: RetagKind,
             retag_fields: RetagFields,
-            unique_did: Option<DefId>,
         }
         impl<'ecx, 'tcx> RetagVisitor<'ecx, 'tcx> {
             #[inline(always)] // yes this helps in our benchmarks
@@ -446,12 +442,8 @@
            fn visit_box(&mut self, box_ty: Ty<'tcx>, place: &PlaceTy<'tcx>) -> InterpResult<'tcx> {
                 // Only boxes for the global allocator get any special treatment.
                 if box_ty.is_box_global(*self.ecx.tcx) {
-                    let new_perm = NewPermission::from_unique_ty(
-                        place.layout.ty,
-                        self.kind,
-                        self.ecx,
-                        /* zero_size */ false,
-                    );
+                    let new_perm =
+                        NewPermission::from_unique_ty(place.layout.ty, self.kind, self.ecx);
                     self.retag_ptr_inplace(place, new_perm)?;
                 }
                 interp_ok(())
@@ -485,16 +477,6 @@
                         // even if field retagging is not enabled. *shrug*)
                         self.walk_value(place)?;
                     }
-                    ty::Adt(adt, _) if self.unique_did == Some(adt.did()) => {
-                        let place = inner_ptr_of_unique(self.ecx, place)?;
-                        let new_perm = NewPermission::from_unique_ty(
-                            place.layout.ty,
-                            self.kind,
-                            self.ecx,
-                            /* zero_size */ true,
-                        );
-                        self.retag_ptr_inplace(&place, new_perm)?;
-                    }
                     _ => {
                         // Not a reference/pointer/box. Only recurse if configured appropriately.
                         let recurse = match self.retag_fields {
@@ -533,8 +515,8 @@
             // Retag it. With protection! That is the entire point.
             let new_perm = NewPermission {
                 initial_state: Permission::new_reserved(ty_is_freeze, /* protected */ true),
-                zero_size: false,
                 protector: Some(ProtectorKind::StrongProtector),
+                initial_read: true,
             };
             this.tb_retag_place(place, new_perm)
         }
@@ -594,27 +576,3 @@
         tree_borrows.give_pointer_debug_name(tag, nth_parent, name)
     }
 }
-
-/// Takes a place for a `Unique` and turns it into a place with the inner raw pointer.
-/// I.e. input is what you get from the visitor upon encountering an `adt` that is `Unique`,
-/// and output can be used by `retag_ptr_inplace`.
-fn inner_ptr_of_unique<'tcx>(
-    ecx: &MiriInterpCx<'tcx>,
-    place: &PlaceTy<'tcx>,
-) -> InterpResult<'tcx, PlaceTy<'tcx>> {
-    // Follows the same layout as `interpret/visitor.rs:walk_value` for `Box` in
-    // `rustc_const_eval`, just with one fewer layer.
-    // Here we have a `Unique(NonNull(*mut), PhantomData)`
-    assert_eq!(place.layout.fields.count(), 2, "Unique must have exactly 2 fields");
-    let (nonnull, phantom) = (ecx.project_field(place, 0)?, ecx.project_field(place, 1)?);
-    assert!(
-        phantom.layout.ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
-        "2nd field of `Unique` should be `PhantomData` but is `{:?}`",
-        phantom.layout.ty,
-    );
-    // Now down to `NonNull(*mut)`
-    assert_eq!(nonnull.layout.fields.count(), 1, "NonNull must have exactly 1 field");
-    let ptr = ecx.project_field(&nonnull, 0)?;
-    // Finally a plain `*mut`
-    interp_ok(ptr)
-}
diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/perms.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/perms.rs
index 5c12ce39d10da..087f6fc3f24b0 100644
--- a/src/tools/miri/src/borrow_tracker/tree_borrows/perms.rs
+++ b/src/tools/miri/src/borrow_tracker/tree_borrows/perms.rs
@@ -8,6 +8,10 @@ use crate::borrow_tracker::tree_borrows::tree::AccessRelatedness;
 
 /// The activation states of a pointer.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 enum PermissionPriv {
+    /// represents: a shared reference to interior mutable data.
+    /// allows: all foreign and child accesses;
+    /// rejects: nothing
+    Cell,
     /// represents: a local mutable reference that has not yet been written to;
     /// allows: child reads, foreign reads;
     /// affected by: child writes (becomes Active),
@@ -60,6 +64,14 @@ impl PartialOrd for PermissionPriv {
         use Ordering::*;
         Some(match (self, other) {
             (a, b) if a == b => Equal,
+            // Versions of `Reserved` with different interior mutability are incomparable with each
+            // other.
+            (ReservedIM, ReservedFrz { .. })
+            | (ReservedFrz { .. }, ReservedIM)
+            // `Cell` is not comparable with any other permission
+            // since it never transitions to any other state and we
+            // can never get to `Cell` from another state.
+            | (Cell, _) | (_, Cell) => return None,
             (Disabled, _) => Greater,
             (_, Disabled) => Less,
             (Frozen, _) => Greater,
@@ -71,9 +83,6 @@
                 // `bool` is ordered such that `false <= true`, so this works as intended.
                 c1.cmp(c2)
             }
-            // Versions of `Reserved` with different interior mutability are incomparable with each
-            // other.
-            (ReservedIM, ReservedFrz { .. }) | (ReservedFrz { .. }, ReservedIM) => return None,
         })
     }
 }
@@ -81,17 +90,22 @@ impl PermissionPriv {
     /// Check if `self` can be the initial state of a pointer.
     fn is_initial(&self) -> bool {
-        matches!(self, ReservedFrz { conflicted: false } | Frozen | ReservedIM)
+        matches!(self, ReservedFrz { conflicted: false } | Frozen | ReservedIM | Cell)
     }
 
     /// Reject `ReservedIM` that cannot exist in the presence of a protector.
     fn compatible_with_protector(&self) -> bool {
-        !matches!(self, ReservedIM)
+        // FIXME(TB-Cell): It is unclear what to do here.
+        // `Cell` will occur with a protector but won't provide the guarantees
+        // of noalias (it will fail the `protected_enforces_noalias` test).
+        !matches!(self, ReservedIM | Cell)
     }
 
     /// See `foreign_access_skipping.rs`. Computes the SIFA of a permission.
     fn strongest_idempotent_foreign_access(&self, prot: bool) -> IdempotentForeignAccess {
         match self {
+            // Cell survives any foreign access
+            Cell => IdempotentForeignAccess::Write,
             // A protected non-conflicted Reserved will become conflicted under a foreign read,
             // and is hence not idempotent under it.
             ReservedFrz { conflicted } if prot && !conflicted => IdempotentForeignAccess::None,
@@ -124,7 +138,7 @@ mod transition {
             Disabled => return None,
             // The inner data `ty_is_freeze` of `Reserved` is always irrelevant for Read
             // accesses, since the data is not being mutated. Hence the `{ .. }`.
-            readable @ (ReservedFrz { .. } | ReservedIM | Active | Frozen) => readable,
+            readable @ (Cell | ReservedFrz { .. } | ReservedIM | Active | Frozen) => readable,
         })
     }
 
@@ -132,6 +146,8 @@
     /// is protected; invalidate `Active`.
     fn foreign_read(state: PermissionPriv, protected: bool) -> Option<PermissionPriv> {
         Some(match state {
+            // Cell ignores foreign reads.
+            Cell => Cell,
             // Non-writeable states just ignore foreign reads.
             non_writeable @ (Frozen | Disabled) => non_writeable,
             // Writeable states are more tricky, and depend on whether things are protected.
@@ -167,6 +183,8 @@
     /// write permissions, `Frozen` and `Disabled` cannot obtain such permissions and produce UB.
     fn child_write(state: PermissionPriv, protected: bool) -> Option<PermissionPriv> {
         Some(match state {
+            // Cell ignores child writes.
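+            // (A write through a child of a `Cell` reference is ordinary interior mutability,
+            // so the permission stays unchanged.)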
+            Cell => Cell,
             // If the `conflicted` flag is set, then there was a foreign read during
             // the function call that is still ongoing (still `protected`),
             // this is UB (`noalias` violation).
@@ -185,6 +203,8 @@
         // types receive a `ReservedFrz` instead of `ReservedIM` when retagged under a protector,
         // so the result of this function does indirectly depend on (past) protector status.
         Some(match state {
+            // Cell ignores foreign writes.
+            Cell => Cell,
             res @ ReservedIM => {
                 // We can never create a `ReservedIM` under a protector, only `ReservedFrz`.
                 assert!(!protected);
@@ -242,6 +262,11 @@ impl Permission {
         self.inner == Frozen
     }
 
+    /// Check if `self` is the shared-reference-to-interior-mutable-data state of a pointer.
+    pub fn is_cell(&self) -> bool {
+        self.inner == Cell
+    }
+
     /// Default initial permission of the root of a new tree at inbounds positions.
     /// Must *only* be used for the root, this is not in general an "initial" permission!
     pub fn new_active() -> Self {
@@ -278,11 +303,27 @@
         Self { inner: Disabled }
     }
 
+    /// Default initial permission of a shared reference to interior mutable data.
+    pub fn new_cell() -> Self {
+        Self { inner: Cell }
+    }
+
     /// Reject `ReservedIM` that cannot exist in the presence of a protector.
     pub fn compatible_with_protector(&self) -> bool {
         self.inner.compatible_with_protector()
     }
 
+    /// What kind of access to perform before releasing the protector.
+    pub fn protector_end_access(&self) -> Option<AccessKind> {
+        match self.inner {
+            // Do not perform an access if it is a `Cell`, as this
+            // can cause data races when using thread-safe data types.
+            Cell => None,
+            Active => Some(AccessKind::Write),
+            _ => Some(AccessKind::Read),
+        }
+    }
+
     /// Apply the transition to the inner PermissionPriv.
     pub fn perform_access(
         kind: AccessKind,
@@ -306,30 +347,32 @@
     /// remove protected parents.
     pub fn can_be_replaced_by_child(self, child: Self) -> bool {
         match (self.inner, child.inner) {
-            // ReservedIM can be replaced by anything, as it allows all
-            // transitions.
+            // Cell allows all transitions.
+            (Cell, _) => true,
+            // Cell is the most permissive, nothing can be replaced by Cell.
+            // (ReservedIM, Cell) => true,
+            (_, Cell) => false,
+            // ReservedIM can be replaced by anything besides Cell.
+            // ReservedIM allows all transitions, but unlike Cell, a local write
+            // to ReservedIM transitions to Active, while it is a no-op for Cell.
             (ReservedIM, _) => true,
+            (_, ReservedIM) => false,
             // Reserved (as parent, where conflictedness does not matter)
-            // can be replaced by all but ReservedIM,
-            // since ReservedIM alone would survive foreign writes
-            (ReservedFrz { .. }, ReservedIM) => false,
+            // can be replaced by all but ReservedIM and Cell,
+            // since ReservedIM and Cell alone would survive foreign writes
             (ReservedFrz { .. }, _) => true,
+            (_, ReservedFrz { .. }) => false,
             // Active can not be replaced by something surviving
-            // foreign reads and then remaining writable.
-            (Active, ReservedIM) => false,
-            (Active, ReservedFrz { .. }) => false,
+            // foreign reads and then remaining writable (i.e., Reserved*).
             // Replacing a state by itself is always okay, even if the child state is protected.
-            (Active, Active) => true,
             // Active can be replaced by Frozen, since it is not protected.
-            (Active, Frozen) => true,
-            (Active, Disabled) => true,
+            (Active, Active | Frozen | Disabled) => true,
+            (_, Active) => false,
             // Frozen can only be replaced by Disabled (and itself).
- (Frozen, Frozen) => true, - (Frozen, Disabled) => true, - (Frozen, _) => false, + (Frozen, Frozen | Disabled) => true, + (_, Frozen) => false, // Disabled can not be replaced by anything else. (Disabled, Disabled) => true, - (Disabled, _) => false, } } @@ -383,6 +426,7 @@ pub mod diagnostics { f, "{}", match self { + Cell => "Cell", ReservedFrz { conflicted: false } => "Reserved", ReservedFrz { conflicted: true } => "Reserved (conflicted)", ReservedIM => "Reserved (interior mutable)", @@ -413,6 +457,7 @@ pub mod diagnostics { // and also as `diagnostics::DisplayFmtPermission.uninit` otherwise // alignment will be incorrect. match self.inner { + Cell => "Cel ", ReservedFrz { conflicted: false } => "Res ", ReservedFrz { conflicted: true } => "ResC", ReservedIM => "ReIM", @@ -459,7 +504,7 @@ pub mod diagnostics { /// (Reserved < Active < Frozen < Disabled); /// - between `self` and `err` the permission should also be increasing, /// so all permissions inside `err` should be greater than `self.1`; - /// - `Active` and `Reserved(conflicted=false)` cannot cause an error + /// - `Active`, `Reserved(conflicted=false)`, and `Cell` cannot cause an error /// due to insufficient permissions, so `err` cannot be a `ChildAccessForbidden(_)` /// of either of them; /// - `err` should not be `ProtectedDisabled(Disabled)`, because the protected @@ -492,13 +537,14 @@ pub mod diagnostics { (ReservedFrz { conflicted: true } | Active | Frozen, Disabled) => false, (ReservedFrz { conflicted: true }, Frozen) => false, - // `Active` and `Reserved` have all permissions, so a + // `Active`, `Reserved`, and `Cell` have all permissions, so a // `ChildAccessForbidden(Reserved | Active)` can never exist. - (_, Active) | (_, ReservedFrz { conflicted: false }) => + (_, Active) | (_, ReservedFrz { conflicted: false }) | (_, Cell) => unreachable!("this permission cannot cause an error"), // No transition has `Reserved { conflicted: false }` or `ReservedIM` - // as its `.to` unless it's a noop. - (ReservedFrz { conflicted: false } | ReservedIM, _) => + // as its `.to` unless it's a noop. `Cell` cannot be in its `.to` + // because all child accesses are a noop. + (ReservedFrz { conflicted: false } | ReservedIM | Cell, _) => unreachable!("self is a noop transition"), // All transitions produced in normal executions (using `apply_access`) // change permissions in the order `Reserved -> Active -> Frozen -> Disabled`. @@ -544,16 +590,17 @@ pub mod diagnostics { "permission that results in Disabled should not itself be Disabled in the first place" ), // No transition has `Reserved { conflicted: false }` or `ReservedIM` as its `.to` - // unless it's a noop. - (ReservedFrz { conflicted: false } | ReservedIM, _) => + // unless it's a noop. `Cell` cannot be in its `.to` because all child + // accesses are a noop. + (ReservedFrz { conflicted: false } | ReservedIM | Cell, _) => unreachable!("self is a noop transition"), // Permissions only evolve in the order `Reserved -> Active -> Frozen -> Disabled`, // so permissions found must be increasing in the order // `self.from < self.to <= forbidden.from < forbidden.to`. - (Disabled, ReservedFrz { .. } | ReservedIM | Active | Frozen) - | (Frozen, ReservedFrz { .. } | ReservedIM | Active) - | (Active, ReservedFrz { .. } | ReservedIM) => + (Disabled, Cell | ReservedFrz { .. } | ReservedIM | Active | Frozen) + | (Frozen, Cell | ReservedFrz { .. } | ReservedIM | Active) + | (Active, Cell | ReservedFrz { .. 
} | ReservedIM) =>
                    unreachable!("permissions between self and err must be increasing"),
            }
        }
    }
 
@@ -590,7 +637,7 @@ mod propagation_optimization_checks {
     impl Exhaustive for PermissionPriv {
         fn exhaustive() -> Box<dyn Iterator<Item = Self>> {
             Box::new(
-                vec![Active, Frozen, Disabled, ReservedIM]
+                vec![Active, Frozen, Disabled, ReservedIM, Cell]
                     .into_iter()
                     .chain(<bool>::exhaustive().map(|conflicted| ReservedFrz { conflicted })),
             )
diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/tree.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/tree.rs
index 3389b1c602c33..47ccaadbb9e36 100644
--- a/src/tools/miri/src/borrow_tracker/tree_borrows/tree.rs
+++ b/src/tools/miri/src/borrow_tracker/tree_borrows/tree.rs
@@ -721,9 +721,14 @@ impl<'tcx> Tree {
             // visit all children, skipping none
             |_| ContinueTraversal::Recurse,
             |args: NodeAppArgs<'_>| -> Result<(), TransitionError> {
-                let NodeAppArgs { node, .. } = args;
+                let NodeAppArgs { node, perm, .. } = args;
+                let perm =
+                    perm.get().copied().unwrap_or_else(|| node.default_location_state());
                 if global.borrow().protected_tags.get(&node.tag)
                     == Some(&ProtectorKind::StrongProtector)
+                    // Don't check for protector if it is a Cell (see `unsafe_cell_deallocate` in `interior_mutability.rs`).
+                    // Related to https://github.com/rust-lang/rust/issues/55005.
+                    && !perm.permission().is_cell()
                 {
                     Err(TransitionError::ProtectedDealloc)
                 } else {
@@ -865,10 +870,9 @@
             let idx = self.tag_mapping.get(&tag).unwrap();
             // Only visit initialized permissions
             if let Some(p) = perms.get(idx)
+                && let Some(access_kind) = p.permission.protector_end_access()
                 && p.initialized
             {
-                let access_kind =
-                    if p.permission.is_active() { AccessKind::Write } else { AccessKind::Read };
                 let access_cause = diagnostics::AccessCause::FnExit(access_kind);
                 TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, perms }
                     .traverse_nonchildren(
diff --git a/src/tools/miri/src/concurrency/data_race.rs b/src/tools/miri/src/concurrency/data_race.rs
index 847c682347592..714eb1fba91c5 100644
--- a/src/tools/miri/src/concurrency/data_race.rs
+++ b/src/tools/miri/src/concurrency/data_race.rs
@@ -1,4 +1,4 @@
-//! Implementation of a data-race detector using Lamport Timestamps / Vector-clocks
+//! Implementation of a data-race detector using Lamport Timestamps / Vector clocks
 //! based on the Dynamic Race Detection for C++:
 //!
 //! which does not report false-positives when fences are used, and gives better
@@ -54,6 +54,7 @@ use rustc_span::Span;
 
 use super::vector_clock::{VClock, VTimestamp, VectorIdx};
 use super::weak_memory::EvalContextExt as _;
+use crate::concurrency::GlobalDataRaceHandler;
 use crate::diagnostics::RacingOp;
 use crate::*;
 
@@ -259,7 +260,7 @@ enum AccessType {
 /// Per-byte vector clock metadata for data-race detection.
 #[derive(Clone, PartialEq, Eq, Debug)]
 struct MemoryCellClocks {
-    /// The vector-clock timestamp and the thread that did the last non-atomic write. We don't need
+    /// The vector clock timestamp and the thread that did the last non-atomic write. We don't need
     /// a full `VClock` here, it's always a single thread and nothing synchronizes, so the effective
     /// clock is all-0 except for the thread that did the write.
     write: (VectorIdx, VTimestamp),
@@ -269,7 +270,7 @@
    /// a deallocation of memory.
     write_type: NaWriteType,
 
-    /// The vector-clock of all non-atomic reads that happened since the last non-atomic write
+    /// The vector clock of all non-atomic reads that happened since the last non-atomic write
     /// (i.e., we join together the "singleton" clocks corresponding to each read). It is reset to
     /// zero on each write operation.
     read: VClock,
@@ -298,7 +299,7 @@ struct ThreadExtraState {
 }
 
 /// Global data-race detection state, contains the currently
-/// executing thread as well as the vector-clocks associated
+/// executing thread as well as the vector clocks associated
 /// with each of the threads.
 // FIXME: it is probably better to have one large RefCell, than to have so many small ones.
 #[derive(Debug, Clone)]
@@ -335,7 +336,7 @@ pub struct GlobalState {
     /// for use as the index for a new thread.
     /// Elements in this set may still require the vector index to
     /// report data-races, and can only be re-used after all
-    /// active vector-clocks catch up with the threads timestamp.
+    /// active vector clocks catch up with the threads timestamp.
     reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
 
     /// We make SC fences act like RMWs on a global location.
@@ -348,6 +349,9 @@
 
     /// Track when an outdated (weak memory) load happens.
     pub track_outdated_loads: bool,
+
+    /// Whether weak memory emulation is enabled
+    pub weak_memory: bool,
 }
 
 impl VisitProvenance for GlobalState {
@@ -680,6 +684,23 @@ impl MemoryCellClocks {
     }
 }
 
+impl GlobalDataRaceHandler {
+    /// Select whether data race checking is disabled. This is solely an
+    /// implementation detail of `allow_data_races_*` and must not be used anywhere else!
+    fn set_ongoing_action_data_race_free(&self, enable: bool) {
+        match self {
+            GlobalDataRaceHandler::None => {}
+            GlobalDataRaceHandler::Vclocks(data_race) => {
+                let old = data_race.ongoing_action_data_race_free.replace(enable);
+                assert_ne!(old, enable, "cannot nest allow_data_races");
+            }
+            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
+                genmc_ctx.set_ongoing_action_data_race_free(enable);
+            }
+        }
+    }
+}
+
 /// Evaluation context extensions.
 impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx> {}
 pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
@@ -696,6 +717,19 @@
         // This is fine with StackedBorrow and race checks because they don't concern metadata on
         // the *value* (including the associated provenance if this is an AtomicPtr) at this location.
         // Only metadata on the location itself is used.
+
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            let old_val = None;
+            return genmc_ctx.atomic_load(
+                this,
+                place.ptr().addr(),
+                place.layout.size,
+                atomic,
+                old_val,
+            );
+        }
+
         let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
         let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, || {
             this.validate_atomic_load(place, atomic)
@@ -718,6 +752,12 @@
         // This is also a very special exception where we just ignore an error -- if this read
         // was UB e.g. because the memory is uninitialized, we don't want to know!
         let old_val = this.run_for_validation_mut(|this| this.read_scalar(dest)).discard_err();
+        // Inform GenMC about the atomic store.
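+        // (In GenMC mode, GenMC takes over the store entirely; the vector-clock and
+        // store-buffer handling below is skipped.)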
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            genmc_ctx.atomic_store(this, dest.ptr().addr(), dest.layout.size, val, atomic)?;
+            return interp_ok(());
+        }
         this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
         this.validate_atomic_store(dest, atomic)?;
         this.buffered_atomic_write(val, dest, atomic, old_val)
@@ -737,6 +777,21 @@
 
         let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
 
+        // Inform GenMC about the atomic rmw operation.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            let (old_val, new_val) = genmc_ctx.atomic_rmw_op(
+                this,
+                place.ptr().addr(),
+                place.layout.size,
+                atomic,
+                (op, not),
+                rhs.to_scalar(),
+            )?;
+            this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
+            return interp_ok(ImmTy::from_scalar(old_val, old.layout));
+        }
+
         let val = this.binary_op(op, &old, rhs)?;
         let val = if not { this.unary_op(mir::UnOp::Not, &val)? } else { val };
         this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;
@@ -761,6 +816,19 @@
         let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
         this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
 
+        // Inform GenMC about the atomic exchange.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            let (old_val, _is_success) = genmc_ctx.atomic_exchange(
+                this,
+                place.ptr().addr(),
+                place.layout.size,
+                new,
+                atomic,
+            )?;
+            return interp_ok(old_val);
+        }
+
         this.validate_atomic_rmw(place, atomic)?;
 
         this.buffered_atomic_rmw(new, place, atomic, old)?;
@@ -780,6 +848,23 @@
         this.atomic_access_check(place, AtomicAccessType::Rmw)?;
 
         let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
+
+        // Inform GenMC about the atomic min/max operation.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            let (old_val, new_val) = genmc_ctx.atomic_min_max_op(
+                this,
+                place.ptr().addr(),
+                place.layout.size,
+                atomic,
+                min,
+                old.layout.backend_repr.is_signed(),
+                rhs.to_scalar(),
+            )?;
+            this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
+            return interp_ok(ImmTy::from_scalar(old_val, old.layout));
+        }
+
         let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;
 
         #[rustfmt::skip] // rustfmt makes this unreadable
@@ -823,6 +908,25 @@
         // read ordering and write in the success case.
         // Read as immediate for the sake of `binary_op()`
         let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
+
+        // Inform GenMC about the atomic compare exchange.
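+        // (GenMC decides whether the CAS succeeds, including spurious failures of a weak
+        // compare-exchange, which is why `can_fail_spuriously` is forwarded below.)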
+ if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { + let (old, cmpxchg_success) = genmc_ctx.atomic_compare_exchange( + this, + place.ptr().addr(), + place.layout.size, + this.read_scalar(expect_old)?, + new, + success, + fail, + can_fail_spuriously, + )?; + if cmpxchg_success { + this.allow_data_races_mut(|this| this.write_scalar(new, place))?; + } + return interp_ok(Immediate::ScalarPair(old, Scalar::from_bool(cmpxchg_success))); + } + // `binary_op` will bail if either of them is not a scalar. let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?; // If the operation would succeed, but is "weak", fail some portion @@ -859,49 +963,11 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { /// Update the data-race detector for an atomic fence on the current thread. fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let current_span = this.machine.current_span(); - if let Some(data_race) = &mut this.machine.data_race { - data_race.maybe_perform_sync_operation( - &this.machine.threads, - current_span, - |index, mut clocks| { - trace!("Atomic fence on {:?} with ordering {:?}", index, atomic); - - // Apply data-race detection for the current fences - // this treats AcqRel and SeqCst as the same as an acquire - // and release fence applied in the same timestamp. - if atomic != AtomicFenceOrd::Release { - // Either Acquire | AcqRel | SeqCst - clocks.apply_acquire_fence(); - } - if atomic == AtomicFenceOrd::SeqCst { - // Behave like an RMW on the global fence location. This takes full care of - // all the SC fence requirements, including C++17 §32.4 [atomics.order] - // paragraph 6 (which would limit what future reads can see). It also rules - // out many legal behaviors, but we don't currently have a model that would - // be more precise. - // Also see the second bullet on page 10 of - // . - let mut sc_fence_clock = data_race.last_sc_fence.borrow_mut(); - sc_fence_clock.join(&clocks.clock); - clocks.clock.join(&sc_fence_clock); - // Also establish some sort of order with the last SC write that happened, globally - // (but this is only respected by future reads). - clocks.write_seqcst.join(&data_race.last_sc_write_per_thread.borrow()); - } - // The release fence is last, since both of the above could alter our clock, - // which should be part of what is being released. - if atomic != AtomicFenceOrd::Acquire { - // Either Release | AcqRel | SeqCst - clocks.apply_release_fence(); - } - - // Increment timestamp in case of release semantics. - interp_ok(atomic != AtomicFenceOrd::Acquire) - }, - ) - } else { - interp_ok(()) + let machine = &this.machine; + match &this.machine.data_race { + GlobalDataRaceHandler::None => interp_ok(()), + GlobalDataRaceHandler::Vclocks(data_race) => data_race.atomic_fence(machine, atomic), + GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.atomic_fence(machine, atomic), } } @@ -910,10 +976,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { fn allow_data_races_all_threads_done(&mut self) { let this = self.eval_context_ref(); assert!(this.have_all_terminated()); - if let Some(data_race) = &this.machine.data_race { - let old = data_race.ongoing_action_data_race_free.replace(true); - assert!(!old, "cannot nest allow_data_races"); - } + this.machine.data_race.set_ongoing_action_data_race_free(true); } /// Calls the callback with the "release" clock of the current thread. 
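The rewritten `atomic_fence` above replaces the old `if let Some(data_race)` checks with a three-way dispatch over the handler, and the same pattern recurs at every concurrency event in this patch. A minimal self-contained sketch of that shape (placeholder types, not Miri's actual API):

```rust
// Sketch of the three-way dispatch; `VclockState` and `GenmcHandle`
// are illustrative stand-ins, not Miri's real types.
struct VclockState;
struct GenmcHandle;

enum DataRaceHandler {
    None,
    Vclocks(VclockState),
    Genmc(GenmcHandle),
}

impl DataRaceHandler {
    fn on_fence(&self) {
        match self {
            // No detector: the event is simply ignored.
            DataRaceHandler::None => {}
            // Vector-clock mode: update the current thread's clocks.
            DataRaceHandler::Vclocks(_state) => {}
            // GenMC mode: forward the event to the model checker.
            DataRaceHandler::Genmc(_handle) => {}
        }
    }
}

fn main() {
    DataRaceHandler::Vclocks(VclockState).on_fence();
}
```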
@@ -923,14 +986,16 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { /// The closure will only be invoked if data race handling is on. fn release_clock(&self, callback: impl FnOnce(&VClock) -> R) -> Option { let this = self.eval_context_ref(); - Some(this.machine.data_race.as_ref()?.release_clock(&this.machine.threads, callback)) + Some( + this.machine.data_race.as_vclocks_ref()?.release_clock(&this.machine.threads, callback), + ) } /// Acquire the given clock into the current thread, establishing synchronization with /// the moment when that clock snapshot was taken via `release_clock`. fn acquire_clock(&self, clock: &VClock) { let this = self.eval_context_ref(); - if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.acquire_clock(clock, &this.machine.threads); } } @@ -1132,7 +1197,7 @@ impl VClockAlloc { machine: &MiriMachine<'_>, ) -> InterpResult<'tcx> { let current_span = machine.current_span(); - let global = machine.data_race.as_ref().unwrap(); + let global = machine.data_race.as_vclocks_ref().unwrap(); if !global.race_detecting() { return interp_ok(()); } @@ -1174,7 +1239,7 @@ impl VClockAlloc { machine: &mut MiriMachine<'_>, ) -> InterpResult<'tcx> { let current_span = machine.current_span(); - let global = machine.data_race.as_mut().unwrap(); + let global = machine.data_race.as_vclocks_mut().unwrap(); if !global.race_detecting() { return interp_ok(()); } @@ -1228,7 +1293,7 @@ impl Default for LocalClocks { impl FrameState { pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) { let current_span = machine.current_span(); - let global = machine.data_race.as_ref().unwrap(); + let global = machine.data_race.as_vclocks_ref().unwrap(); if !global.race_detecting() { return; } @@ -1258,7 +1323,7 @@ impl FrameState { pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) { let current_span = machine.current_span(); - let global = machine.data_race.as_ref().unwrap(); + let global = machine.data_race.as_vclocks_ref().unwrap(); if !global.race_detecting() { return; } @@ -1281,7 +1346,7 @@ impl FrameState { alloc: &mut VClockAlloc, machine: &MiriMachine<'_>, ) { - let global = machine.data_race.as_ref().unwrap(); + let global = machine.data_race.as_vclocks_ref().unwrap(); if !global.race_detecting() { return; } @@ -1314,14 +1379,9 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { #[inline] fn allow_data_races_ref(&self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R) -> R { let this = self.eval_context_ref(); - if let Some(data_race) = &this.machine.data_race { - let old = data_race.ongoing_action_data_race_free.replace(true); - assert!(!old, "cannot nest allow_data_races"); - } + this.machine.data_race.set_ongoing_action_data_race_free(true); let result = op(this); - if let Some(data_race) = &this.machine.data_race { - data_race.ongoing_action_data_race_free.set(false); - } + this.machine.data_race.set_ongoing_action_data_race_free(false); result } @@ -1331,14 +1391,9 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { #[inline] fn allow_data_races_mut(&mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R) -> R { let this = self.eval_context_mut(); - if let Some(data_race) = &this.machine.data_race { - let old = data_race.ongoing_action_data_race_free.replace(true); - assert!(!old, "cannot nest allow_data_races"); - } + this.machine.data_race.set_ongoing_action_data_race_free(true); let result = op(this); - if let Some(data_race) = 
&this.machine.data_race { - data_race.ongoing_action_data_race_free.set(false); - } + this.machine.data_race.set_ongoing_action_data_race_free(false); result } @@ -1355,7 +1410,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { let align = Align::from_bytes(place.layout.size.bytes()).unwrap(); this.check_ptr_align(place.ptr(), align)?; // Ensure the allocation is mutable. Even failing (read-only) compare_exchange need mutable - // memory on many targets (i.e., they segfault if taht memory is mapped read-only), and + // memory on many targets (i.e., they segfault if that memory is mapped read-only), and // atomic loads can be implemented via compare_exchange on some targets. There could // possibly be some very specific exceptions to this, see // for details. @@ -1486,7 +1541,9 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { ) -> InterpResult<'tcx> { let this = self.eval_context_ref(); assert!(access.is_atomic()); - let Some(data_race) = &this.machine.data_race else { return interp_ok(()) }; + let Some(data_race) = this.machine.data_race.as_vclocks_ref() else { + return interp_ok(()); + }; if !data_race.race_detecting() { return interp_ok(()); } @@ -1494,7 +1551,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?; // Load and log the atomic operation. // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option. - let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap(); + let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_vclocks_ref().unwrap(); trace!( "Atomic op({}) with ordering {:?} on {:?} (size={})", access.description(None, None), @@ -1565,6 +1622,7 @@ impl GlobalState { last_sc_fence: RefCell::new(VClock::default()), last_sc_write_per_thread: RefCell::new(VClock::default()), track_outdated_loads: config.track_outdated_loads, + weak_memory: config.weak_memory_emulation, }; // Setup the main-thread since it is not explicitly created: @@ -1728,7 +1786,7 @@ impl GlobalState { } } - /// On thread termination, the vector-clock may re-used + /// On thread termination, the vector clock may be re-used /// in the future once all remaining thread-clocks catch /// up with the time index of the terminated thread. /// This assigns thread termination with a unique index @@ -1750,6 +1808,50 @@ impl GlobalState { reuse.insert(current_index); } + /// Update the data-race detector for an atomic fence on the current thread. + fn atomic_fence<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + atomic: AtomicFenceOrd, + ) -> InterpResult<'tcx> { + let current_span = machine.current_span(); + self.maybe_perform_sync_operation(&machine.threads, current_span, |index, mut clocks| { + trace!("Atomic fence on {:?} with ordering {:?}", index, atomic); + + // Apply data-race detection for the current fences + // this treats AcqRel and SeqCst as the same as an acquire + // and release fence applied in the same timestamp. + if atomic != AtomicFenceOrd::Release { + // Either Acquire | AcqRel | SeqCst + clocks.apply_acquire_fence(); + } + if atomic == AtomicFenceOrd::SeqCst { + // Behave like an RMW on the global fence location. This takes full care of + // all the SC fence requirements, including C++17 §32.4 [atomics.order] + // paragraph 6 (which would limit what future reads can see). It also rules + // out many legal behaviors, but we don't currently have a model that would + // be more precise. 
+ // Also see the second bullet on page 10 of + // . + let mut sc_fence_clock = self.last_sc_fence.borrow_mut(); + sc_fence_clock.join(&clocks.clock); + clocks.clock.join(&sc_fence_clock); + // Also establish some sort of order with the last SC write that happened, globally + // (but this is only respected by future reads). + clocks.write_seqcst.join(&self.last_sc_write_per_thread.borrow()); + } + // The release fence is last, since both of the above could alter our clock, + // which should be part of what is being released. + if atomic != AtomicFenceOrd::Acquire { + // Either Release | AcqRel | SeqCst + clocks.apply_release_fence(); + } + + // Increment timestamp in case of release semantics. + interp_ok(atomic != AtomicFenceOrd::Acquire) + }) + } + /// Attempt to perform a synchronized operation, this /// will perform no operation if multi-threading is /// not currently enabled. diff --git a/src/tools/miri/src/concurrency/data_race_handler.rs b/src/tools/miri/src/concurrency/data_race_handler.rs new file mode 100644 index 0000000000000..047c37e56b8f3 --- /dev/null +++ b/src/tools/miri/src/concurrency/data_race_handler.rs @@ -0,0 +1,91 @@ +use std::rc::Rc; + +use super::{data_race, weak_memory}; +use crate::concurrency::GenmcCtx; +use crate::{VisitProvenance, VisitWith}; + +pub enum GlobalDataRaceHandler { + /// No data race detection will be done. + None, + /// State required to run in GenMC mode. + /// In this mode, the program will be executed repeatedly to explore different concurrent executions. + /// The `GenmcCtx` must persist across multiple executions, so it is behind an `Rc`. + /// + /// The `GenmcCtx` has several methods with which to inform it about events like atomic memory accesses. + /// In GenMC mode, some functionality is taken over by GenMC: + /// - Memory Allocation: Allocated addresses need to be consistent across executions, which Miri's allocator doesn't guarantee + /// - Scheduling: To influence which concurrent execution we will explore next, GenMC takes over scheduling + /// - Atomic operations: GenMC will ensure that we explore all possible values that the memory model allows + /// an atomic operation to see at any specific point of the program. + Genmc(Rc), + /// The default data race detector for Miri using vector clocks. + Vclocks(Box), +} + +#[derive(Debug)] +pub enum AllocDataRaceHandler { + None, + Genmc, + /// Data race detection via the use of vector clocks. + /// Weak memory emulation via the use of store buffers (if enabled). 
+ Vclocks(data_race::AllocState, Option), +} + +impl GlobalDataRaceHandler { + pub fn is_none(&self) -> bool { + matches!(self, GlobalDataRaceHandler::None) + } + + pub fn as_vclocks_ref(&self) -> Option<&data_race::GlobalState> { + if let Self::Vclocks(data_race) = self { Some(data_race) } else { None } + } + + pub fn as_vclocks_mut(&mut self) -> Option<&mut data_race::GlobalState> { + if let Self::Vclocks(data_race) = self { Some(data_race) } else { None } + } + + pub fn as_genmc_ref(&self) -> Option<&GenmcCtx> { + if let Self::Genmc(genmc_ctx) = self { Some(genmc_ctx) } else { None } + } +} + +impl AllocDataRaceHandler { + pub fn as_vclocks_ref(&self) -> Option<&data_race::AllocState> { + if let Self::Vclocks(data_race, _weak_memory) = self { Some(data_race) } else { None } + } + + pub fn as_vclocks_mut(&mut self) -> Option<&mut data_race::AllocState> { + if let Self::Vclocks(data_race, _weak_memory) = self { Some(data_race) } else { None } + } + + pub fn as_weak_memory_ref(&self) -> Option<&weak_memory::AllocState> { + if let Self::Vclocks(_data_race, weak_memory) = self { weak_memory.as_ref() } else { None } + } + + pub fn as_weak_memory_mut(&mut self) -> Option<&mut weak_memory::AllocState> { + if let Self::Vclocks(_data_race, weak_memory) = self { weak_memory.as_mut() } else { None } + } +} + +impl VisitProvenance for GlobalDataRaceHandler { + fn visit_provenance(&self, visit: &mut VisitWith<'_>) { + match self { + GlobalDataRaceHandler::None => {} + GlobalDataRaceHandler::Vclocks(data_race) => data_race.visit_provenance(visit), + GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.visit_provenance(visit), + } + } +} + +impl VisitProvenance for AllocDataRaceHandler { + fn visit_provenance(&self, visit: &mut VisitWith<'_>) { + match self { + AllocDataRaceHandler::None => {} + AllocDataRaceHandler::Genmc => {} + AllocDataRaceHandler::Vclocks(data_race, weak_memory) => { + data_race.visit_provenance(visit); + weak_memory.visit_provenance(visit); + } + } + } +} diff --git a/src/tools/miri/src/concurrency/genmc/config.rs b/src/tools/miri/src/concurrency/genmc/config.rs new file mode 100644 index 0000000000000..f91211a670f65 --- /dev/null +++ b/src/tools/miri/src/concurrency/genmc/config.rs @@ -0,0 +1,19 @@ +use crate::MiriConfig; + +#[derive(Debug, Default, Clone)] +pub struct GenmcConfig { + // TODO: add fields +} + +impl GenmcConfig { + /// Function for parsing command line options for GenMC mode. + /// All GenMC arguments start with the string "-Zmiri-genmc". 
+ /// + /// `trimmed_arg` should be the argument to be parsed, with the prefix "-Zmiri-genmc" removed + pub fn parse_arg(genmc_config: &mut Option<GenmcConfig>, trimmed_arg: &str) { + if genmc_config.is_none() { + *genmc_config = Some(Default::default()); + } + todo!("implement parsing of GenMC options") + } +} diff --git a/src/tools/miri/src/concurrency/genmc/dummy.rs b/src/tools/miri/src/concurrency/genmc/dummy.rs new file mode 100644 index 0000000000000..3d0558fb68530 --- /dev/null +++ b/src/tools/miri/src/concurrency/genmc/dummy.rs @@ -0,0 +1,239 @@ +#![allow(unused)] + +use rustc_abi::{Align, Size}; +use rustc_const_eval::interpret::{InterpCx, InterpResult}; +use rustc_middle::mir; + +use crate::{ + AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriConfig, + MiriMachine, Scalar, ThreadId, ThreadManager, VisitProvenance, VisitWith, +}; + +#[derive(Debug)] +pub struct GenmcCtx {} + +#[derive(Debug, Default, Clone)] +pub struct GenmcConfig {} + +impl GenmcCtx { + pub fn new(_miri_config: &MiriConfig, _genmc_config: &GenmcConfig) -> Self { + unreachable!() + } + + pub fn get_stuck_execution_count(&self) -> usize { + unreachable!() + } + + pub fn print_genmc_graph(&self) { + unreachable!() + } + + pub fn is_exploration_done(&self) -> bool { + unreachable!() + } + + /**** Memory access handling ****/ + + pub(crate) fn handle_execution_start(&self) { + unreachable!() + } + + pub(crate) fn handle_execution_end<'tcx>( + &self, + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + ) -> Result<(), String> { + unreachable!() + } + + pub(super) fn set_ongoing_action_data_race_free(&self, _enable: bool) { + unreachable!() + } + + // Might fail if there's a race; the load might also not read anything (returns None). + pub(crate) fn atomic_load<'tcx>( + &self, + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _address: Size, + _size: Size, + _ordering: AtomicReadOrd, + _old_val: Option<Scalar>, + ) -> InterpResult<'tcx, Scalar> { + unreachable!() + } + + pub(crate) fn atomic_store<'tcx>( + &self, + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _address: Size, + _size: Size, + _value: Scalar, + _ordering: AtomicWriteOrd, + ) -> InterpResult<'tcx, ()> { + unreachable!() + } + + pub(crate) fn atomic_fence<'tcx>( + &self, + _machine: &MiriMachine<'tcx>, + _ordering: AtomicFenceOrd, + ) -> InterpResult<'tcx, ()> { + unreachable!() + } + + pub(crate) fn atomic_rmw_op<'tcx>( + &self, + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _address: Size, + _size: Size, + _ordering: AtomicRwOrd, + (rmw_op, not): (mir::BinOp, bool), + _rhs_scalar: Scalar, + ) -> InterpResult<'tcx, (Scalar, Scalar)> { + unreachable!() + } + + pub(crate) fn atomic_min_max_op<'tcx>( + &self, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + address: Size, + size: Size, + ordering: AtomicRwOrd, + min: bool, + is_signed: bool, + rhs_scalar: Scalar, + ) -> InterpResult<'tcx, (Scalar, Scalar)> { + unreachable!() + } + + pub(crate) fn atomic_exchange<'tcx>( + &self, + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _address: Size, + _size: Size, + _rhs_scalar: Scalar, + _ordering: AtomicRwOrd, + ) -> InterpResult<'tcx, (Scalar, bool)> { + unreachable!() + } + + pub(crate) fn atomic_compare_exchange<'tcx>( + &self, + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _address: Size, + _size: Size, + _expected_old_value: Scalar, + _new_value: Scalar, + _success: AtomicRwOrd, + _fail: AtomicReadOrd, + _can_fail_spuriously: bool, + ) -> InterpResult<'tcx, (Scalar, bool)> { + unreachable!() + } + + pub(crate) fn memory_load<'tcx>( + &self, + _machine: &MiriMachine<'tcx>, +
_address: Size, + _size: Size, + ) -> InterpResult<'tcx, ()> { + unreachable!() + } + + pub(crate) fn memory_store<'tcx>( + &self, + _machine: &MiriMachine<'tcx>, + _address: Size, + _size: Size, + ) -> InterpResult<'tcx, ()> { + unreachable!() + } + + /**** Memory (de)allocation ****/ + + pub(crate) fn handle_alloc<'tcx>( + &self, + _machine: &MiriMachine<'tcx>, + _size: Size, + _alignment: Align, + _memory_kind: MemoryKind, + ) -> InterpResult<'tcx, u64> { + unreachable!() + } + + pub(crate) fn handle_dealloc<'tcx>( + &self, + _machine: &MiriMachine<'tcx>, + _address: Size, + _size: Size, + _align: Align, + _kind: MemoryKind, + ) -> InterpResult<'tcx, ()> { + unreachable!() + } + + /**** Thread management ****/ + + pub(crate) fn handle_thread_create<'tcx>( + &self, + _threads: &ThreadManager<'tcx>, + _new_thread_id: ThreadId, + ) -> InterpResult<'tcx, ()> { + unreachable!() + } + + pub(crate) fn handle_thread_join<'tcx>( + &self, + _active_thread_id: ThreadId, + _child_thread_id: ThreadId, + ) -> InterpResult<'tcx, ()> { + unreachable!() + } + + pub(crate) fn handle_thread_stack_empty(&self, _thread_id: ThreadId) { + unreachable!() + } + + pub(crate) fn handle_thread_finish<'tcx>( + &self, + _threads: &ThreadManager<'tcx>, + ) -> InterpResult<'tcx, ()> { + unreachable!() + } + + /**** Scheduling functionality ****/ + + pub(crate) fn schedule_thread<'tcx>( + &self, + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + ) -> InterpResult<'tcx, ThreadId> { + unreachable!() + } + + /**** Blocking instructions ****/ + + pub(crate) fn handle_verifier_assume<'tcx>( + &self, + _machine: &MiriMachine<'tcx>, + _condition: bool, + ) -> InterpResult<'tcx, ()> { + unreachable!() + } +} + +impl VisitProvenance for GenmcCtx { + fn visit_provenance(&self, _visit: &mut VisitWith<'_>) { + unreachable!() + } +} + +impl GenmcConfig { + pub fn parse_arg(_genmc_config: &mut Option<GenmcConfig>, trimmed_arg: &str) { + unimplemented!( + "GenMC feature in Miri is disabled, cannot handle argument: \"-Zmiri-genmc{trimmed_arg}\"" + ); + } + + pub fn should_print_graph(&self, _rep: usize) -> bool { + unreachable!() + } +} diff --git a/src/tools/miri/src/concurrency/genmc/mod.rs b/src/tools/miri/src/concurrency/genmc/mod.rs new file mode 100644 index 0000000000000..0dfd4b9b80f98 --- /dev/null +++ b/src/tools/miri/src/concurrency/genmc/mod.rs @@ -0,0 +1,284 @@ +#![allow(unused)] // FIXME(GenMC): remove this + +use std::cell::Cell; + +use rustc_abi::{Align, Size}; +use rustc_const_eval::interpret::{InterpCx, InterpResult, interp_ok}; +use rustc_middle::mir; + +use crate::{ + AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriConfig, + MiriMachine, Scalar, ThreadId, ThreadManager, VisitProvenance, VisitWith, +}; + +mod config; + +pub use self::config::GenmcConfig; + +// FIXME(GenMC): add fields +pub struct GenmcCtx { + /// Some actions Miri does are allowed to cause data races. + /// GenMC will not be informed about certain actions (e.g. non-atomic loads) when this flag is set. + allow_data_races: Cell<bool>, +} + +impl GenmcCtx { + /// Create a new `GenmcCtx` from a given config. + pub fn new(miri_config: &MiriConfig, genmc_config: &GenmcConfig) -> Self { + assert!(miri_config.genmc_mode); + todo!() + } + + pub fn get_stuck_execution_count(&self) -> usize { + todo!() + } + + pub fn print_genmc_graph(&self) { + todo!() + } + + /// This function determines if we should continue exploring executions or if we are done.
+ /// + /// In GenMC mode, the input program should be repeatedly executed until this function returns `true` or an error is found. + pub fn is_exploration_done(&self) -> bool { + todo!() + } + + /// Inform GenMC that a new program execution has started. + /// This function should be called at the start of every execution. + pub(crate) fn handle_execution_start(&self) { + todo!() + } + + /// Inform GenMC that the program's execution has ended. + /// + /// This function must be called even when the execution got stuck (i.e., it returned an `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcStuckExecution`). + pub(crate) fn handle_execution_end<'tcx>( + &self, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + ) -> Result<(), String> { + todo!() + } + + /**** Memory access handling ****/ + + /// Select whether data-race-free actions should be allowed. This function should be used carefully! + /// + /// If `true` is passed, allow for data races to happen without triggering an error, until this function is called again with argument `false`. + /// This allows for racy non-atomic memory accesses to be ignored (GenMC is not informed about them at all). + /// + /// Certain operations are not permitted in GenMC mode with data races disabled and will cause a panic, e.g., atomic accesses or asking for scheduling decisions. + /// + /// # Panics + /// If the flag is set to the value it already holds (i.e., nesting is not allowed). + pub(super) fn set_ongoing_action_data_race_free(&self, enable: bool) { + let old = self.allow_data_races.replace(enable); + assert_ne!(old, enable, "cannot nest allow_data_races"); + } + + pub(crate) fn atomic_load<'tcx>( + &self, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + address: Size, + size: Size, + ordering: AtomicReadOrd, + old_val: Option<Scalar>, + ) -> InterpResult<'tcx, Scalar> { + assert!(!self.allow_data_races.get()); + todo!() + } + + pub(crate) fn atomic_store<'tcx>( + &self, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + address: Size, + size: Size, + value: Scalar, + ordering: AtomicWriteOrd, + ) -> InterpResult<'tcx, ()> { + assert!(!self.allow_data_races.get()); + todo!() + } + + pub(crate) fn atomic_fence<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + ordering: AtomicFenceOrd, + ) -> InterpResult<'tcx, ()> { + assert!(!self.allow_data_races.get()); + todo!() + } + + /// Inform GenMC about an atomic read-modify-write operation. + /// + /// Returns `(old_val, new_val)`. + pub(crate) fn atomic_rmw_op<'tcx>( + &self, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + address: Size, + size: Size, + ordering: AtomicRwOrd, + (rmw_op, not): (mir::BinOp, bool), + rhs_scalar: Scalar, + ) -> InterpResult<'tcx, (Scalar, Scalar)> { + assert!(!self.allow_data_races.get()); + todo!() + } + + /// Inform GenMC about an atomic `min` or `max` operation. + /// + /// Returns `(old_val, new_val)`.
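Before the min/max hook below: the `allow_data_races` flag above relies on `Cell::replace` plus `assert_ne!` to reject nested or redundant toggling, the same discipline as `set_ongoing_action_data_race_free` on `GlobalDataRaceHandler` earlier in this patch. A runnable sketch of just that guard:

```rust
use std::cell::Cell;

/// Sketch of the non-nesting toggle: flipping the flag to the value it
/// already holds indicates a nesting bug in the caller, so it panics.
struct Toggle {
    enabled: Cell<bool>,
}

impl Toggle {
    fn set(&self, enable: bool) {
        let old = self.enabled.replace(enable);
        assert_ne!(old, enable, "cannot nest allow_data_races");
    }
}

fn main() {
    let t = Toggle { enabled: Cell::new(false) };
    t.set(true); // ok: false -> true
    t.set(false); // ok: true -> false
    // t.set(false); // would panic: the flag is already false
}
```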
+ pub(crate) fn atomic_min_max_op<'tcx>( + &self, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + address: Size, + size: Size, + ordering: AtomicRwOrd, + min: bool, + is_signed: bool, + rhs_scalar: Scalar, + ) -> InterpResult<'tcx, (Scalar, Scalar)> { + assert!(!self.allow_data_races.get()); + todo!() + } + + pub(crate) fn atomic_exchange<'tcx>( + &self, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + address: Size, + size: Size, + rhs_scalar: Scalar, + ordering: AtomicRwOrd, + ) -> InterpResult<'tcx, (Scalar, bool)> { + assert!(!self.allow_data_races.get()); + todo!() + } + + pub(crate) fn atomic_compare_exchange<'tcx>( + &self, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + address: Size, + size: Size, + expected_old_value: Scalar, + new_value: Scalar, + success: AtomicRwOrd, + fail: AtomicReadOrd, + can_fail_spuriously: bool, + ) -> InterpResult<'tcx, (Scalar, bool)> { + assert!(!self.allow_data_races.get()); + todo!() + } + + /// Inform GenMC about a non-atomic memory load. + /// + /// NOTE: Unlike for *atomic* loads, we don't return a value here. Non-atomic values are still handled by Miri. + pub(crate) fn memory_load<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + address: Size, + size: Size, + ) -> InterpResult<'tcx, ()> { + todo!() + } + + pub(crate) fn memory_store<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + address: Size, + size: Size, + ) -> InterpResult<'tcx, ()> { + todo!() + } + + /**** Memory (de)allocation ****/ + + pub(crate) fn handle_alloc<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + size: Size, + alignment: Align, + memory_kind: MemoryKind, + ) -> InterpResult<'tcx, u64> { + todo!() + } + + pub(crate) fn handle_dealloc<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + address: Size, + size: Size, + align: Align, + kind: MemoryKind, + ) -> InterpResult<'tcx, ()> { + todo!() + } + + /**** Thread management ****/ + + pub(crate) fn handle_thread_create<'tcx>( + &self, + threads: &ThreadManager<'tcx>, + new_thread_id: ThreadId, + ) -> InterpResult<'tcx, ()> { + assert!(!self.allow_data_races.get()); + todo!() + } + + pub(crate) fn handle_thread_join<'tcx>( + &self, + active_thread_id: ThreadId, + child_thread_id: ThreadId, + ) -> InterpResult<'tcx, ()> { + assert!(!self.allow_data_races.get()); + todo!() + } + + pub(crate) fn handle_thread_stack_empty(&self, thread_id: ThreadId) { + todo!() + } + + pub(crate) fn handle_thread_finish<'tcx>( + &self, + threads: &ThreadManager<'tcx>, + ) -> InterpResult<'tcx, ()> { + assert!(!self.allow_data_races.get()); + todo!() + } + + /**** Scheduling functionality ****/ + + /// Ask for a scheduling decision. This should be called before every MIR instruction. + /// + /// GenMC may realize that the execution got stuck; in that case, this function will return an `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcStuckExecution`. + /// + /// This is **not** an error by itself! Treat this as if the program ended normally: `handle_execution_end` should be called next, which will determine if there are any actual errors.
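To illustrate the contract spelled out in the doc comment above (a stuck execution is a normal stop, not an error), here is a small sketch with a stand-in result type rather than Miri's `InterpErrorKind`:

```rust
// Stand-in for the scheduler's outcome; Miri signals the stuck case via
// `TerminationInfo::GenmcStuckExecution` instead of a dedicated enum.
enum Schedule {
    Run(usize),
    Stuck,
}

fn drive(decisions: &[Schedule]) -> &'static str {
    for d in decisions {
        match d {
            Schedule::Run(_thread) => { /* execute one step on that thread */ }
            // Not an error: end this execution normally and let the outer
            // exploration loop decide whether to run another one.
            Schedule::Stuck => return "execution stuck (normal stop)",
        }
    }
    "program finished"
}

fn main() {
    println!("{}", drive(&[Schedule::Run(0), Schedule::Stuck]));
}
```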
+ pub(crate) fn schedule_thread<'tcx>( + &self, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + ) -> InterpResult<'tcx, ThreadId> { + assert!(!self.allow_data_races.get()); + todo!() + } + + /**** Blocking instructions ****/ + + pub(crate) fn handle_verifier_assume<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + condition: bool, + ) -> InterpResult<'tcx, ()> { + if condition { interp_ok(()) } else { self.handle_user_block(machine) } + } +} + +impl VisitProvenance for GenmcCtx { + fn visit_provenance(&self, _visit: &mut VisitWith<'_>) { + // We don't have any tags. + } +} + +impl GenmcCtx { + fn handle_user_block<'tcx>(&self, machine: &MiriMachine<'tcx>) -> InterpResult<'tcx, ()> { + todo!() + } +} diff --git a/src/tools/miri/src/concurrency/init_once.rs b/src/tools/miri/src/concurrency/init_once.rs index 534f02545bdec..c26384f65f6cc 100644 --- a/src/tools/miri/src/concurrency/init_once.rs +++ b/src/tools/miri/src/concurrency/init_once.rs @@ -72,7 +72,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { init_once.status = InitOnceStatus::Complete; // Each complete happens-before the end of the wait - if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race .release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock)); } @@ -99,7 +99,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { init_once.status = InitOnceStatus::Uninitialized; // Each complete happens-before the end of the wait - if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race .release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock)); } diff --git a/src/tools/miri/src/concurrency/mod.rs b/src/tools/miri/src/concurrency/mod.rs index c5082b4e40b4c..dd33f90f153d5 100644 --- a/src/tools/miri/src/concurrency/mod.rs +++ b/src/tools/miri/src/concurrency/mod.rs @@ -1,5 +1,6 @@ pub mod cpu_affinity; pub mod data_race; +mod data_race_handler; pub mod init_once; mod range_object_map; pub mod sync; @@ -7,4 +8,19 @@ pub mod thread; mod vector_clock; pub mod weak_memory; +// Import either the real genmc adapter or a dummy module. +cfg_match! { + feature = "genmc" => { + mod genmc; + pub use self::genmc::{GenmcCtx, GenmcConfig}; + } + _ => { + #[path = "genmc/dummy.rs"] + mod genmc_dummy; + use self::genmc_dummy as genmc; + pub use self::genmc::{GenmcCtx, GenmcConfig}; + } +} + +pub use self::data_race_handler::{AllocDataRaceHandler, GlobalDataRaceHandler}; pub use self::vector_clock::VClock; diff --git a/src/tools/miri/src/concurrency/sync.rs b/src/tools/miri/src/concurrency/sync.rs index 268268848ed2f..64f34d3e21cc8 100644 --- a/src/tools/miri/src/concurrency/sync.rs +++ b/src/tools/miri/src/concurrency/sync.rs @@ -361,7 +361,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { mutex.owner = Some(thread); } mutex.lock_count = mutex.lock_count.strict_add(1); - if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.acquire_clock(&mutex.clock, &this.machine.threads); } } @@ -385,7 +385,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { mutex.owner = None; // The mutex is completely unlocked. Try transferring ownership // to another thread. 
- if let Some(data_race) = &this.machine.data_race { + + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.release_clock(&this.machine.threads, |clock| { mutex.clock.clone_from(clock) }); @@ -477,7 +478,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let rwlock = &mut this.machine.sync.rwlocks[id]; let count = rwlock.readers.entry(thread).or_insert(0); *count = count.strict_add(1); - if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads); } } @@ -502,7 +503,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { } Entry::Vacant(_) => return interp_ok(false), // we did not even own this lock } - if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { // Add this to the shared-release clock of all concurrent readers. data_race.release_clock(&this.machine.threads, |clock| { rwlock.clock_current_readers.join(clock) @@ -565,7 +566,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { trace!("rwlock_writer_lock: {:?} now held by {:?}", id, thread); let rwlock = &mut this.machine.sync.rwlocks[id]; rwlock.writer = Some(thread); - if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads); } } @@ -585,7 +586,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { rwlock.writer = None; trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, thread); // Record release clock for next lock holder. - if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.release_clock(&this.machine.threads, |clock| { rwlock.clock_unlocked.clone_from(clock) }); @@ -691,7 +692,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { match unblock { UnblockKind::Ready => { // The condvar was signaled. Make sure we get the clock for that. - if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.acquire_clock( &this.machine.sync.condvars[condvar].clock, &this.machine.threads, @@ -721,10 +722,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { fn condvar_signal(&mut self, id: CondvarId) -> InterpResult<'tcx, bool> { let this = self.eval_context_mut(); let condvar = &mut this.machine.sync.condvars[id]; - let data_race = &this.machine.data_race; // Each condvar signal happens-before the end of the condvar wake - if let Some(data_race) = data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.release_clock(&this.machine.threads, |clock| condvar.clock.clone_from(clock)); } let Some(waiter) = condvar.waiters.pop_front() else { @@ -764,7 +764,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { UnblockKind::Ready => { let futex = futex_ref.0.borrow(); // Acquire the clock of the futex. 
- if let Some(data_race) = &this.machine.data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.acquire_clock(&futex.clock, &this.machine.threads); } }, @@ -792,10 +792,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { ) -> InterpResult<'tcx, usize> { let this = self.eval_context_mut(); let mut futex = futex_ref.0.borrow_mut(); - let data_race = &this.machine.data_race; // Each futex-wake happens-before the end of the futex wait - if let Some(data_race) = data_race { + if let Some(data_race) = this.machine.data_race.as_vclocks_ref() { data_race.release_clock(&this.machine.threads, |clock| futex.clock.clone_from(clock)); } diff --git a/src/tools/miri/src/concurrency/thread.rs b/src/tools/miri/src/concurrency/thread.rs index 72fa918e8e511..8aa65e6cb612b 100644 --- a/src/tools/miri/src/concurrency/thread.rs +++ b/src/tools/miri/src/concurrency/thread.rs @@ -6,6 +6,7 @@ use std::task::Poll; use std::time::{Duration, SystemTime}; use either::Either; +use rand::seq::IteratorRandom; use rustc_abi::ExternAbi; use rustc_const_eval::CTRL_C_RECEIVED; use rustc_data_structures::fx::FxHashMap; @@ -15,7 +16,7 @@ use rustc_middle::mir::Mutability; use rustc_middle::ty::layout::TyAndLayout; use rustc_span::Span; -use crate::concurrency::data_race; +use crate::concurrency::GlobalDataRaceHandler; use crate::shims::tls; use crate::*; @@ -401,6 +402,8 @@ pub struct ThreadManager<'tcx> { thread_local_allocs: FxHashMap<(DefId, ThreadId), StrictPointer>, /// A flag that indicates that we should change the active thread. yield_active_thread: bool, + /// A flag that indicates whether we should use round-robin scheduling of threads; otherwise, randomized scheduling is used. + fixed_scheduling: bool, } impl VisitProvenance for ThreadManager<'_> { @@ -410,6 +413,7 @@ impl VisitProvenance for ThreadManager<'_> { threads, thread_local_allocs, active_thread: _, yield_active_thread: _, + fixed_scheduling: _, } = self; for thread in threads { @@ -421,8 +425,8 @@ impl VisitProvenance for ThreadManager<'_> { } } -impl<'tcx> Default for ThreadManager<'tcx> { - fn default() -> Self { +impl<'tcx> ThreadManager<'tcx> { + pub(crate) fn new(config: &MiriConfig) -> Self { let mut threads = IndexVec::new(); // Create the main thread and add it to the list of threads. threads.push(Thread::new(Some("main"), None)); @@ -431,11 +435,10 @@ impl<'tcx> Default for ThreadManager<'tcx> { threads, thread_local_allocs: Default::default(), yield_active_thread: false, + fixed_scheduling: config.fixed_scheduling, } } -} -impl<'tcx> ThreadManager<'tcx> { pub(crate) fn init( ecx: &mut MiriInterpCx<'tcx>, on_main_stack_empty: StackEmptyCallback<'tcx>, @@ -580,13 +583,28 @@ impl<'tcx> ThreadManager<'tcx> { fn join_thread( &mut self, joined_thread_id: ThreadId, - data_race: Option<&mut data_race::GlobalState>, + data_race_handler: &mut GlobalDataRaceHandler, ) -> InterpResult<'tcx> { if self.threads[joined_thread_id].join_status == ThreadJoinStatus::Detached { // On Windows this corresponds to joining on a closed handle.
throw_ub_format!("trying to join a detached thread"); } + fn after_join<'tcx>( + threads: &mut ThreadManager<'_>, + joined_thread_id: ThreadId, + data_race_handler: &mut GlobalDataRaceHandler, + ) -> InterpResult<'tcx> { + match data_race_handler { + GlobalDataRaceHandler::None => {} + GlobalDataRaceHandler::Vclocks(data_race) => + data_race.thread_joined(threads, joined_thread_id), + GlobalDataRaceHandler::Genmc(genmc_ctx) => + genmc_ctx.handle_thread_join(threads.active_thread, joined_thread_id)?, + } + interp_ok(()) + } + // Mark the joined thread as being joined so that we detect if other // threads try to join it. self.threads[joined_thread_id].join_status = ThreadJoinStatus::Joined; @@ -606,18 +624,13 @@ impl<'tcx> ThreadManager<'tcx> { } |this, unblock: UnblockKind| { assert_eq!(unblock, UnblockKind::Ready); - if let Some(data_race) = &mut this.machine.data_race { - data_race.thread_joined(&this.machine.threads, joined_thread_id); - } - interp_ok(()) + after_join(&mut this.machine.threads, joined_thread_id, &mut this.machine.data_race) } ), ); } else { // The thread has already terminated - establish happens-before - if let Some(data_race) = data_race { - data_race.thread_joined(self, joined_thread_id); - } + after_join(self, joined_thread_id, data_race_handler)?; } interp_ok(()) } @@ -627,7 +640,7 @@ impl<'tcx> ThreadManager<'tcx> { fn join_thread_exclusive( &mut self, joined_thread_id: ThreadId, - data_race: Option<&mut data_race::GlobalState>, + data_race_handler: &mut GlobalDataRaceHandler, ) -> InterpResult<'tcx> { if self.threads[joined_thread_id].join_status == ThreadJoinStatus::Joined { throw_ub_format!("trying to join an already joined thread"); @@ -645,7 +658,7 @@ impl<'tcx> ThreadManager<'tcx> { "this thread already has threads waiting for its termination" ); - self.join_thread(joined_thread_id, data_race) + self.join_thread(joined_thread_id, data_race_handler) } /// Set the name of the given thread. @@ -695,70 +708,6 @@ impl<'tcx> ThreadManager<'tcx> { }) .min() } - - /// Decide which action to take next and on which thread. - /// - /// The currently implemented scheduling policy is the one that is commonly - /// used in stateless model checkers such as Loom: run the active thread as - /// long as we can and switch only when we have to (the active thread was - /// blocked, terminated, or has explicitly asked to be preempted). - fn schedule(&mut self, clock: &MonotonicClock) -> InterpResult<'tcx, SchedulingAction> { - // This thread and the program can keep going. - if self.threads[self.active_thread].state.is_enabled() && !self.yield_active_thread { - // The currently active thread is still enabled, just continue with it. - return interp_ok(SchedulingAction::ExecuteStep); - } - // The active thread yielded or got terminated. Let's see if there are any timeouts to take - // care of. We do this *before* running any other thread, to ensure that timeouts "in the - // past" fire before any other thread can take an action. This ensures that for - // `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by - // abstime has already been passed at the time of the call". - // - let potential_sleep_time = self.next_callback_wait_time(clock); - if potential_sleep_time == Some(Duration::ZERO) { - return interp_ok(SchedulingAction::ExecuteTimeoutCallback); - } - // No callbacks immediately scheduled, pick a regular thread to execute. - // The active thread blocked or yielded. So we go search for another enabled thread. 
- // Crucially, we start searching at the current active thread ID, rather than at 0, since we - // want to avoid always scheduling threads 0 and 1 without ever making progress in thread 2. - // - // `skip(N)` means we start iterating at thread N, so we skip 1 more to start just *after* - // the active thread. Then after that we look at `take(N)`, i.e., the threads *before* the - // active thread. - let threads = self - .threads - .iter_enumerated() - .skip(self.active_thread.index() + 1) - .chain(self.threads.iter_enumerated().take(self.active_thread.index())); - for (id, thread) in threads { - debug_assert_ne!(self.active_thread, id); - if thread.state.is_enabled() { - info!( - "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------", - self.get_thread_display_name(id), - self.get_thread_display_name(self.active_thread) - ); - self.active_thread = id; - break; - } - } - self.yield_active_thread = false; - if self.threads[self.active_thread].state.is_enabled() { - return interp_ok(SchedulingAction::ExecuteStep); - } - // We have not found a thread to execute. - if self.threads.iter().all(|thread| thread.state.is_terminated()) { - unreachable!("all threads terminated without the main thread terminating?!"); - } else if let Some(sleep_time) = potential_sleep_time { - // All threads are currently blocked, but we have unexecuted - // timeout_callbacks, which may unblock some of the threads. Hence, - // sleep until the first callback. - interp_ok(SchedulingAction::Sleep(sleep_time)) - } else { - throw_machine_stop!(TerminationInfo::Deadlock); - } - } } impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {} @@ -806,6 +755,11 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { #[inline] fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> { let this = self.eval_context_mut(); + // Inform GenMC that a thread has finished all user code. GenMC needs to know this for scheduling. + if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { + let thread_id = this.active_thread(); + genmc_ctx.handle_thread_stack_empty(thread_id); + } let mut callback = this .active_thread_mut() .on_stack_empty @@ -815,6 +769,102 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { this.active_thread_mut().on_stack_empty = Some(callback); interp_ok(res) } + + /// Decide which action to take next and on which thread. + /// + /// The currently implemented scheduling policy is the one that is commonly + /// used in stateless model checkers such as Loom: run the active thread as + /// long as we can and switch only when we have to (the active thread was + /// blocked, terminated, or has explicitly asked to be preempted). + /// + /// If GenMC mode is active, the scheduling is instead handled by GenMC. 
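The comments in the `schedule` function below describe a rotation trick: the search for the next thread starts just after the active one and wraps around, which yields round-robin fairness, and the randomized mode samples from the same rotated iterator. A self-contained sketch of the rotation with plain `bool` thread states (not Miri's `ThreadState`):

```rust
/// Sketch: pick the next enabled thread, starting just after `active`
/// and wrapping around, like `threads.rotate_left(active + 1)`.
fn next_round_robin(enabled: &[bool], active: usize) -> Option<usize> {
    let n = enabled.len();
    (0..n)
        .map(|i| (active + 1 + i) % n) // visit indices in rotated order
        .find(|&i| enabled[i])
}

fn main() {
    // Threads 0 and 2 are enabled; the active thread is 0.
    let enabled = [true, false, true];
    assert_eq!(next_round_robin(&enabled, 0), Some(2));
    // Wraps around past the end, back to thread 0.
    assert_eq!(next_round_robin(&enabled, 2), Some(0));
}
```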
+ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { + let this = self.eval_context_mut(); + // In GenMC mode, we let GenMC do the scheduling + if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { + let next_thread_id = genmc_ctx.schedule_thread(this)?; + + let thread_manager = &mut this.machine.threads; + thread_manager.active_thread = next_thread_id; + thread_manager.yield_active_thread = false; + + assert!(thread_manager.threads[thread_manager.active_thread].state.is_enabled()); + return interp_ok(SchedulingAction::ExecuteStep); + } + + // We are not in GenMC mode, so we control the schedule + let thread_manager = &mut this.machine.threads; + let clock = &this.machine.monotonic_clock; + let rng = this.machine.rng.get_mut(); + // This thread and the program can keep going. + if thread_manager.threads[thread_manager.active_thread].state.is_enabled() + && !thread_manager.yield_active_thread + { + // The currently active thread is still enabled, just continue with it. + return interp_ok(SchedulingAction::ExecuteStep); + } + // The active thread yielded or got terminated. Let's see if there are any timeouts to take + // care of. We do this *before* running any other thread, to ensure that timeouts "in the + // past" fire before any other thread can take an action. This ensures that for + // `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by + // abstime has already been passed at the time of the call". + // + let potential_sleep_time = thread_manager.next_callback_wait_time(clock); + if potential_sleep_time == Some(Duration::ZERO) { + return interp_ok(SchedulingAction::ExecuteTimeoutCallback); + } + // No callbacks immediately scheduled, pick a regular thread to execute. + // The active thread blocked or yielded. So we go search for another enabled thread. + // We build the list of threads by starting with the threads after the current one, followed by + // the threads before the current one and then the current thread itself (i.e., this iterator acts + // like `threads.rotate_left(self.active_thread.index() + 1)`). This ensures that if we pick the first + // eligible thread, we do regular round-robin scheduling, and all threads get a chance to take a step. + let mut threads_iter = thread_manager + .threads + .iter_enumerated() + .skip(thread_manager.active_thread.index() + 1) + .chain( + thread_manager + .threads + .iter_enumerated() + .take(thread_manager.active_thread.index() + 1), + ) + .filter(|(_id, thread)| thread.state.is_enabled()); + // Pick a new thread, and switch to it. + let new_thread = if thread_manager.fixed_scheduling { + threads_iter.next() + } else { + threads_iter.choose(rng) + }; + + if let Some((id, _thread)) = new_thread { + if thread_manager.active_thread != id { + info!( + "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------", + thread_manager.get_thread_display_name(id), + thread_manager.get_thread_display_name(thread_manager.active_thread) + ); + thread_manager.active_thread = id; + } + } + // This completes the `yield`, if any was requested. + thread_manager.yield_active_thread = false; + + if thread_manager.threads[thread_manager.active_thread].state.is_enabled() { + return interp_ok(SchedulingAction::ExecuteStep); + } + // We have not found a thread to execute.
+ if thread_manager.threads.iter().all(|thread| thread.state.is_terminated()) { + unreachable!("all threads terminated without the main thread terminating?!"); + } else if let Some(sleep_time) = potential_sleep_time { + // All threads are currently blocked, but we have unexecuted + // timeout_callbacks, which may unblock some of the threads. Hence, + // sleep until the first callback. + interp_ok(SchedulingAction::Sleep(sleep_time)) + } else { + throw_machine_stop!(TerminationInfo::Deadlock); + } + } } // Public interface to thread management. @@ -880,10 +930,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { Box::new(move |m| state.on_stack_empty(m)) }); let current_span = this.machine.current_span(); - if let Some(data_race) = &mut this.machine.data_race { - data_race.thread_created(&this.machine.threads, new_thread_id, current_span); + match &mut this.machine.data_race { + GlobalDataRaceHandler::None => {} + GlobalDataRaceHandler::Vclocks(data_race) => + data_race.thread_created(&this.machine.threads, new_thread_id, current_span), + GlobalDataRaceHandler::Genmc(genmc_ctx) => + genmc_ctx.handle_thread_create(&this.machine.threads, new_thread_id)?, } - // Write the current thread-id, switch to the next thread later // to treat this write operation as occurring on the current thread. if let Some(thread_info_place) = thread { @@ -930,12 +983,17 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { /// This is called by the eval loop when a thread's on_stack_empty returns `Ready`. fn terminate_active_thread(&mut self, tls_alloc_action: TlsAllocAction) -> InterpResult<'tcx> { let this = self.eval_context_mut(); + // Mark thread as terminated. let thread = this.active_thread_mut(); assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated"); thread.state = ThreadState::Terminated; - if let Some(ref mut data_race) = this.machine.data_race { - data_race.thread_terminated(&this.machine.threads); + match &mut this.machine.data_race { + GlobalDataRaceHandler::None => {} + GlobalDataRaceHandler::Vclocks(data_race) => + data_race.thread_terminated(&this.machine.threads), + GlobalDataRaceHandler::Genmc(genmc_ctx) => + genmc_ctx.handle_thread_finish(&this.machine.threads)?, } // Deallocate TLS. 
let gone_thread = this.active_thread(); @@ -1051,7 +1109,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { #[inline] fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - this.machine.threads.join_thread(joined_thread_id, this.machine.data_race.as_mut())?; + this.machine.threads.join_thread(joined_thread_id, &mut this.machine.data_race)?; interp_ok(()) } @@ -1060,7 +1118,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let this = self.eval_context_mut(); this.machine .threads - .join_thread_exclusive(joined_thread_id, this.machine.data_race.as_mut())?; + .join_thread_exclusive(joined_thread_id, &mut this.machine.data_race)?; interp_ok(()) } @@ -1138,7 +1196,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { use rand::Rng as _; let this = self.eval_context_mut(); - if this.machine.rng.get_mut().random_bool(this.machine.preemption_rate) { + if !this.machine.threads.fixed_scheduling + && this.machine.rng.get_mut().random_bool(this.machine.preemption_rate) + { this.yield_active_thread(); } } @@ -1152,7 +1212,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.machine.handle_abnormal_termination(); throw_machine_stop!(TerminationInfo::Interrupted); } - match this.machine.threads.schedule(&this.machine.monotonic_clock)? { + match this.schedule()? { SchedulingAction::ExecuteStep => { if !this.step()? { // See if this thread can do something else. diff --git a/src/tools/miri/src/concurrency/vector_clock.rs b/src/tools/miri/src/concurrency/vector_clock.rs index 345726634299b..78858fcedaec7 100644 --- a/src/tools/miri/src/concurrency/vector_clock.rs +++ b/src/tools/miri/src/concurrency/vector_clock.rs @@ -40,8 +40,8 @@ impl From for VectorIdx { } } -/// The size of the vector-clock to store inline -/// clock vectors larger than this will be stored on the heap +/// The size of the vector clock to store inline. +/// Clock vectors larger than this will be stored on the heap. const SMALL_VECTOR: usize = 4; /// The time-stamps recorded in the data-race detector consist of both @@ -136,7 +136,7 @@ impl Ord for VTimestamp { pub struct VClock(SmallVec<[VTimestamp; SMALL_VECTOR]>); impl VClock { - /// Create a new vector-clock containing all zeros except + /// Create a new vector clock containing all zeros except /// for a value at the given index pub(super) fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock { if timestamp.time() == 0 { @@ -185,8 +185,8 @@ impl VClock { } } - // Join the two vector-clocks together, this - // sets each vector-element to the maximum value + // Join the two vector clocks together, this + // sets each vector element to the maximum value // of that element in either of the two source elements. pub fn join(&mut self, other: &Self) { let rhs_slice = other.as_slice(); diff --git a/src/tools/miri/src/concurrency/weak_memory.rs b/src/tools/miri/src/concurrency/weak_memory.rs index 1a3e9614f8af0..95c010be2fd21 100644 --- a/src/tools/miri/src/concurrency/weak_memory.rs +++ b/src/tools/miri/src/concurrency/weak_memory.rs @@ -77,7 +77,7 @@ // (https://github.com/ChrisLidbury/tsan11/blob/ecbd6b81e9b9454e01cba78eb9d88684168132c7/lib/tsan/rtl/tsan_relaxed.cc#L160-L167) // and here. // -// 4. W_SC ; R_SC case requires the SC load to ignore all but last store maked SC (stores not marked SC are not +// 4. W_SC ; R_SC case requires the SC load to ignore all but last store marked SC (stores not marked SC are not // affected). 
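The `join` operation documented in the `vector_clock.rs` hunks above is an elementwise maximum. A minimal sketch of that operation on plain `u64` timestamps (Miri's `VClock` additionally tracks spans in each timestamp and stores the clock in a `SmallVec`):

```rust
/// Sketch: join two vector clocks by taking the elementwise maximum,
/// treating missing entries in the shorter clock as zero.
fn join(lhs: &mut Vec<u64>, rhs: &[u64]) {
    if lhs.len() < rhs.len() {
        lhs.resize(rhs.len(), 0);
    }
    for (l, &r) in lhs.iter_mut().zip(rhs.iter()) {
        *l = (*l).max(r);
    }
}

fn main() {
    let mut a = vec![1, 5];
    join(&mut a, &[3, 2, 4]);
    assert_eq!(a, vec![3, 5, 4]);
}
```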
But this rule is applied to all loads in ReadsFromSet from the paper (last two lines of code), not just SC load. // This is implemented correctly in tsan11 // (https://github.com/ChrisLidbury/tsan11/blob/ecbd6b81e9b9454e01cba78eb9d88684168132c7/lib/tsan/rtl/tsan_relaxed.cc#L295) @@ -88,9 +88,11 @@ use std::collections::VecDeque; use rustc_data_structures::fx::FxHashMap; +use super::AllocDataRaceHandler; use super::data_race::{GlobalState as DataRaceState, ThreadClockSet}; use super::range_object_map::{AccessType, RangeObjectMap}; use super::vector_clock::{VClock, VTimestamp, VectorIdx}; +use crate::concurrency::GlobalDataRaceHandler; use crate::*; pub type AllocState = StoreBufferAlloc; @@ -459,8 +461,13 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let this = self.eval_context_mut(); let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr(), 0)?; if let ( - crate::AllocExtra { weak_memory: Some(alloc_buffers), .. }, - crate::MiriMachine { data_race: Some(global), threads, .. }, + crate::AllocExtra { + data_race: AllocDataRaceHandler::Vclocks(_, Some(alloc_buffers)), + .. + }, + crate::MiriMachine { + data_race: GlobalDataRaceHandler::Vclocks(global), threads, .. + }, ) = this.get_alloc_extra_mut(alloc_id)? { if atomic == AtomicRwOrd::SeqCst { @@ -484,9 +491,11 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { ) -> InterpResult<'tcx, Option> { let this = self.eval_context_ref(); 'fallback: { - if let Some(global) = &this.machine.data_race { + if let Some(global) = this.machine.data_race.as_vclocks_ref() { let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr(), 0)?; - if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() { + if let Some(alloc_buffers) = + this.get_alloc_extra(alloc_id)?.data_race.as_weak_memory_ref() + { if atomic == AtomicReadOrd::SeqCst { global.sc_read(&this.machine.threads); } @@ -534,8 +543,13 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let this = self.eval_context_mut(); let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr(), 0)?; if let ( - crate::AllocExtra { weak_memory: Some(alloc_buffers), .. }, - crate::MiriMachine { data_race: Some(global), threads, .. }, + crate::AllocExtra { + data_race: AllocDataRaceHandler::Vclocks(_, Some(alloc_buffers)), + .. + }, + crate::MiriMachine { + data_race: GlobalDataRaceHandler::Vclocks(global), threads, .. + }, ) = this.get_alloc_extra_mut(alloc_id)? { if atomic == AtomicWriteOrd::SeqCst { @@ -561,13 +575,15 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { ) -> InterpResult<'tcx> { let this = self.eval_context_ref(); - if let Some(global) = &this.machine.data_race { + if let Some(global) = this.machine.data_race.as_vclocks_ref() { if atomic == AtomicReadOrd::SeqCst { global.sc_read(&this.machine.threads); } let size = place.layout.size; let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr(), 0)?; - if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() { + if let Some(alloc_buffers) = + this.get_alloc_extra(alloc_id)?.data_race.as_weak_memory_ref() + { let Some(buffer) = alloc_buffers.get_store_buffer(alloc_range(base_offset, size))? 
else { diff --git a/src/tools/miri/src/diagnostics.rs b/src/tools/miri/src/diagnostics.rs index 014b1299f2dd0..89768077d8780 100644 --- a/src/tools/miri/src/diagnostics.rs +++ b/src/tools/miri/src/diagnostics.rs @@ -31,6 +31,8 @@ pub enum TerminationInfo { }, Int2PtrWithStrictProvenance, Deadlock, + /// In GenMC mode, an execution can get stuck in certain cases. This is not an error. + GenmcStuckExecution, MultipleSymbolDefinitions { link_name: Symbol, first: SpanData, @@ -75,6 +77,7 @@ impl fmt::Display for TerminationInfo { StackedBorrowsUb { msg, .. } => write!(f, "{msg}"), TreeBorrowsUb { title, .. } => write!(f, "{title}"), Deadlock => write!(f, "the evaluated program deadlocked"), + GenmcStuckExecution => write!(f, "GenMC determined that the execution got stuck"), MultipleSymbolDefinitions { link_name, .. } => write!(f, "multiple definitions of symbol `{link_name}`"), SymbolShimClashing { link_name, .. } => @@ -235,6 +238,12 @@ pub fn report_error<'tcx>( StackedBorrowsUb { .. } | TreeBorrowsUb { .. } | DataRace { .. } => Some("Undefined Behavior"), Deadlock => Some("deadlock"), + GenmcStuckExecution => { + // This case should only happen in GenMC mode. We treat it like a normal program exit. + assert!(ecx.machine.data_race.as_genmc_ref().is_some()); + tracing::info!("GenMC: found stuck execution"); + return Some((0, true)); + } MultipleSymbolDefinitions { .. } | SymbolShimClashing { .. } => None, }; #[rustfmt::skip] diff --git a/src/tools/miri/src/eval.rs b/src/tools/miri/src/eval.rs index 7586b5efd4b2b..bb5e5d7ee8146 100644 --- a/src/tools/miri/src/eval.rs +++ b/src/tools/miri/src/eval.rs @@ -3,6 +3,7 @@ use std::ffi::{OsStr, OsString}; use std::panic::{self, AssertUnwindSafe}; use std::path::PathBuf; +use std::rc::Rc; use std::task::Poll; use std::{iter, thread}; @@ -14,6 +15,7 @@ use rustc_middle::ty::layout::{LayoutCx, LayoutOf}; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_session::config::EntryFnType; +use crate::concurrency::GenmcCtx; use crate::concurrency::thread::TlsAllocAction; use crate::diagnostics::report_leaks; use crate::shims::tls; @@ -99,10 +101,6 @@ pub struct MiriConfig { pub validation: ValidationMode, /// Determines if Stacked Borrows or Tree Borrows is enabled. pub borrow_tracker: Option, - /// Whether `core::ptr::Unique` receives special treatment. - /// If `true` then `Unique` is reborrowed with its own new tag and permission, - /// otherwise `Unique` is just another raw pointer. - pub unique_is_unique: bool, /// Controls alignment checking. pub check_alignment: AlignmentCheck, /// Action for an op requiring communication with the host. @@ -117,16 +115,18 @@ pub struct MiriConfig { pub args: Vec, /// The seed to use when non-determinism or randomness are required (e.g. ptr-to-int cast, `getrandom()`). pub seed: Option, - /// The stacked borrows pointer ids to report about + /// The stacked borrows pointer ids to report about. pub tracked_pointer_tags: FxHashSet, /// The allocation ids to report about. pub tracked_alloc_ids: FxHashSet, /// For the tracked alloc ids, also report read/write accesses. pub track_alloc_accesses: bool, - /// Determine if data race detection should be enabled + /// Determine if data race detection should be enabled. pub data_race_detector: bool, - /// Determine if weak memory emulation should be enabled. Requires data race detection to be enabled + /// Determine if weak memory emulation should be enabled. Requires data race detection to be enabled. 
pub weak_memory_emulation: bool, + /// Determine if we are running in GenMC mode. In this mode, Miri will explore multiple concurrent executions of the given program. + pub genmc_mode: bool, /// Track when an outdated (weak memory) load happens. pub track_outdated_loads: bool, /// Rate of spurious failures for compare_exchange_weak atomic operations, @@ -137,7 +137,7 @@ pub measureme_out: Option<String>, /// Which style to use for printing backtraces. pub backtrace_style: BacktraceStyle, - /// Which provenance to use for int2ptr casts + /// Which provenance to use for int2ptr casts. pub provenance_mode: ProvenanceMode, /// Whether to ignore any output by the program. This is helpful when debugging miri /// as its messages don't get intermingled with the program messages. @@ -155,7 +155,7 @@ pub gc_interval: u32, /// The number of CPUs to be reported by miri. pub num_cpus: u32, - /// Requires Miri to emulate pages of a certain size + /// Requires Miri to emulate pages of a certain size. pub page_size: Option<u64>, /// Whether to collect a backtrace when each allocation is created, just in case it leaks. pub collect_leak_backtraces: bool, @@ -163,6 +163,8 @@ pub address_reuse_rate: f64, /// Probability for address reuse across threads. pub address_reuse_cross_thread_rate: f64, + /// Round Robin scheduling with no preemption. + pub fixed_scheduling: bool, } impl Default for MiriConfig { @@ -171,7 +173,6 @@ impl Default for MiriConfig { env: vec![], validation: ValidationMode::Shallow, borrow_tracker: Some(BorrowTrackerMethod::StackedBorrows), - unique_is_unique: false, check_alignment: AlignmentCheck::Int, isolated_op: IsolatedOp::Reject(RejectOpWith::Abort), ignore_leaks: false, @@ -184,6 +185,7 @@ track_alloc_accesses: false, data_race_detector: true, weak_memory_emulation: true, + genmc_mode: false, track_outdated_loads: false, cmpxchg_weak_failure_rate: 0.8, // 80% measureme_out: None, @@ -200,6 +202,7 @@ collect_leak_backtraces: true, address_reuse_rate: 0.5, address_reuse_cross_thread_rate: 0.1, + fixed_scheduling: false, } } }
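The new `fixed_scheduling` field is the last of a small set of knobs (weak cmpxchg failure rate, cross-thread address reuse, weak memory emulation) that together remove all randomness from concurrency decisions. Below is a sketch of how a single deterministic-concurrency switch could set them; this is our illustration, not code from this patch. The actual flag parsing lives in Miri's driver, which is not part of this diff, and the stand-in struct stubs out just the relevant fields so the snippet compiles on its own.

```rust
// Stand-in with only the four relevant fields; the real MiriConfig is the
// struct shown in the hunks above.
struct MiriConfig {
    fixed_scheduling: bool,
    cmpxchg_weak_failure_rate: f64,
    address_reuse_cross_thread_rate: f64,
    weak_memory_emulation: bool,
}

// Sketch (assumption): what a deterministic-concurrency switch plausibly sets.
fn apply_deterministic_concurrency(config: &mut MiriConfig) {
    config.fixed_scheduling = true; // round-robin scheduling, no random preemption
    config.cmpxchg_weak_failure_rate = 0.0; // compare_exchange_weak never spuriously fails
    config.address_reuse_cross_thread_rate = 0.0; // no cross-thread address reuse
    config.weak_memory_emulation = false; // no store-buffer nondeterminism
}
```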
@@ -230,16 +233,22 @@ impl<'tcx> MainThreadState<'tcx> { match state.on_stack_empty(this)? { Poll::Pending => {} // just keep going Poll::Ready(()) => { - // Give background threads a chance to finish by yielding the main thread a - // couple of times -- but only if we would also preempt threads randomly. - if this.machine.preemption_rate > 0.0 { - // There is a non-zero chance they will yield back to us often enough to - // make Miri terminate eventually. - *self = Yield { remaining: MAIN_THREAD_YIELDS_AT_SHUTDOWN }; - } else { - // The other threads did not get preempted, so no need to yield back to - // them. + if this.machine.data_race.as_genmc_ref().is_some() { + // In GenMC mode, we don't yield at the end of the main thread. + // Instead, the `GenmcCtx` will ensure that unfinished threads get a chance to run at this point. *self = Done; + } else { + // Give background threads a chance to finish by yielding the main thread a + // couple of times -- but only if we would also preempt threads randomly. + if this.machine.preemption_rate > 0.0 { + // There is a non-zero chance they will yield back to us often enough to + // make Miri terminate eventually. + *self = Yield { remaining: MAIN_THREAD_YIELDS_AT_SHUTDOWN }; + } else { + // The other threads did not get preempted, so no need to yield back to + // them. + *self = Done; + } } } }, @@ -265,6 +274,17 @@ impl<'tcx> MainThreadState<'tcx> { // Deal with our thread-local memory. We do *not* want to actually free it, instead we consider TLS // to be like a global `static`, so that all memory reached by it is considered to "not leak". this.terminate_active_thread(TlsAllocAction::Leak)?; + + // Machine cleanup. Only do this if all threads have terminated; threads that are still running + // might cause Stacked Borrows errors (https://github.com/rust-lang/miri/issues/2396). + if this.have_all_terminated() { + // Even if all threads have terminated, we have to beware of data races since some threads + // might not have joined the main thread (https://github.com/rust-lang/miri/issues/2020, + // https://github.com/rust-lang/miri/issues/2508). + this.allow_data_races_all_threads_done(); + EnvVars::cleanup(this).expect("error during env var cleanup"); + } + // Stop interpreter loop. throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check: true }); } @@ -280,11 +300,16 @@ pub fn create_ecx<'tcx>( entry_id: DefId, entry_type: MiriEntryFnType, config: &MiriConfig, + genmc_ctx: Option<Rc<GenmcCtx>>, ) -> InterpResult<'tcx, InterpCx<'tcx, MiriMachine<'tcx>>> { let typing_env = ty::TypingEnv::fully_monomorphized(); let layout_cx = LayoutCx::new(tcx, typing_env); - let mut ecx = - InterpCx::new(tcx, rustc_span::DUMMY_SP, typing_env, MiriMachine::new(config, layout_cx)); + let mut ecx = InterpCx::new( + tcx, + rustc_span::DUMMY_SP, + typing_env, + MiriMachine::new(config, layout_cx, genmc_ctx), + ); // Some parts of initialization require a full `InterpCx`. MiriMachine::late_init(&mut ecx, config, { @@ -438,12 +463,17 @@ pub fn eval_entry<'tcx>( tcx: TyCtxt<'tcx>, entry_id: DefId, entry_type: MiriEntryFnType, - config: MiriConfig, + config: &MiriConfig, + genmc_ctx: Option<Rc<GenmcCtx>>, ) -> Option<i32> { // Copy setting before we move `config`. let ignore_leaks = config.ignore_leaks; - let mut ecx = match create_ecx(tcx, entry_id, entry_type, &config).report_err() { + if let Some(genmc_ctx) = &genmc_ctx { + genmc_ctx.handle_execution_start(); + } + + let mut ecx = match create_ecx(tcx, entry_id, entry_type, config, genmc_ctx).report_err() { Ok(v) => v, Err(err) => { let (kind, backtrace) = err.into_parts(); @@ -462,21 +492,21 @@ pub fn eval_entry<'tcx>( // `Ok` can never happen; the interpreter loop always exits with an "error" // (but that "error" might be just "regular program termination"). let Err(err) = res.report_err(); + // Show diagnostic, if any. let (return_code, leak_check) = report_error(&ecx, err)?; - // If we get here there was no fatal error. - - // Machine cleanup. Only do this if all threads have terminated; threads that are still running - // might cause Stacked Borrows errors (https://github.com/rust-lang/miri/issues/2396). - if ecx.have_all_terminated() { - // Even if all threads have terminated, we have to beware of data races since some threads - // might not have joined the main thread (https://github.com/rust-lang/miri/issues/2020, - // https://github.com/rust-lang/miri/issues/2508). - ecx.allow_data_races_all_threads_done(); - EnvVars::cleanup(&mut ecx).expect("error during env var cleanup"); + // We inform GenMC that the execution is complete. + if let Some(genmc_ctx) = ecx.machine.data_race.as_genmc_ref() && let Err(error) = genmc_ctx.handle_execution_end(&ecx) { + // FIXME(GenMC): Improve error reporting. + tcx.dcx().err(format!("GenMC returned an error: \"{error}\"")); + return None; } + // If we get here there was no fatal error. + // Possibly check for memory leaks. if leak_check && !ignore_leaks { // Check for thread leaks.
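`eval_entry` now borrows the config and threads an optional, shared `GenmcCtx` through each execution, which only pays off if an outer loop calls it once per explored execution. A rough sketch of such a driver loop follows; `GenmcCtx::new` and `explored_all_executions` are hypothetical names invented here for illustration (the diff shows neither), and `tcx`, `entry_id`, and `entry_type` are assumed to be in scope.

```rust
// Sketch only: the persistent context outlives every execution, matching the
// comment in MiriMachine::new that `genmc_ctx` persists across executions.
let genmc_ctx = Rc::new(GenmcCtx::new(&config)); // hypothetical constructor
loop {
    // The config is borrowed (not moved) so it can be reused, and each
    // execution gets a clone of the shared context.
    let return_code = eval_entry(tcx, entry_id, entry_type, &config, Some(genmc_ctx.clone()));
    if return_code.is_none() {
        break; // a fatal error was already reported through the diagnostics machinery
    }
    // Hypothetical stopping condition; how GenMC signals that all executions
    // have been explored is not part of this diff.
    if genmc_ctx.explored_all_executions() {
        break;
    }
}
```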
diff --git a/src/tools/miri/src/intrinsics/atomic.rs b/src/tools/miri/src/intrinsics/atomic.rs index dcafa7b6cabe5..2eb8086f578f2 100644 --- a/src/tools/miri/src/intrinsics/atomic.rs +++ b/src/tools/miri/src/intrinsics/atomic.rs @@ -149,7 +149,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { // Perform regular load. let val = this.read_scalar(val)?; - // Perform atomic store + // Perform atomic store. this.write_scalar_atomic(val, &place, atomic)?; interp_ok(()) } @@ -161,7 +161,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { ) -> InterpResult<'tcx> { let [] = check_intrinsic_arg_count(args)?; let _ = atomic; - //FIXME: compiler fences are currently ignored + // FIXME, FIXME(GenMC): compiler fences are currently ignored (also ignored in GenMC mode) interp_ok(()) } @@ -199,23 +199,16 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { span_bug!(this.cur_span(), "atomic arithmetic operation type mismatch"); } - match atomic_op { - AtomicOp::Min => { - let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?; - this.write_immediate(*old, dest)?; // old value is returned - interp_ok(()) - } - AtomicOp::Max => { - let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?; - this.write_immediate(*old, dest)?; // old value is returned - interp_ok(()) - } - AtomicOp::MirOp(op, not) => { - let old = this.atomic_rmw_op_immediate(&place, &rhs, op, not, atomic)?; - this.write_immediate(*old, dest)?; // old value is returned - interp_ok(()) - } - } + let old = match atomic_op { + AtomicOp::Min => + this.atomic_min_max_scalar(&place, rhs, /* min */ true, atomic)?, + AtomicOp::Max => + this.atomic_min_max_scalar(&place, rhs, /* min */ false, atomic)?, + AtomicOp::MirOp(op, not) => + this.atomic_rmw_op_immediate(&place, &rhs, op, not, atomic)?, + }; + this.write_immediate(*old, dest)?; // old value is returned + interp_ok(()) } fn atomic_exchange( diff --git a/src/tools/miri/src/lib.rs b/src/tools/miri/src/lib.rs index e03611e9b509e..58b93ae82a1e4 100644 --- a/src/tools/miri/src/lib.rs +++ b/src/tools/miri/src/lib.rs @@ -133,6 +133,7 @@ pub use crate::concurrency::thread::{ BlockReason, DynUnblockCallback, EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager, TimeoutAnchor, TimeoutClock, UnblockKind, }; +pub use crate::concurrency::{GenmcConfig, GenmcCtx}; pub use crate::diagnostics::{ EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo, report_error, };
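The `atomic.rs` hunk above is a pure refactor: all three match arms ended in the same `this.write_immediate(*old, dest)?; interp_ok(())` tail, so the match now only selects how `old` is computed and the common tail is written once. The same shape in a self-contained toy example (our names, not Miri's interpreter types):

```rust
enum AtomicOp {
    Min,
    Max,
    Add,
}

// `cell` plays the role of `place`: the match picks the new value, the store
// happens exactly once after the match, and the old value is returned.
fn atomic_rmw(cell: &mut i64, op: AtomicOp, rhs: i64) -> i64 {
    let old = *cell;
    *cell = match op {
        AtomicOp::Min => old.min(rhs),
        AtomicOp::Max => old.max(rhs),
        AtomicOp::Add => old + rhs,
    };
    old // old value is returned
}

fn main() {
    let mut v = 3;
    assert_eq!(atomic_rmw(&mut v, AtomicOp::Max, 5), 3); // returns the old value
    assert_eq!(v, 5); // the new value was stored once
}
```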
diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs index 55aa3d6fa6873..6060d41dac594 100644 --- a/src/tools/miri/src/machine.rs +++ b/src/tools/miri/src/machine.rs @@ -6,6 +6,7 @@ use std::borrow::Cow; use std::cell::{Cell, RefCell}; use std::collections::hash_map::Entry; use std::path::Path; +use std::rc::Rc; use std::{fmt, process}; use rand::rngs::StdRng; @@ -27,9 +28,10 @@ use rustc_span::def_id::{CrateNum, DefId}; use rustc_span::{Span, SpanData, Symbol}; use rustc_target::callconv::FnAbi; +use crate::alloc_addresses::EvalContextExt; use crate::concurrency::cpu_affinity::{self, CpuAffinityMask}; use crate::concurrency::data_race::{self, NaReadType, NaWriteType}; -use crate::concurrency::weak_memory; +use crate::concurrency::{AllocDataRaceHandler, GenmcCtx, GlobalDataRaceHandler, weak_memory}; use crate::*; /// First real-time signal. @@ -332,12 +334,10 @@ impl ProvenanceExtra { pub struct AllocExtra<'tcx> { /// Global state of the borrow tracker, if enabled. pub borrow_tracker: Option<borrow_tracker::AllocState>, - /// Data race detection via the use of a vector-clock. - /// This is only added if it is enabled. - pub data_race: Option<data_race::AllocState>, - /// Weak memory emulation via the use of store buffers. - /// This is only added if it is enabled. - pub weak_memory: Option<weak_memory::AllocState>, + /// Extra state for data race detection. + /// + /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine` + pub data_race: AllocDataRaceHandler, /// A backtrace to where this allocation was allocated. /// As this is recorded for leak reports, it only exists /// if this allocation is leakable. The backtrace is not @@ -360,11 +360,10 @@ impl<'tcx> Clone for AllocExtra<'tcx> { impl VisitProvenance for AllocExtra<'_> { fn visit_provenance(&self, visit: &mut VisitWith<'_>) { - let AllocExtra { borrow_tracker, data_race, weak_memory, backtrace: _, sync: _ } = self; + let AllocExtra { borrow_tracker, data_race, backtrace: _, sync: _ } = self; borrow_tracker.visit_provenance(visit); data_race.visit_provenance(visit); - weak_memory.visit_provenance(visit); } } @@ -447,8 +446,12 @@ pub struct MiriMachine<'tcx> { /// Global data for borrow tracking. pub borrow_tracker: Option<borrow_tracker::GlobalState>, - /// Data race detector global data. - pub data_race: Option<data_race::GlobalState>, + /// Depending on settings, this will be `None`, + /// global data for a data race detector, + /// or the context required for running in GenMC mode. + /// + /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`. + pub data_race: GlobalDataRaceHandler, /// Ptr-int-cast module global data. pub alloc_addresses: alloc_addresses::GlobalState, @@ -544,9 +547,6 @@ pub struct MiriMachine<'tcx> { /// Corresponds to -Zmiri-mute-stdout-stderr and doesn't write the output but acts as if it succeeded. pub(crate) mute_stdout_stderr: bool, - /// Whether weak memory emulation is enabled - pub(crate) weak_memory: bool, - /// The probability of the active thread being preempted at the end of each basic block. pub(crate) preemption_rate: f64, @@ -617,7 +617,11 @@ pub struct MiriMachine<'tcx> { } impl<'tcx> MiriMachine<'tcx> { - pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx>) -> Self { + pub(crate) fn new( + config: &MiriConfig, + layout_cx: LayoutCx<'tcx>, + genmc_ctx: Option<Rc<GenmcCtx>>, + ) -> Self { let tcx = layout_cx.tcx(); let local_crates = helpers::get_local_crates(tcx); let layouts = @@ -636,7 +640,14 @@ impl<'tcx> MiriMachine<'tcx> { }); let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0)); let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config)); - let data_race = config.data_race_detector.then(|| data_race::GlobalState::new(config)); + let data_race = if config.genmc_mode { + // `genmc_ctx` persists across executions, so we don't create a new one here. + GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap()) + } else if config.data_race_detector { + GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config))) + } else { + GlobalDataRaceHandler::None + }; // Determine page size, stack address, and stack size. // These values are mostly meaningless, but the stack address is also where we start // allocating physical integer addresses for all allocations.
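The matches and construction sites in these machine.rs hunks pin down the shape of the two handler enums; their real definitions live in Miri's `src/concurrency` module and are not part of this diff. A compilable approximation with stub payload types, inferred from the code above (a sketch, not the actual definitions):

```rust
use std::rc::Rc;

// Stubs so the sketch stands alone; in Miri these are real module types.
struct GlobalState; // stands in for data_race::GlobalState
struct GenmcCtx; // stands in for concurrency::GenmcCtx
struct AllocState; // stands in for data_race::AllocState
struct StoreBufferAlloc; // stands in for weak_memory::AllocState

enum GlobalDataRaceHandler {
    None,
    // Boxed, as in `GlobalDataRaceHandler::Vclocks(Box::new(...))` above.
    Vclocks(Box<GlobalState>),
    // Shared and persistent across executions, hence the Rc.
    Genmc(Rc<GenmcCtx>),
}

enum AllocDataRaceHandler {
    None,
    // Per-allocation vector clocks, plus store buffers iff weak memory
    // emulation is enabled (see `data_race.weak_memory.then(...)` above).
    Vclocks(AllocState, Option<StoreBufferAlloc>),
    // GenMC learns about allocations via the alloc_addresses module instead.
    Genmc,
}

impl GlobalDataRaceHandler {
    // Accessors in the style of the `as_vclocks_ref`/`as_genmc_ref` calls
    // used throughout this diff.
    fn as_vclocks_ref(&self) -> Option<&GlobalState> {
        if let GlobalDataRaceHandler::Vclocks(g) = self { Some(g) } else { None }
    }
    fn as_genmc_ref(&self) -> Option<&Rc<GenmcCtx>> {
        if let GlobalDataRaceHandler::Genmc(c) = self { Some(c) } else { None }
    }
}
```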
@@ -669,7 +680,7 @@ impl<'tcx> MiriMachine<'tcx> { cpu_affinity::MAX_CPUS, config.num_cpus ); - let threads = ThreadManager::default(); + let threads = ThreadManager::new(config); let mut thread_cpu_affinity = FxHashMap::default(); if matches!(&*tcx.sess.target.os, "linux" | "freebsd" | "android") { thread_cpu_affinity @@ -709,7 +720,6 @@ impl<'tcx> MiriMachine<'tcx> { check_alignment: config.check_alignment, cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate, mute_stdout_stderr: config.mute_stdout_stderr, - weak_memory: config.weak_memory_emulation, preemption_rate: config.preemption_rate, report_progress: config.report_progress, basic_block_count: 0, @@ -835,16 +845,25 @@ impl<'tcx> MiriMachine<'tcx> { .as_ref() .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine)); - let data_race = ecx.machine.data_race.as_ref().map(|data_race| { - data_race::AllocState::new_allocation( - data_race, - &ecx.machine.threads, - size, - kind, - ecx.machine.current_span(), - ) - }); - let weak_memory = ecx.machine.weak_memory.then(weak_memory::AllocState::new_allocation); + let data_race = match &ecx.machine.data_race { + GlobalDataRaceHandler::None => AllocDataRaceHandler::None, + GlobalDataRaceHandler::Vclocks(data_race) => + AllocDataRaceHandler::Vclocks( + data_race::AllocState::new_allocation( + data_race, + &ecx.machine.threads, + size, + kind, + ecx.machine.current_span(), + ), + data_race.weak_memory.then(weak_memory::AllocState::new_allocation), + ), + GlobalDataRaceHandler::Genmc(_genmc_ctx) => { + // GenMC learns about new allocations directly from the alloc_addresses module, + // since it has to be able to control the address at which they are placed. + AllocDataRaceHandler::Genmc + } + }; // If an allocation is leaked, we want to report a backtrace to indicate where it was // allocated. We don't need to record a backtrace for allocations which are allowed to @@ -862,13 +881,7 @@ impl<'tcx> MiriMachine<'tcx> { .insert(id, (ecx.machine.current_span(), None)); } - interp_ok(AllocExtra { - borrow_tracker, - data_race, - weak_memory, - backtrace, - sync: FxHashMap::default(), - }) + interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() }) } } @@ -909,7 +922,6 @@ impl VisitProvenance for MiriMachine<'_> { check_alignment: _, cmpxchg_weak_failure_rate: _, mute_stdout_stderr: _, - weak_memory: _, preemption_rate: _, report_progress: _, basic_block_count: _, @@ -1203,9 +1215,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { ecx: &mut InterpCx<'tcx, Self>, val: ImmTy<'tcx>, ) -> InterpResult<'tcx, ImmTy<'tcx>> { - crate::math::apply_random_float_error_to_imm( - ecx, val, 2 /* log2(4) */ - ) + crate::math::apply_random_float_error_to_imm(ecx, val, 2 /* log2(4) */) } #[inline(always)] @@ -1380,7 +1390,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { _tcx: TyCtxtAt<'tcx>, machine: &Self, alloc_extra: &AllocExtra<'tcx>, - _ptr: Pointer, + ptr: Pointer, (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra), range: AllocRange, ) -> InterpResult<'tcx> { @@ -1388,15 +1398,25 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { machine .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Read)); } - if let Some(data_race) = &alloc_extra.data_race { - data_race.read(alloc_id, range, NaReadType::Read, None, machine)?; + // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error. 
+ match &machine.data_race { + GlobalDataRaceHandler::None => {} + GlobalDataRaceHandler::Genmc(genmc_ctx) => + genmc_ctx.memory_load(machine, ptr.addr(), range.size)?, + GlobalDataRaceHandler::Vclocks(_data_race) => { + let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &alloc_extra.data_race + else { + unreachable!(); + }; + data_race.read(alloc_id, range, NaReadType::Read, None, machine)?; + if let Some(weak_memory) = weak_memory { + weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap()); + } + } } if let Some(borrow_tracker) = &alloc_extra.borrow_tracker { borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?; } - if let Some(weak_memory) = &alloc_extra.weak_memory { - weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap()); - } interp_ok(()) } @@ -1405,7 +1425,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { _tcx: TyCtxtAt<'tcx>, machine: &mut Self, alloc_extra: &mut AllocExtra<'tcx>, - _ptr: Pointer, + ptr: Pointer, (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra), range: AllocRange, ) -> InterpResult<'tcx> { @@ -1413,15 +1433,26 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { machine .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Write)); } - if let Some(data_race) = &mut alloc_extra.data_race { - data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?; + match &machine.data_race { + GlobalDataRaceHandler::None => {} + GlobalDataRaceHandler::Genmc(genmc_ctx) => { + genmc_ctx.memory_store(machine, ptr.addr(), range.size)?; + } + GlobalDataRaceHandler::Vclocks(_global_state) => { + let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = + &mut alloc_extra.data_race + else { + unreachable!() + }; + data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?; + if let Some(weak_memory) = weak_memory { + weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap()); + } + } } if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker { borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?; } - if let Some(weak_memory) = &alloc_extra.weak_memory { - weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap()); - } interp_ok(()) } @@ -1430,7 +1461,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { _tcx: TyCtxtAt<'tcx>, machine: &mut Self, alloc_extra: &mut AllocExtra<'tcx>, - _ptr: Pointer, + ptr: Pointer, (alloc_id, prove_extra): (AllocId, Self::ProvenanceExtra), size: Size, align: Align, @@ -1439,14 +1470,20 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { if machine.tracked_alloc_ids.contains(&alloc_id) { machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id)); } - if let Some(data_race) = &mut alloc_extra.data_race { - data_race.write( - alloc_id, - alloc_range(Size::ZERO, size), - NaWriteType::Deallocate, - None, - machine, - )?; + match &machine.data_race { + GlobalDataRaceHandler::None => {} + GlobalDataRaceHandler::Genmc(genmc_ctx) => + genmc_ctx.handle_dealloc(machine, ptr.addr(), size, align, kind)?, + GlobalDataRaceHandler::Vclocks(_global_state) => { + let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap(); + data_race.write( + alloc_id, + alloc_range(Size::ZERO, size), + NaWriteType::Deallocate, + None, + machine, + )?; + } } if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker { borrow_tracker.before_memory_deallocation(alloc_id, prove_extra, size, machine)?; @@ -1533,7 +1570,11 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { timing, 
is_user_relevant: ecx.machine.is_user_relevant(&frame), salt: ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL), - data_race: ecx.machine.data_race.as_ref().map(|_| data_race::FrameState::default()), + data_race: ecx + .machine + .data_race + .as_vclocks_ref() + .map(|_| data_race::FrameState::default()), }; interp_ok(frame.with_extra(extra)) @@ -1679,7 +1720,11 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { if let Some(data_race) = &machine.threads.active_thread_stack().last().unwrap().extra.data_race { - data_race.local_moved_to_memory(local, alloc_info.data_race.as_mut().unwrap(), machine); + data_race.local_moved_to_memory( + local, + alloc_info.data_race.as_vclocks_mut().unwrap(), + machine, + ); } interp_ok(()) } diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_join_self.rs b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_join_self.rs index 53760b05a3127..f75f306e5a631 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_join_self.rs +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_join_self.rs @@ -1,6 +1,6 @@ //@ignore-target: windows # No pthreads on Windows // We are making scheduler assumptions here. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency // Joining itself is undefined behavior. diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_deadlock.rs b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_deadlock.rs index e3d5da26aeae4..68dce8f865423 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_deadlock.rs +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_deadlock.rs @@ -1,5 +1,7 @@ //@ignore-target: windows # No pthreads on Windows //@error-in-other-file: deadlock +// We are making scheduler assumptions here. +//@compile-flags: -Zmiri-deterministic-concurrency use std::cell::UnsafeCell; use std::sync::Arc; diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_read_deadlock.rs b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_read_deadlock.rs index 3a985122e22e0..3a932404238db 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_read_deadlock.rs +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_read_deadlock.rs @@ -1,5 +1,7 @@ //@ignore-target: windows # No pthreads on Windows //@error-in-other-file: deadlock +// We are making scheduler assumptions here. +//@compile-flags: -Zmiri-deterministic-concurrency use std::cell::UnsafeCell; use std::sync::Arc; diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_write_deadlock.rs b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_write_deadlock.rs index 6d7bb80d8e6c6..3b21738846357 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_write_deadlock.rs +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_write_deadlock.rs @@ -1,5 +1,7 @@ //@ignore-target: windows # No pthreads on Windows //@error-in-other-file: deadlock +// We are making scheduler assumptions here. 
+//@compile-flags: -Zmiri-deterministic-concurrency use std::cell::UnsafeCell; use std::sync::Arc; diff --git a/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.rs b/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.rs index 3ee2bf14f9fe4..2980d257a2925 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.rs +++ b/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.rs @@ -1,6 +1,6 @@ //@only-target: windows # Uses win32 api functions // We are making scheduler assumptions here. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //@error-in-other-file: deadlock // On windows, joining main is not UB, but it will block a thread forever. diff --git a/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.rs b/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.rs index eee2979f3cff3..85672ec860f56 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.rs +++ b/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.rs @@ -1,6 +1,6 @@ //@only-target: windows # Uses win32 api functions // We are making scheduler assumptions here. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //@error-in-other-file: deadlock // On windows, a thread joining itself is not UB, but it will deadlock. diff --git a/src/tools/miri/tests/fail-dep/libc/env-set_var-data-race.rs b/src/tools/miri/tests/fail-dep/libc/env-set_var-data-race.rs index 3a832bb0ce0c1..5c80f6425eaae 100644 --- a/src/tools/miri/tests/fail-dep/libc/env-set_var-data-race.rs +++ b/src/tools/miri/tests/fail-dep/libc/env-set_var-data-race.rs @@ -1,4 +1,4 @@ -//@compile-flags: -Zmiri-disable-isolation -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-disable-isolation -Zmiri-deterministic-concurrency //@ignore-target: windows # No libc env support on Windows use std::{env, thread}; diff --git a/src/tools/miri/tests/fail-dep/libc/eventfd_block_read_twice.rs b/src/tools/miri/tests/fail-dep/libc/eventfd_block_read_twice.rs index 0d893663fd6d3..9dc554030c0e1 100644 --- a/src/tools/miri/tests/fail-dep/libc/eventfd_block_read_twice.rs +++ b/src/tools/miri/tests/fail-dep/libc/eventfd_block_read_twice.rs @@ -1,7 +1,7 @@ //@only-target: linux android illumos //~^ERROR: deadlocked //~^^ERROR: deadlocked -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //@error-in-other-file: deadlock use std::thread; diff --git a/src/tools/miri/tests/fail-dep/libc/eventfd_block_write_twice.rs b/src/tools/miri/tests/fail-dep/libc/eventfd_block_write_twice.rs index 9fed47c17d405..5297a3297750a 100644 --- a/src/tools/miri/tests/fail-dep/libc/eventfd_block_write_twice.rs +++ b/src/tools/miri/tests/fail-dep/libc/eventfd_block_write_twice.rs @@ -1,7 +1,7 @@ //@only-target: linux android illumos //~^ERROR: deadlocked //~^^ERROR: deadlocked -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //@error-in-other-file: deadlock use std::thread; diff --git a/src/tools/miri/tests/fail-dep/libc/libc-epoll-data-race.rs b/src/tools/miri/tests/fail-dep/libc/libc-epoll-data-race.rs index 45f6bf6da0968..314ce90cfb5d4 100644 --- a/src/tools/miri/tests/fail-dep/libc/libc-epoll-data-race.rs +++ b/src/tools/miri/tests/fail-dep/libc/libc-epoll-data-race.rs @@ -4,7 +4,7 @@ //! to be considered synchronized. 
//@only-target: linux android illumos // ensure deterministic schedule -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::convert::TryInto; use std::thread; diff --git a/src/tools/miri/tests/fail-dep/libc/libc_epoll_block_two_thread.rs b/src/tools/miri/tests/fail-dep/libc/libc_epoll_block_two_thread.rs index 059b24cb8c0be..f6f2e2b93121b 100644 --- a/src/tools/miri/tests/fail-dep/libc/libc_epoll_block_two_thread.rs +++ b/src/tools/miri/tests/fail-dep/libc/libc_epoll_block_two_thread.rs @@ -1,4 +1,4 @@ -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //~^ERROR: deadlocked //~^^ERROR: deadlocked //@only-target: linux android illumos diff --git a/src/tools/miri/tests/fail-dep/libc/socketpair-close-while-blocked.rs b/src/tools/miri/tests/fail-dep/libc/socketpair-close-while-blocked.rs index 8413e118819cd..0699dec6556d4 100644 --- a/src/tools/miri/tests/fail-dep/libc/socketpair-close-while-blocked.rs +++ b/src/tools/miri/tests/fail-dep/libc/socketpair-close-while-blocked.rs @@ -2,7 +2,7 @@ //! faulty logic around `release_clock` that led to this code not reporting a data race. //~^^ERROR: deadlock //@ignore-target: windows # no libc socketpair on Windows -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //@error-in-other-file: deadlock use std::thread; diff --git a/src/tools/miri/tests/fail-dep/libc/socketpair-data-race.rs b/src/tools/miri/tests/fail-dep/libc/socketpair-data-race.rs index 55491da9f60d7..37fac436ff3e1 100644 --- a/src/tools/miri/tests/fail-dep/libc/socketpair-data-race.rs +++ b/src/tools/miri/tests/fail-dep/libc/socketpair-data-race.rs @@ -1,7 +1,7 @@ //! This is a regression test for : we had some //! faulty logic around `release_clock` that led to this code not reporting a data race. //@ignore-target: windows # no libc socketpair on Windows -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::thread; fn main() { diff --git a/src/tools/miri/tests/fail-dep/libc/socketpair_block_read_twice.rs b/src/tools/miri/tests/fail-dep/libc/socketpair_block_read_twice.rs index d3e4c43f2b75c..b383985950029 100644 --- a/src/tools/miri/tests/fail-dep/libc/socketpair_block_read_twice.rs +++ b/src/tools/miri/tests/fail-dep/libc/socketpair_block_read_twice.rs @@ -2,7 +2,7 @@ //~^ERROR: deadlocked //~^^ERROR: deadlocked // test_race depends on a deterministic schedule. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //@error-in-other-file: deadlock use std::thread; diff --git a/src/tools/miri/tests/fail-dep/libc/socketpair_block_write_twice.rs b/src/tools/miri/tests/fail-dep/libc/socketpair_block_write_twice.rs index 4f951acb2c31a..7d84d87ebbb14 100644 --- a/src/tools/miri/tests/fail-dep/libc/socketpair_block_write_twice.rs +++ b/src/tools/miri/tests/fail-dep/libc/socketpair_block_write_twice.rs @@ -2,7 +2,7 @@ //~^ERROR: deadlocked //~^^ERROR: deadlocked // test_race depends on a deterministic schedule. 
-//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //@error-in-other-file: deadlock use std::thread; diff --git a/src/tools/miri/tests/fail/both_borrows/retag_data_race_write.rs b/src/tools/miri/tests/fail/both_borrows/retag_data_race_write.rs index 0061679eaa4e7..31f1a22f9f6d5 100644 --- a/src/tools/miri/tests/fail/both_borrows/retag_data_race_write.rs +++ b/src/tools/miri/tests/fail/both_borrows/retag_data_race_write.rs @@ -1,8 +1,6 @@ //! Make sure that a retag acts like a write for the data race model. //@revisions: stack tree -//@compile-flags: -Zmiri-preemption-rate=0 -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //@[tree]compile-flags: -Zmiri-tree-borrows #[derive(Copy, Clone)] struct SendPtr(*mut u8); diff --git a/src/tools/miri/tests/fail/data_race/alloc_read_race.rs b/src/tools/miri/tests/fail/data_race/alloc_read_race.rs index 312b7ba05d31c..7c5116989943e 100644 --- a/src/tools/miri/tests/fail/data_race/alloc_read_race.rs +++ b/src/tools/miri/tests/fail/data_race/alloc_read_race.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::mem::MaybeUninit; use std::ptr::null_mut; diff --git a/src/tools/miri/tests/fail/data_race/alloc_write_race.rs b/src/tools/miri/tests/fail/data_race/alloc_write_race.rs index f1f308b37e7bf..ba8a888de9ea0 100644 --- a/src/tools/miri/tests/fail/data_race/alloc_write_race.rs +++ b/src/tools/miri/tests/fail/data_race/alloc_write_race.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::ptr::null_mut; use std::sync::atomic::{AtomicPtr, Ordering}; diff --git a/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race1.rs b/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race1.rs index 4003892f0a609..8cce54603ce57 100644 --- a/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race1.rs +++ b/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race1.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
-//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race2.rs b/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race2.rs index c67ce65eb3404..b6c0ef37cb920 100644 --- a/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race2.rs +++ b/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race2.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race1.rs b/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race1.rs index 5e328740e8517..03ae6895c5745 100644 --- a/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race1.rs +++ b/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race1.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race2.rs b/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race2.rs index e0876a93fdd8c..4a5edf5cc14dd 100644 --- a/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race2.rs +++ b/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race2.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race1.rs b/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race1.rs index 1010216a4976b..e8d930a51dee3 100644 --- a/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race1.rs +++ b/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race1.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
-//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race2.rs b/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race2.rs index fdc0f9e20f079..4c67d2d765415 100644 --- a/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race2.rs +++ b/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race2.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/dangling_thread_async_race.rs b/src/tools/miri/tests/fail/data_race/dangling_thread_async_race.rs index dffafe3cfaa97..fbb2c01e5a9aa 100644 --- a/src/tools/miri/tests/fail/data_race/dangling_thread_async_race.rs +++ b/src/tools/miri/tests/fail/data_race/dangling_thread_async_race.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::mem; use std::thread::{sleep, spawn}; diff --git a/src/tools/miri/tests/fail/data_race/dangling_thread_race.rs b/src/tools/miri/tests/fail/data_race/dangling_thread_race.rs index 8dc35c7ea720e..7431bc589ff95 100644 --- a/src/tools/miri/tests/fail/data_race/dangling_thread_race.rs +++ b/src/tools/miri/tests/fail/data_race/dangling_thread_race.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::mem; use std::thread::{sleep, spawn}; diff --git a/src/tools/miri/tests/fail/data_race/dealloc_read_race1.rs b/src/tools/miri/tests/fail/data_race/dealloc_read_race1.rs index d0a2848205410..999cc2392f5af 100644 --- a/src/tools/miri/tests/fail/data_race/dealloc_read_race1.rs +++ b/src/tools/miri/tests/fail/data_race/dealloc_read_race1.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
-//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows #![feature(rustc_attrs)] diff --git a/src/tools/miri/tests/fail/data_race/dealloc_read_race2.rs b/src/tools/miri/tests/fail/data_race/dealloc_read_race2.rs index f56c44cabc234..bd3b037e58381 100644 --- a/src/tools/miri/tests/fail/data_race/dealloc_read_race2.rs +++ b/src/tools/miri/tests/fail/data_race/dealloc_read_race2.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows #![feature(rustc_attrs)] diff --git a/src/tools/miri/tests/fail/data_race/dealloc_read_race_stack.rs b/src/tools/miri/tests/fail/data_race/dealloc_read_race_stack.rs index c67e03d362b05..e3d06660aab34 100644 --- a/src/tools/miri/tests/fail/data_race/dealloc_read_race_stack.rs +++ b/src/tools/miri/tests/fail/data_race/dealloc_read_race_stack.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::ptr::null_mut; use std::sync::atomic::{AtomicPtr, Ordering}; diff --git a/src/tools/miri/tests/fail/data_race/dealloc_write_race1.rs b/src/tools/miri/tests/fail/data_race/dealloc_write_race1.rs index a16ea25e11ce8..90e87f8c49564 100644 --- a/src/tools/miri/tests/fail/data_race/dealloc_write_race1.rs +++ b/src/tools/miri/tests/fail/data_race/dealloc_write_race1.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows #![feature(rustc_attrs)] diff --git a/src/tools/miri/tests/fail/data_race/dealloc_write_race2.rs b/src/tools/miri/tests/fail/data_race/dealloc_write_race2.rs index f3855e33c98d9..d9b1af80af493 100644 --- a/src/tools/miri/tests/fail/data_race/dealloc_write_race2.rs +++ b/src/tools/miri/tests/fail/data_race/dealloc_write_race2.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
-//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows #![feature(rustc_attrs)] diff --git a/src/tools/miri/tests/fail/data_race/dealloc_write_race_stack.rs b/src/tools/miri/tests/fail/data_race/dealloc_write_race_stack.rs index 8e63bc1dc7b41..c1ab1942c6884 100644 --- a/src/tools/miri/tests/fail/data_race/dealloc_write_race_stack.rs +++ b/src/tools/miri/tests/fail/data_race/dealloc_write_race_stack.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::ptr::null_mut; use std::sync::atomic::{AtomicPtr, Ordering}; diff --git a/src/tools/miri/tests/fail/data_race/enable_after_join_to_main.rs b/src/tools/miri/tests/fail/data_race/enable_after_join_to_main.rs index 53050608d2715..67af6862737dc 100644 --- a/src/tools/miri/tests/fail/data_race/enable_after_join_to_main.rs +++ b/src/tools/miri/tests/fail/data_race/enable_after_join_to_main.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/fence_after_load.rs b/src/tools/miri/tests/fail/data_race/fence_after_load.rs index 5dfb260c20bdc..b1eb1dda6d886 100644 --- a/src/tools/miri/tests/fail/data_race/fence_after_load.rs +++ b/src/tools/miri/tests/fail/data_race/fence_after_load.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
-//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering, fence}; diff --git a/src/tools/miri/tests/fail/data_race/local_variable_alloc_race.rs b/src/tools/miri/tests/fail/data_race/local_variable_alloc_race.rs index 751a308a39998..647f209e8bf88 100644 --- a/src/tools/miri/tests/fail/data_race/local_variable_alloc_race.rs +++ b/src/tools/miri/tests/fail/data_race/local_variable_alloc_race.rs @@ -1,4 +1,4 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation +//@compile-flags:-Zmiri-deterministic-concurrency #![feature(core_intrinsics)] #![feature(custom_mir)] diff --git a/src/tools/miri/tests/fail/data_race/local_variable_read_race.rs b/src/tools/miri/tests/fail/data_race/local_variable_read_race.rs index 16a23f595ee47..f83d6c89fe5f9 100644 --- a/src/tools/miri/tests/fail/data_race/local_variable_read_race.rs +++ b/src/tools/miri/tests/fail/data_race/local_variable_read_race.rs @@ -1,4 +1,4 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation +//@compile-flags:-Zmiri-deterministic-concurrency use std::sync::atomic::Ordering::*; use std::sync::atomic::*; diff --git a/src/tools/miri/tests/fail/data_race/local_variable_write_race.rs b/src/tools/miri/tests/fail/data_race/local_variable_write_race.rs index 7e00573146c25..ee1bef7ba5c08 100644 --- a/src/tools/miri/tests/fail/data_race/local_variable_write_race.rs +++ b/src/tools/miri/tests/fail/data_race/local_variable_write_race.rs @@ -1,4 +1,4 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation +//@compile-flags:-Zmiri-deterministic-concurrency use std::sync::atomic::Ordering::*; use std::sync::atomic::*; diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs index e76654806bb1e..720602d011e36 100644 --- a/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_read_write.rs @@ -1,6 +1,4 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags:-Zmiri-deterministic-concurrency // Two variants: the atomic store matches the size of the first or second atomic load. //@revisions: match_first_load match_second_load diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs index 53016bab78045..78bba173ed7d0 100644 --- a/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs +++ b/src/tools/miri/tests/fail/data_race/mixed_size_read_write.rs @@ -1,6 +1,4 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags:-Zmiri-deterministic-concurrency // Two revisions, depending on which access goes first. 
//@revisions: read_write write_read diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_write_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_write_write.rs index 545e354a0372c..808280a4b3113 100644 --- a/src/tools/miri/tests/fail/data_race/mixed_size_write_write.rs +++ b/src/tools/miri/tests/fail/data_race/mixed_size_write_write.rs @@ -1,6 +1,4 @@ -//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags:-Zmiri-deterministic-concurrency //@revisions: fst snd use std::sync::atomic::{AtomicU8, AtomicU16, Ordering}; diff --git a/src/tools/miri/tests/fail/data_race/read_write_race.rs b/src/tools/miri/tests/fail/data_race/read_write_race.rs index adf19dda9d3db..2aadef36c5b95 100644 --- a/src/tools/miri/tests/fail/data_race/read_write_race.rs +++ b/src/tools/miri/tests/fail/data_race/read_write_race.rs @@ -1,7 +1,5 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/read_write_race_stack.rs b/src/tools/miri/tests/fail/data_race/read_write_race_stack.rs index f411767f7b57e..cca39bb002c17 100644 --- a/src/tools/miri/tests/fail/data_race/read_write_race_stack.rs +++ b/src/tools/miri/tests/fail/data_race/read_write_race_stack.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::ptr::null_mut; use std::sync::atomic::{AtomicPtr, Ordering}; diff --git a/src/tools/miri/tests/fail/data_race/relax_acquire_race.rs b/src/tools/miri/tests/fail/data_race/relax_acquire_race.rs index c4f943808229c..262c039e4ae17 100644 --- a/src/tools/miri/tests/fail/data_race/relax_acquire_race.rs +++ b/src/tools/miri/tests/fail/data_race/relax_acquire_race.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. 
+//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/release_seq_race.rs b/src/tools/miri/tests/fail/data_race/release_seq_race.rs index f03ab3efa0624..8aeb6ee6ef1d1 100644 --- a/src/tools/miri/tests/fail/data_race/release_seq_race.rs +++ b/src/tools/miri/tests/fail/data_race/release_seq_race.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::{sleep, spawn}; diff --git a/src/tools/miri/tests/fail/data_race/release_seq_race_same_thread.rs b/src/tools/miri/tests/fail/data_race/release_seq_race_same_thread.rs index 88ae01b3ca1c7..f465160718f45 100644 --- a/src/tools/miri/tests/fail/data_race/release_seq_race_same_thread.rs +++ b/src/tools/miri/tests/fail/data_race/release_seq_race_same_thread.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/rmw_race.rs b/src/tools/miri/tests/fail/data_race/rmw_race.rs index d738caa105879..39588c15ec7ea 100644 --- a/src/tools/miri/tests/fail/data_race/rmw_race.rs +++ b/src/tools/miri/tests/fail/data_race/rmw_race.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/stack_pop_race.rs b/src/tools/miri/tests/fail/data_race/stack_pop_race.rs index 762a8e51f692a..5138bcbf8f74d 100644 --- a/src/tools/miri/tests/fail/data_race/stack_pop_race.rs +++ b/src/tools/miri/tests/fail/data_race/stack_pop_race.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. 
+//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::thread; diff --git a/src/tools/miri/tests/fail/data_race/write_write_race.rs b/src/tools/miri/tests/fail/data_race/write_write_race.rs index 993d8d25b4c17..b1a6b08b4c886 100644 --- a/src/tools/miri/tests/fail/data_race/write_write_race.rs +++ b/src/tools/miri/tests/fail/data_race/write_write_race.rs @@ -1,7 +1,5 @@ -// We want to control preemption here. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::thread::spawn; diff --git a/src/tools/miri/tests/fail/data_race/write_write_race_stack.rs b/src/tools/miri/tests/fail/data_race/write_write_race_stack.rs index 8070a7f4fc2a0..cd21b0a8fa6c1 100644 --- a/src/tools/miri/tests/fail/data_race/write_write_race_stack.rs +++ b/src/tools/miri/tests/fail/data_race/write_write_race_stack.rs @@ -1,6 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows -// Avoid accidental synchronization via address reuse inside `thread::spawn`. -//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 +// We want to control preemption here. Stacked borrows interferes by having its own accesses. +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-stacked-borrows use std::ptr::null_mut; use std::sync::atomic::{AtomicPtr, Ordering}; diff --git a/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_protected_read.rs b/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_protected_read.rs index a6ee7b40c340b..6ff69554387e7 100644 --- a/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_protected_read.rs +++ b/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_protected_read.rs @@ -1,5 +1,4 @@ -// Avoid accidental synchronization via address reuse. -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags:-Zmiri-deterministic-concurrency use std::thread; #[derive(Copy, Clone)] diff --git a/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_read.rs b/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_read.rs index 949f659e7e8e5..f46f13a39e7ad 100644 --- a/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_read.rs +++ b/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_read.rs @@ -1,6 +1,5 @@ //! Make sure that a retag acts like a read for the data race model. -// Avoid accidental synchronization via address reuse. 
-//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags:-Zmiri-deterministic-concurrency #[derive(Copy, Clone)] struct SendPtr(*mut u8); diff --git a/src/tools/miri/tests/fail/tree_borrows/children-can-alias.default.stderr b/src/tools/miri/tests/fail/tree_borrows/children-can-alias.default.stderr deleted file mode 100644 index b9651e21ecece..0000000000000 --- a/src/tools/miri/tests/fail/tree_borrows/children-can-alias.default.stderr +++ /dev/null @@ -1,15 +0,0 @@ -error: Undefined Behavior: entering unreachable code - --> tests/fail/tree_borrows/children-can-alias.rs:LL:CC - | -LL | std::hint::unreachable_unchecked(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ entering unreachable code - | - = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior - = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information - = note: BACKTRACE: - = note: inside `main` at tests/fail/tree_borrows/children-can-alias.rs:LL:CC - -note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace - -error: aborting due to 1 previous error - diff --git a/src/tools/miri/tests/fail/tree_borrows/children-can-alias.rs b/src/tools/miri/tests/fail/tree_borrows/children-can-alias.rs deleted file mode 100644 index d3f272dfbf81d..0000000000000 --- a/src/tools/miri/tests/fail/tree_borrows/children-can-alias.rs +++ /dev/null @@ -1,58 +0,0 @@ -//@revisions: default uniq -//@compile-flags: -Zmiri-tree-borrows -//@[uniq]compile-flags: -Zmiri-unique-is-unique - -//! This is NOT intended behavior. -//! We should eventually find a solution so that the version with `Unique` passes too, -//! otherwise `Unique` is more strict than `&mut`! - -#![feature(ptr_internals)] - -use core::ptr::{Unique, addr_of_mut}; - -fn main() { - let mut data = 0u8; - let raw = addr_of_mut!(data); - unsafe { - raw_children_of_refmut_can_alias(&mut *raw); - raw_children_of_unique_can_alias(Unique::new_unchecked(raw)); - - // Ultimately the intended behavior is that both above tests would - // succeed. - std::hint::unreachable_unchecked(); - //~[default]^ ERROR: entering unreachable code - } -} - -unsafe fn raw_children_of_refmut_can_alias(x: &mut u8) { - let child1 = addr_of_mut!(*x); - let child2 = addr_of_mut!(*x); - // We create two raw aliases of `x`: they have the exact same - // tag and can be used interchangeably. - child1.write(1); - child2.write(2); - child1.write(1); - child2.write(2); -} - -unsafe fn raw_children_of_unique_can_alias(x: Unique) { - let child1 = x.as_ptr(); - let child2 = x.as_ptr(); - // Under `-Zmiri-unique-is-unique`, `Unique` accidentally offers more guarantees - // than `&mut`. Not because it responds differently to accesses but because - // there is no easy way to obtain a copy with the same tag. - // - // The closest (non-hack) attempt is two calls to `as_ptr`. - // - Without `-Zmiri-unique-is-unique`, independent `as_ptr` calls return pointers - // with the same tag that can thus be used interchangeably. - // - With the current implementation of `-Zmiri-unique-is-unique`, they return cousin - // tags with permissions that do not tolerate aliasing. - // Eventually we should make such aliasing allowed in some situations - // (e.g. when there is no protector), which will probably involve - // introducing a new kind of permission. 
- child1.write(1); - child2.write(2); - //~[uniq]^ ERROR: /write access through .* is forbidden/ - child1.write(1); - child2.write(2); -} diff --git a/src/tools/miri/tests/fail/tree_borrows/children-can-alias.uniq.stderr b/src/tools/miri/tests/fail/tree_borrows/children-can-alias.uniq.stderr deleted file mode 100644 index 83c506abb2d73..0000000000000 --- a/src/tools/miri/tests/fail/tree_borrows/children-can-alias.uniq.stderr +++ /dev/null @@ -1,31 +0,0 @@ -error: Undefined Behavior: write access through at ALLOC[0x0] is forbidden - --> tests/fail/tree_borrows/children-can-alias.rs:LL:CC - | -LL | child2.write(2); - | ^^^^^^^^^^^^^^^ write access through at ALLOC[0x0] is forbidden - | - = help: this indicates a potential bug in the program: it performed an invalid operation, but the Tree Borrows rules it violated are still experimental - = help: the accessed tag has state Disabled which forbids this child write access -help: the accessed tag was created here, in the initial state Reserved - --> tests/fail/tree_borrows/children-can-alias.rs:LL:CC - | -LL | let child2 = x.as_ptr(); - | ^^^^^^^^^^ -help: the accessed tag later transitioned to Disabled due to a foreign write access at offsets [0x0..0x1] - --> tests/fail/tree_borrows/children-can-alias.rs:LL:CC - | -LL | child1.write(1); - | ^^^^^^^^^^^^^^^ - = help: this transition corresponds to a loss of read and write permissions - = note: BACKTRACE (of the first span): - = note: inside `raw_children_of_unique_can_alias` at tests/fail/tree_borrows/children-can-alias.rs:LL:CC -note: inside `main` - --> tests/fail/tree_borrows/children-can-alias.rs:LL:CC - | -LL | raw_children_of_unique_can_alias(Unique::new_unchecked(raw)); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace - -error: aborting due to 1 previous error - diff --git a/src/tools/miri/tests/fail/tree_borrows/reserved/cell-protected-write.rs b/src/tools/miri/tests/fail/tree_borrows/reserved/cell-protected-write.rs index 7af1a7636fad7..bf963f6a8f774 100644 --- a/src/tools/miri/tests/fail/tree_borrows/reserved/cell-protected-write.rs +++ b/src/tools/miri/tests/fail/tree_borrows/reserved/cell-protected-write.rs @@ -12,16 +12,16 @@ use std::cell::UnsafeCell; fn main() { unsafe { let n = &mut UnsafeCell::new(0u8); - name!(n.get(), "base"); + name!(n as *mut _, "base"); let x = &mut *(n as *mut UnsafeCell<_>); - name!(x.get(), "x"); - let y = (&mut *n).get(); + name!(x as *mut _, "x"); + let y = (&mut *n) as *mut UnsafeCell<_> as *mut _; name!(y); write_second(x, y); unsafe fn write_second(x: &mut UnsafeCell, y: *mut u8) { let alloc_id = alloc_id!(x.get()); - name!(x.get(), "callee:x"); - name!(x.get()=>1, "caller:x"); + name!(x as *mut _, "callee:x"); + name!((x as *mut _)=>1, "caller:x"); name!(y, "callee:y"); name!(y, "caller:y"); print_state!(alloc_id); diff --git a/src/tools/miri/tests/fail/tree_borrows/reserved/cell-protected-write.stderr b/src/tools/miri/tests/fail/tree_borrows/reserved/cell-protected-write.stderr index 03f79fe0a5d74..10414df6a6a26 100644 --- a/src/tools/miri/tests/fail/tree_borrows/reserved/cell-protected-write.stderr +++ b/src/tools/miri/tests/fail/tree_borrows/reserved/cell-protected-write.stderr @@ -21,7 +21,7 @@ LL | *y = 1; help: the accessed tag was created here --> tests/fail/tree_borrows/reserved/cell-protected-write.rs:LL:CC | -LL | let y = (&mut *n).get(); +LL | let y = (&mut *n) as *mut UnsafeCell<_> as *mut _; | ^^^^^^^^^ help: the 
protected tag was created here, in the initial state Reserved --> tests/fail/tree_borrows/reserved/cell-protected-write.rs:LL:CC diff --git a/src/tools/miri/tests/fail/tree_borrows/reservedim_spurious_write.rs b/src/tools/miri/tests/fail/tree_borrows/reservedim_spurious_write.rs index 73f227fee2fcb..024a14600b1ba 100644 --- a/src/tools/miri/tests/fail/tree_borrows/reservedim_spurious_write.rs +++ b/src/tools/miri/tests/fail/tree_borrows/reservedim_spurious_write.rs @@ -1,7 +1,7 @@ // Illustrating a problematic interaction between Reserved, interior mutability, // and protectors, that makes spurious writes fail in the previous model of Tree Borrows. // As for all similar tests, we disable preemption so that the error message is deterministic. -//@compile-flags: -Zmiri-tree-borrows -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-tree-borrows -Zmiri-deterministic-concurrency // // One revision without spurious read (default source code) and one with spurious read. // Both are expected to be UB. Both revisions are expected to have the *same* error diff --git a/src/tools/miri/tests/fail/tree_borrows/spurious_read.rs b/src/tools/miri/tests/fail/tree_borrows/spurious_read.rs index 50ef0ceef91e7..4ca0fb9433813 100644 --- a/src/tools/miri/tests/fail/tree_borrows/spurious_read.rs +++ b/src/tools/miri/tests/fail/tree_borrows/spurious_read.rs @@ -1,8 +1,8 @@ // We ensure a deterministic execution. // Note that we are *also* using barriers: the barriers enforce the -// specific interleaving of operations that we want, but only the preemption -// rate guarantees that the error message is also deterministic. -//@compile-flags: -Zmiri-preemption-rate=0 +// specific interleaving of operations that we want, but we need to disable +// preemption to ensure that the error message is also deterministic. +//@compile-flags: -Zmiri-deterministic-concurrency //@compile-flags: -Zmiri-tree-borrows use std::sync::{Arc, Barrier}; diff --git a/src/tools/miri/tests/fail/tree_borrows/unique.rs b/src/tools/miri/tests/fail/tree_borrows/unique.rs deleted file mode 100644 index 0844dd21a5923..0000000000000 --- a/src/tools/miri/tests/fail/tree_borrows/unique.rs +++ /dev/null @@ -1,27 +0,0 @@ -//@revisions: default uniq -//@compile-flags: -Zmiri-tree-borrows -//@[uniq]compile-flags: -Zmiri-unique-is-unique - -// A pattern that detects if `Unique` is treated as exclusive or not: -// activate the pointer behind a `Unique` then do a read that is parent -// iff `Unique` was specially reborrowed. 
- -#![feature(ptr_internals)] -use core::ptr::Unique; - -fn main() { - let mut data = 0u8; - let refmut = &mut data; - let rawptr = refmut as *mut u8; - - unsafe { - let uniq = Unique::new_unchecked(rawptr); - *uniq.as_ptr() = 1; // activation - let _maybe_parent = *rawptr; // maybe becomes Frozen - *uniq.as_ptr() = 2; - //~[uniq]^ ERROR: /write access through .* is forbidden/ - let _definitely_parent = data; // definitely Frozen by now - *uniq.as_ptr() = 3; - //~[default]^ ERROR: /write access through .* is forbidden/ - } -} diff --git a/src/tools/miri/tests/fail/tree_borrows/unique.uniq.stderr b/src/tools/miri/tests/fail/tree_borrows/unique.uniq.stderr deleted file mode 100644 index 4ecff3ea0e1f1..0000000000000 --- a/src/tools/miri/tests/fail/tree_borrows/unique.uniq.stderr +++ /dev/null @@ -1,38 +0,0 @@ -error: Undefined Behavior: write access through at ALLOC[0x0] is forbidden - --> tests/fail/tree_borrows/unique.rs:LL:CC - | -LL | *uniq.as_ptr() = 2; - | ^^^^^^^^^^^^^^^^^^ write access through at ALLOC[0x0] is forbidden - | - = help: this indicates a potential bug in the program: it performed an invalid operation, but the Tree Borrows rules it violated are still experimental - = help: the accessed tag is a child of the conflicting tag - = help: the conflicting tag has state Frozen which forbids this child write access -help: the accessed tag was created here - --> tests/fail/tree_borrows/unique.rs:LL:CC - | -LL | *uniq.as_ptr() = 2; - | ^^^^^^^^^^^^^ -help: the conflicting tag was created here, in the initial state Reserved - --> tests/fail/tree_borrows/unique.rs:LL:CC - | -LL | let uniq = Unique::new_unchecked(rawptr); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -help: the conflicting tag later transitioned to Active due to a child write access at offsets [0x0..0x1] - --> tests/fail/tree_borrows/unique.rs:LL:CC - | -LL | *uniq.as_ptr() = 1; // activation - | ^^^^^^^^^^^^^^^^^^ - = help: this transition corresponds to the first write to a 2-phase borrowed mutable reference -help: the conflicting tag later transitioned to Frozen due to a foreign read access at offsets [0x0..0x1] - --> tests/fail/tree_borrows/unique.rs:LL:CC - | -LL | let _maybe_parent = *rawptr; // maybe becomes Frozen - | ^^^^^^^ - = help: this transition corresponds to a loss of write permissions - = note: BACKTRACE (of the first span): - = note: inside `main` at tests/fail/tree_borrows/unique.rs:LL:CC - -note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace - -error: aborting due to 1 previous error - diff --git a/src/tools/miri/tests/fail/weak_memory/weak_uninit.rs b/src/tools/miri/tests/fail/weak_memory/weak_uninit.rs index 79c97a5b75273..b4b4b08498773 100644 --- a/src/tools/miri/tests/fail/weak_memory/weak_uninit.rs +++ b/src/tools/miri/tests/fail/weak_memory/weak_uninit.rs @@ -1,4 +1,4 @@ -//@compile-flags: -Zmiri-ignore-leaks -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-ignore-leaks -Zmiri-fixed-schedule // Tests showing weak memory behaviours are exhibited. All tests // return true when the desired behaviour is seen. 
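All of the fail-test hunks above perform the same migration: the old combination of scheduler-pinning flags (`-Zmiri-preemption-rate=0`, often together with `-Zmiri-address-reuse-cross-thread-rate=0` and `-Zmiri-disable-weak-memory-emulation`) is replaced by the single `-Zmiri-deterministic-concurrency` flag, while `weak_uninit.rs` deliberately takes only `-Zmiri-fixed-schedule` (see the note at the end of this patch). As a minimal sketch of the kind of test being migrated, reusing the `SendPtr` wrapper pattern from the retag tests above (the sketch itself is not part of this patch, and the comments only indicate where Miri is expected to report the race):

    //@compile-flags: -Zmiri-deterministic-concurrency
    use std::thread;

    // Wrapper to smuggle a raw pointer across the `thread::spawn` boundary.
    #[derive(Copy, Clone)]
    struct SendPtr(*mut u32);
    unsafe impl Send for SendPtr {}

    fn main() {
        let mut data = 0u32;
        let p = SendPtr(&mut data as *mut u32);
        let child = thread::spawn(move || unsafe {
            // Unsynchronized write, racing with the write on the main thread.
            p.0.write(1);
        });
        unsafe { p.0.write(2) };
        child.join().unwrap();
    }

With a seeded random scheduler, which of the two writes gets reported, and under which stack trace, could shift whenever seed-sensitive behavior changed; a deterministic schedule keeps the expected-error annotations of such tests stable across Miri versions and targets.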
diff --git a/src/tools/miri/tests/pass-dep/concurrency/apple-futex.rs b/src/tools/miri/tests/pass-dep/concurrency/apple-futex.rs index becb90eb92307..a28f08c3bb1a0 100644 --- a/src/tools/miri/tests/pass-dep/concurrency/apple-futex.rs +++ b/src/tools/miri/tests/pass-dep/concurrency/apple-futex.rs @@ -1,5 +1,5 @@ //@only-target: darwin -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::time::{Duration, Instant}; use std::{io, ptr, thread}; diff --git a/src/tools/miri/tests/pass-dep/concurrency/env-cleanup-data-race.rs b/src/tools/miri/tests/pass-dep/concurrency/env-cleanup-data-race.rs index c9c9dc5dfd2ac..91cf24a944ad6 100644 --- a/src/tools/miri/tests/pass-dep/concurrency/env-cleanup-data-race.rs +++ b/src/tools/miri/tests/pass-dep/concurrency/env-cleanup-data-race.rs @@ -1,4 +1,4 @@ -//@compile-flags: -Zmiri-disable-isolation -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-disable-isolation -Zmiri-deterministic-concurrency //@ignore-target: windows # No libc env support on Windows use std::ffi::CStr; diff --git a/src/tools/miri/tests/pass-dep/concurrency/freebsd-futex.rs b/src/tools/miri/tests/pass-dep/concurrency/freebsd-futex.rs index 38a0bf58148ed..b10ded8e4d008 100644 --- a/src/tools/miri/tests/pass-dep/concurrency/freebsd-futex.rs +++ b/src/tools/miri/tests/pass-dep/concurrency/freebsd-futex.rs @@ -1,5 +1,5 @@ //@only-target: freebsd -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-isolation +//@compile-flags: -Zmiri-deterministic-concurrency -Zmiri-disable-isolation use std::mem::{self, MaybeUninit}; use std::ptr::{self, addr_of}; diff --git a/src/tools/miri/tests/pass-dep/concurrency/windows_detach_terminated.rs b/src/tools/miri/tests/pass-dep/concurrency/windows_detach_terminated.rs index fe2d20bb76f8e..41b26c84393d2 100644 --- a/src/tools/miri/tests/pass-dep/concurrency/windows_detach_terminated.rs +++ b/src/tools/miri/tests/pass-dep/concurrency/windows_detach_terminated.rs @@ -1,6 +1,6 @@ //@only-target: windows # Uses win32 api functions // We are making scheduler assumptions here. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::os::windows::io::IntoRawHandle; use std::thread; diff --git a/src/tools/miri/tests/pass-dep/concurrency/windows_init_once.rs b/src/tools/miri/tests/pass-dep/concurrency/windows_init_once.rs index 6853395686aa9..0a1fb11750794 100644 --- a/src/tools/miri/tests/pass-dep/concurrency/windows_init_once.rs +++ b/src/tools/miri/tests/pass-dep/concurrency/windows_init_once.rs @@ -1,6 +1,6 @@ //@only-target: windows # Uses win32 api functions // We are making scheduler assumptions here. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::ptr::null_mut; use std::thread; diff --git a/src/tools/miri/tests/pass-dep/concurrency/windows_join_multiple.rs b/src/tools/miri/tests/pass-dep/concurrency/windows_join_multiple.rs index 2796541a3d728..acd9270ad16ad 100644 --- a/src/tools/miri/tests/pass-dep/concurrency/windows_join_multiple.rs +++ b/src/tools/miri/tests/pass-dep/concurrency/windows_join_multiple.rs @@ -1,6 +1,6 @@ //@only-target: windows # Uses win32 api functions // We are making scheduler assumptions here. 
-//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::os::windows::io::IntoRawHandle; use std::sync::atomic::{AtomicBool, Ordering}; diff --git a/src/tools/miri/tests/pass-dep/libc/libc-epoll-blocking.rs b/src/tools/miri/tests/pass-dep/libc/libc-epoll-blocking.rs index 825e1355848bf..54ebfa9d198d1 100644 --- a/src/tools/miri/tests/pass-dep/libc/libc-epoll-blocking.rs +++ b/src/tools/miri/tests/pass-dep/libc/libc-epoll-blocking.rs @@ -1,6 +1,6 @@ //@only-target: linux android illumos // test_epoll_block_then_unblock and test_epoll_race depend on a deterministic schedule. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::convert::TryInto; use std::thread; diff --git a/src/tools/miri/tests/pass-dep/libc/libc-eventfd.rs b/src/tools/miri/tests/pass-dep/libc/libc-eventfd.rs index 30e1bbb8fa1b1..56d215d0ed633 100644 --- a/src/tools/miri/tests/pass-dep/libc/libc-eventfd.rs +++ b/src/tools/miri/tests/pass-dep/libc/libc-eventfd.rs @@ -1,6 +1,6 @@ //@only-target: linux android illumos // test_race, test_blocking_read and test_blocking_write depend on a deterministic schedule. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency // FIXME(static_mut_refs): Do not allow `static_mut_refs` lint #![allow(static_mut_refs)] diff --git a/src/tools/miri/tests/pass-dep/libc/libc-pipe.rs b/src/tools/miri/tests/pass-dep/libc/libc-pipe.rs index d6072c2569e92..05f6c870c3d77 100644 --- a/src/tools/miri/tests/pass-dep/libc/libc-pipe.rs +++ b/src/tools/miri/tests/pass-dep/libc/libc-pipe.rs @@ -1,6 +1,6 @@ //@ignore-target: windows # No libc pipe on Windows // test_race depends on a deterministic schedule. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::thread; fn main() { test_pipe(); diff --git a/src/tools/miri/tests/pass-dep/libc/libc-socketpair.rs b/src/tools/miri/tests/pass-dep/libc/libc-socketpair.rs index 9163fd3d06fa6..9e48410f7045e 100644 --- a/src/tools/miri/tests/pass-dep/libc/libc-socketpair.rs +++ b/src/tools/miri/tests/pass-dep/libc/libc-socketpair.rs @@ -1,6 +1,6 @@ //@ignore-target: windows # No libc socketpair on Windows // test_race depends on a deterministic schedule. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency // FIXME(static_mut_refs): Do not allow `static_mut_refs` lint #![allow(static_mut_refs)] diff --git a/src/tools/miri/tests/pass-dep/libc/pthread-sync.rs b/src/tools/miri/tests/pass-dep/libc/pthread-sync.rs index fa11b5b129906..255944662940d 100644 --- a/src/tools/miri/tests/pass-dep/libc/pthread-sync.rs +++ b/src/tools/miri/tests/pass-dep/libc/pthread-sync.rs @@ -1,6 +1,6 @@ //@ignore-target: windows # No pthreads on Windows // We use `yield` to test specific interleavings, so disable automatic preemption. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency #![feature(sync_unsafe_cell)] use std::cell::SyncUnsafeCell; diff --git a/src/tools/miri/tests/pass/stacked-borrows/2phase.rs b/src/tools/miri/tests/pass/both_borrows/2phase.rs similarity index 55% rename from src/tools/miri/tests/pass/stacked-borrows/2phase.rs rename to src/tools/miri/tests/pass/both_borrows/2phase.rs index fb4ba60583736..7a3962a7c1a44 100644 --- a/src/tools/miri/tests/pass/stacked-borrows/2phase.rs +++ b/src/tools/miri/tests/pass/both_borrows/2phase.rs @@ -1,5 +1,5 @@ -// FIXME: this miscompiles with optimizations, see . 
-//@compile-flags: -Zmir-opt-level=0 +//@revisions: stack tree +//@[tree]compile-flags: -Zmiri-tree-borrows trait S: Sized { fn tpb(&mut self, _s: Self) {} @@ -31,9 +31,9 @@ fn two_phase3(b: bool) { )); } -#[allow(unreachable_code)] fn two_phase_raw() { let x: &mut Vec = &mut vec![]; + #[allow(unreachable_code)] // The `push` itself never gets reached. x.push({ // Unfortunately this does not trigger the problem of creating a // raw ponter from a pointer that had a two-phase borrow derived from @@ -59,52 +59,12 @@ fn two_phase_overlapping2() { x.add_assign(x + *l); } -fn with_interior_mutability() { - use std::cell::Cell; - - trait Thing: Sized { - fn do_the_thing(&mut self, _s: i32) {} - } - - impl Thing for Cell {} - - let mut x = Cell::new(1); - let l = &x; - - x.do_the_thing({ - x.set(3); - l.set(4); - x.get() + l.get() - }); -} - -// This one really shouldn't be accepted, but since we treat 2phase as raw, we do accept it. -// Tree Borrows rejects it. -fn aliasing_violation() { - struct Foo(u64); - impl Foo { - fn add(&mut self, n: u64) -> u64 { - self.0 + n - } - } - - let mut f = Foo(0); - let alias = &mut f.0 as *mut u64; - let res = f.add(unsafe { - *alias = 42; - 0 - }); - assert_eq!(res, 42); -} - fn main() { two_phase1(); two_phase2(); two_phase3(false); two_phase3(true); two_phase_raw(); - with_interior_mutability(); two_phase_overlapping1(); two_phase_overlapping2(); - aliasing_violation(); } diff --git a/src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.rs b/src/tools/miri/tests/pass/both_borrows/basic_aliasing_model.rs similarity index 90% rename from src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.rs rename to src/tools/miri/tests/pass/both_borrows/basic_aliasing_model.rs index 4261f411eea47..c2b6a7e68be50 100644 --- a/src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.rs +++ b/src/tools/miri/tests/pass/both_borrows/basic_aliasing_model.rs @@ -1,14 +1,15 @@ +//@revisions: stack tree +//@[tree]compile-flags: -Zmiri-tree-borrows #![feature(allocator_api)] use std::ptr; -// Test various stacked-borrows-related things. +// Test various aliasing-model-related things. fn main() { read_does_not_invalidate1(); read_does_not_invalidate2(); mut_raw_then_mut_shr(); mut_shr_then_mut_raw(); mut_raw_mut(); - mut_raw_mut2(); partially_invalidate_mut(); drop_after_sharing(); // direct_mut_to_const_raw(); @@ -97,18 +98,6 @@ fn mut_raw_mut() { assert_eq!(x, 4); } -// A variant of `mut_raw_mut` that does *not* get accepted by Tree Borrows. -// It's kind of an accident that we accept it in Stacked Borrows... -fn mut_raw_mut2() { - unsafe { - let mut root = 0; - let to = &mut root as *mut i32; - *to = 0; - let _val = root; - *to = 0; - } -} - fn partially_invalidate_mut() { let data = &mut (0u8, 0u8); let reborrow = &mut *data as *mut (u8, u8); @@ -124,15 +113,6 @@ fn drop_after_sharing() { let _len = x.len(); } -// Make sure that coercing &mut T to *const T produces a writeable pointer. -// TODO: This is currently disabled, waiting on a decision on -/*fn direct_mut_to_const_raw() { - let x = &mut 0; - let y: *const i32 = x; - unsafe { *(y as *mut i32) = 1; } - assert_eq!(*x, 1); -}*/ - // Make sure that we can create two raw pointers from a mutable reference and use them both. 
fn two_raw() { unsafe { @@ -178,7 +158,7 @@ fn disjoint_mutable_subborrows() { let b = unsafe { borrow_field_b(ptr) }; b.push(4); a.push_str(" world"); - eprintln!("{:?} {:?}", a, b); + assert_eq!(format!("{:?} {:?}", a, b), r#""hello world" [0, 1, 2, 4]"#); } fn raw_ref_to_part() { @@ -243,7 +223,7 @@ fn not_unpin_not_protected() { pub struct NotUnpin(#[allow(dead_code)] i32, PhantomPinned); fn inner(x: &mut NotUnpin, f: fn(&mut NotUnpin)) { - // `f` may mutate, but it may not deallocate! + // `f` is allowed to deallocate `x`. f(x) } diff --git a/src/tools/miri/tests/pass/stacked-borrows/interior_mutability.rs b/src/tools/miri/tests/pass/both_borrows/interior_mutability.rs similarity index 84% rename from src/tools/miri/tests/pass/stacked-borrows/interior_mutability.rs rename to src/tools/miri/tests/pass/both_borrows/interior_mutability.rs index e86cb3711acb0..f095e215e0043 100644 --- a/src/tools/miri/tests/pass/stacked-borrows/interior_mutability.rs +++ b/src/tools/miri/tests/pass/both_borrows/interior_mutability.rs @@ -1,3 +1,5 @@ +//@revisions: stack tree +//@[tree]compile-flags: -Zmiri-tree-borrows #![allow(dangerous_implicit_autorefs)] use std::cell::{Cell, Ref, RefCell, RefMut, UnsafeCell}; @@ -14,6 +16,7 @@ fn main() { ref_protector(); ref_mut_protector(); rust_issue_68303(); + two_phase(); } fn aliasing_mut_and_shr() { @@ -102,7 +105,14 @@ fn unsafe_cell_invalidate() { let ref1 = unsafe { &mut *raw1 }; let raw2 = ref1 as *mut _; // Now the borrow stack is: raw1, ref2, raw2. - // So using raw1 invalidates raw2. + // + // For TB, the tree is + // + // Act x + // Res `- raw1 + // Res `- ref1, raw2 + // + // Either way, using raw1 invalidates raw2. f(unsafe { mem::transmute(raw2) }, raw1); } @@ -138,7 +148,7 @@ fn refcell_basic() { } } -// Adding a Stacked Borrows protector for `Ref` would break this +// Adding a protector for `Ref` would break this fn ref_protector() { fn break_it(rc: &RefCell, r: Ref<'_, i32>) { // `r` has a shared reference, it is passed in as argument and hence @@ -174,3 +184,27 @@ fn rust_issue_68303() { assert!(optional.is_some()); *handle = true; } + +fn two_phase() { + use std::cell::Cell; + + trait Thing: Sized { + fn do_the_thing(&mut self, _s: i32) {} + } + + impl Thing for Cell {} + + let mut x = Cell::new(1); + let l = &x; + + x.do_the_thing({ + // In TB terms: + // Several Foreign accesses (both Reads and Writes) to the location + // being reborrowed. Reserved + unprotected + interior mut + // makes the pointer immune to everything as long as all accesses + // are child accesses to its parent pointer x. + x.set(3); + l.set(4); + x.get() + l.get() + }); +} diff --git a/src/tools/miri/tests/pass/concurrency/data_race.rs b/src/tools/miri/tests/pass/concurrency/data_race.rs index d16de0ae8e232..d5dd1deb2d9dd 100644 --- a/src/tools/miri/tests/pass/concurrency/data_race.rs +++ b/src/tools/miri/tests/pass/concurrency/data_race.rs @@ -1,4 +1,5 @@ -//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 +// This tests carefully crafted schedules to ensure they are not considered races. 
+//@compile-flags: -Zmiri-deterministic-concurrency use std::sync::atomic::*; use std::thread::{self, spawn}; diff --git a/src/tools/miri/tests/pass/concurrency/disable_data_race_detector.rs b/src/tools/miri/tests/pass/concurrency/disable_data_race_detector.rs index 354a4bef932e9..ecc4ca59bd18a 100644 --- a/src/tools/miri/tests/pass/concurrency/disable_data_race_detector.rs +++ b/src/tools/miri/tests/pass/concurrency/disable_data_race_detector.rs @@ -1,6 +1,6 @@ //@compile-flags: -Zmiri-disable-data-race-detector // Avoid non-determinism -//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::thread::spawn; diff --git a/src/tools/miri/tests/pass/concurrency/spin_loops_nopreempt.rs b/src/tools/miri/tests/pass/concurrency/spin_loops_nopreempt.rs index 44b16e1ac74d4..4361f1da92407 100644 --- a/src/tools/miri/tests/pass/concurrency/spin_loops_nopreempt.rs +++ b/src/tools/miri/tests/pass/concurrency/spin_loops_nopreempt.rs @@ -1,5 +1,5 @@ // This specifically tests behavior *without* preemption. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::cell::Cell; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; diff --git a/src/tools/miri/tests/pass/concurrency/sync.rs b/src/tools/miri/tests/pass/concurrency/sync.rs index 91c67b215a1a9..a92359758dadf 100644 --- a/src/tools/miri/tests/pass/concurrency/sync.rs +++ b/src/tools/miri/tests/pass/concurrency/sync.rs @@ -1,7 +1,7 @@ //@revisions: stack tree //@[tree]compile-flags: -Zmiri-tree-borrows // We use `yield` to test specific interleavings, so disable automatic preemption. -//@compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-disable-isolation -Zmiri-deterministic-concurrency use std::sync::{Arc, Barrier, Condvar, Mutex, Once, RwLock}; use std::thread; diff --git a/src/tools/miri/tests/pass/concurrency/sync_nopreempt.rs b/src/tools/miri/tests/pass/concurrency/sync_nopreempt.rs index c6cff038f81e0..bea8f87243aa7 100644 --- a/src/tools/miri/tests/pass/concurrency/sync_nopreempt.rs +++ b/src/tools/miri/tests/pass/concurrency/sync_nopreempt.rs @@ -1,5 +1,5 @@ // We are making scheduler assumptions here. -//@compile-flags: -Zmiri-strict-provenance -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::sync::{Arc, Condvar, Mutex, RwLock}; use std::thread; diff --git a/src/tools/miri/tests/pass/panic/concurrent-panic.rs b/src/tools/miri/tests/pass/panic/concurrent-panic.rs index e804df90977a9..4bf645960f705 100644 --- a/src/tools/miri/tests/pass/panic/concurrent-panic.rs +++ b/src/tools/miri/tests/pass/panic/concurrent-panic.rs @@ -1,5 +1,5 @@ // We are making scheduler assumptions here. -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency //! Cause a panic in one thread while another thread is unwinding. This checks //! that separate threads have their own panicking state. 
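The renames into `tests/pass/both_borrows/` above rely on the `stack`/`tree` revision mechanism so that a single source file runs under both aliasing models, as `sync.rs` here already does. For reference, a minimal file following that header pattern (illustrative only, not a file added by this patch):

    //@revisions: stack tree
    //@[tree]compile-flags: -Zmiri-tree-borrows

    // The `stack` revision runs under Stacked Borrows (Miri's default);
    // the `tree` revision re-runs the same body under Tree Borrows.
    fn main() {
        let mut x = 0i32;
        let raw = &mut x as *mut i32;
        // Writing through a raw pointer derived from a unique reference and
        // then reading through the original variable is accepted by both models.
        unsafe { *raw += 1 };
        assert_eq!(x, 1);
    }

Tests that only one model accepts stay in a model-specific directory; that is exactly the split between `basic_aliasing_model.rs` (shared) and the stacked-borrows-specific and tree-borrows-specific files touched later in this patch.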
diff --git a/src/tools/miri/tests/pass/shims/env/var.rs b/src/tools/miri/tests/pass/shims/env/var.rs index 655b29674e340..31e59c4a41cbc 100644 --- a/src/tools/miri/tests/pass/shims/env/var.rs +++ b/src/tools/miri/tests/pass/shims/env/var.rs @@ -1,4 +1,4 @@ -//@compile-flags: -Zmiri-preemption-rate=0 +//@compile-flags: -Zmiri-deterministic-concurrency use std::{env, thread}; fn main() { diff --git a/src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.stderr b/src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.stderr deleted file mode 100644 index 8ee4e25dbef84..0000000000000 --- a/src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.stderr +++ /dev/null @@ -1 +0,0 @@ -"hello world" [0, 1, 2, 4] diff --git a/src/tools/miri/tests/pass/stacked-borrows/coroutine-self-referential.rs b/src/tools/miri/tests/pass/stacked_borrows/coroutine-self-referential.rs similarity index 100% rename from src/tools/miri/tests/pass/stacked-borrows/coroutine-self-referential.rs rename to src/tools/miri/tests/pass/stacked_borrows/coroutine-self-referential.rs diff --git a/src/tools/miri/tests/pass/stacked-borrows/int-to-ptr.rs b/src/tools/miri/tests/pass/stacked_borrows/int-to-ptr.rs similarity index 100% rename from src/tools/miri/tests/pass/stacked-borrows/int-to-ptr.rs rename to src/tools/miri/tests/pass/stacked_borrows/int-to-ptr.rs diff --git a/src/tools/miri/tests/pass/stacked-borrows/issue-miri-2389.rs b/src/tools/miri/tests/pass/stacked_borrows/issue-miri-2389.rs similarity index 100% rename from src/tools/miri/tests/pass/stacked-borrows/issue-miri-2389.rs rename to src/tools/miri/tests/pass/stacked_borrows/issue-miri-2389.rs diff --git a/src/tools/miri/tests/pass/stacked-borrows/issue-miri-2389.stderr b/src/tools/miri/tests/pass/stacked_borrows/issue-miri-2389.stderr similarity index 89% rename from src/tools/miri/tests/pass/stacked-borrows/issue-miri-2389.stderr rename to src/tools/miri/tests/pass/stacked_borrows/issue-miri-2389.stderr index bcb7a65e90f05..8ca3c6c618ea7 100644 --- a/src/tools/miri/tests/pass/stacked-borrows/issue-miri-2389.stderr +++ b/src/tools/miri/tests/pass/stacked_borrows/issue-miri-2389.stderr @@ -1,5 +1,5 @@ warning: integer-to-pointer cast - --> tests/pass/stacked-borrows/issue-miri-2389.rs:LL:CC + --> tests/pass/stacked_borrows/issue-miri-2389.rs:LL:CC | LL | let wildcard = &root0 as *const Cell as usize as *const Cell; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ integer-to-pointer cast @@ -10,5 +10,5 @@ LL | let wildcard = &root0 as *const Cell as usize as *const Cell. +//@compile-flags: -Zmir-opt-level=0 + +// Test various stacked-borrows-specific things +// (i.e., these do not work the same under TB). +fn main() { + mut_raw_mut2(); + // direct_mut_to_const_raw(); + two_phase_aliasing_violation(); +} + +// A variant of `mut_raw_mut` that does *not* get accepted by Tree Borrows. +// It's kind of an accident that we accept it in Stacked Borrows... +fn mut_raw_mut2() { + unsafe { + let mut root = 0; + let to = &mut root as *mut i32; + *to = 0; + let _val = root; + *to = 0; + } +} + +// Make sure that coercing &mut T to *const T produces a writeable pointer. +// TODO: This is currently disabled, waiting on a decision on +/*fn direct_mut_to_const_raw() { + let x = &mut 0; + let y: *const i32 = x; + unsafe { *(y as *mut i32) = 1; } + assert_eq!(*x, 1); +}*/ + +// This one really shouldn't be accepted, but since we treat 2phase as raw, we do accept it. +// Tree Borrows rejects it. 
+fn two_phase_aliasing_violation() { + struct Foo(u64); + impl Foo { + fn add(&mut self, n: u64) -> u64 { + self.0 + n + } + } + + let mut f = Foo(0); + let alias = &mut f.0 as *mut u64; + let res = f.add(unsafe { + *alias = 42; + 0 + }); + assert_eq!(res, 42); +} diff --git a/src/tools/miri/tests/pass/stacked-borrows/unknown-bottom-gc.rs b/src/tools/miri/tests/pass/stacked_borrows/unknown-bottom-gc.rs similarity index 100% rename from src/tools/miri/tests/pass/stacked-borrows/unknown-bottom-gc.rs rename to src/tools/miri/tests/pass/stacked_borrows/unknown-bottom-gc.rs diff --git a/src/tools/miri/tests/pass/stacked-borrows/zst-field-retagging-terminates.rs b/src/tools/miri/tests/pass/stacked_borrows/zst-field-retagging-terminates.rs similarity index 100% rename from src/tools/miri/tests/pass/stacked-borrows/zst-field-retagging-terminates.rs rename to src/tools/miri/tests/pass/stacked_borrows/zst-field-retagging-terminates.rs diff --git a/src/tools/miri/tests/pass/tree_borrows/2phase-interiormut.rs b/src/tools/miri/tests/pass/tree_borrows/2phase-interiormut.rs deleted file mode 100644 index af52f53791a4f..0000000000000 --- a/src/tools/miri/tests/pass/tree_borrows/2phase-interiormut.rs +++ /dev/null @@ -1,27 +0,0 @@ -//@compile-flags: -Zmiri-tree-borrows - -// Counterpart to tests/fail/tree-borrows/write-during-2phase.rs, -// this is the opposite situation: the Write is not problematic because -// the Protector has not yet been added and the Reserved has interior -// mutability. -use core::cell::Cell; - -trait Thing: Sized { - fn do_the_thing(&mut self, _s: i32) {} -} -impl Thing for Cell {} - -fn main() { - let mut x = Cell::new(1); - let l = &x; - - x.do_the_thing({ - // Several Foreign accesses (both Reads and Writes) to the location - // being reborrowed. Reserved + unprotected + interior mut - // makes the pointer immune to everything as long as all accesses - // are child accesses to its parent pointer x. - x.set(3); - l.set(4); - x.get() + l.get() - }); -} diff --git a/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.rs b/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.rs index 3269acb511938..019ea369811d9 100644 --- a/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.rs +++ b/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.rs @@ -6,16 +6,16 @@ mod utils; use std::cell::UnsafeCell; -// UnsafeCells use the parent tag, so it is possible to use them with +// UnsafeCells use the `Cell` state, so it is possible to use them with // few restrictions when only among themselves. fn main() { unsafe { let data = &mut UnsafeCell::new(0u8); - name!(data.get(), "data"); + name!(data as *mut _, "data"); let x = &*data; - name!(x.get(), "x"); + name!(x as *const _, "x"); let y = &*data; - name!(y.get(), "y"); + name!(y as *const _, "y"); let alloc_id = alloc_id!(data.get()); print_state!(alloc_id); // y and x tolerate alternating Writes diff --git a/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.stderr b/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.stderr index d13e9ad0215bc..75a30c9a08375 100644 --- a/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.stderr +++ b/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.stderr @@ -2,11 +2,15 @@ Warning: this tree is indicative only. Some tags may have been hidden. 0.. 
1 | Act | └─┬── -| ReIM| └──── +| ReIM| └─┬── +| Cel | ├──── +| Cel | └──── ────────────────────────────────────────────────── ────────────────────────────────────────────────── Warning: this tree is indicative only. Some tags may have been hidden. 0.. 1 | Act | └─┬── -| Act | └──── +| Act | └─┬── +| Cel | ├──── +| Cel | └──── ────────────────────────────────────────────────── diff --git a/src/tools/miri/tests/pass/tree_borrows/cell-inside-box.rs b/src/tools/miri/tests/pass/tree_borrows/cell-inside-box.rs new file mode 100644 index 0000000000000..adf2f4e845b5b --- /dev/null +++ b/src/tools/miri/tests/pass/tree_borrows/cell-inside-box.rs @@ -0,0 +1,35 @@ +//@compile-flags: -Zmiri-tree-borrows +#![feature(box_as_ptr)] +#[path = "../../utils/mod.rs"] +#[macro_use] +mod utils; + +use std::cell::UnsafeCell; + +pub fn main() { + let cell = UnsafeCell::new(42); + let box1 = Box::new(cell); + + unsafe { + let ptr1: *mut UnsafeCell = Box::into_raw(box1); + name!(ptr1); + + let mut box2 = Box::from_raw(ptr1); + // `ptr2` will be a descendant of `ptr1`. + let ptr2: *mut UnsafeCell = Box::as_mut_ptr(&mut box2); + name!(ptr2); + + // We perform a write through `x`. + // Because `ptr1` is ReservedIM, a child write will make it transition to Active. + // Because `ptr2` is ReservedIM, a foreign write doesn't have any effect on it. + let x = (*ptr1).get(); + *x = 1; + + // We can still read from `ptr2`. + let val = *(*ptr2).get(); + assert_eq!(val, 1); + + let alloc_id = alloc_id!(ptr1); + print_state!(alloc_id); + } +} diff --git a/src/tools/miri/tests/pass/tree_borrows/vec_unique.uniq.stderr b/src/tools/miri/tests/pass/tree_borrows/cell-inside-box.stderr similarity index 67% rename from src/tools/miri/tests/pass/tree_borrows/vec_unique.uniq.stderr rename to src/tools/miri/tests/pass/tree_borrows/cell-inside-box.stderr index 7942e9884f401..5dbfff718b1e6 100644 --- a/src/tools/miri/tests/pass/tree_borrows/vec_unique.uniq.stderr +++ b/src/tools/miri/tests/pass/tree_borrows/cell-inside-box.stderr @@ -1,8 +1,7 @@ ────────────────────────────────────────────────── Warning: this tree is indicative only. Some tags may have been hidden. -0.. 2 +0.. 
4 | Act | └─┬── -|-----| └─┬── -|-----| └─┬── -|-----| └──── +| Act | └─┬── +| ReIM| └──── ────────────────────────────────────────────────── diff --git a/src/tools/miri/tests/pass/tree_borrows/interior_mutability.rs b/src/tools/miri/tests/pass/tree_borrows/interior_mutability.rs deleted file mode 100644 index 6dde593d2cfc2..0000000000000 --- a/src/tools/miri/tests/pass/tree_borrows/interior_mutability.rs +++ /dev/null @@ -1,178 +0,0 @@ -//@revisions: default uniq -//@compile-flags: -Zmiri-tree-borrows -//@[uniq]compile-flags: -Zmiri-unique-is-unique -#![allow(dangerous_implicit_autorefs)] -use std::cell::{Cell, Ref, RefCell, RefMut, UnsafeCell}; -use std::mem::{self, MaybeUninit}; - -fn main() { - aliasing_mut_and_shr(); - aliasing_frz_and_shr(); - into_interior_mutability(); - unsafe_cell_2phase(); - unsafe_cell_deallocate(); - unsafe_cell_invalidate(); - refcell_basic(); - ref_protector(); - ref_mut_protector(); - rust_issue_68303(); -} - -fn aliasing_mut_and_shr() { - fn inner(rc: &RefCell, aliasing: &mut i32) { - *aliasing += 4; - let _escape_to_raw = rc as *const _; - *aliasing += 4; - let _shr = &*rc; - *aliasing += 4; - // also turning this into a frozen ref now must work - let aliasing = &*aliasing; - let _val = *aliasing; - let _escape_to_raw = rc as *const _; // this must NOT unfreeze - let _val = *aliasing; - let _shr = &*rc; // this must NOT unfreeze - let _val = *aliasing; - } - - let rc = RefCell::new(23); - let mut bmut = rc.borrow_mut(); - inner(&rc, &mut *bmut); - drop(bmut); - assert_eq!(*rc.borrow(), 23 + 12); -} - -fn aliasing_frz_and_shr() { - fn inner(rc: &RefCell, aliasing: &i32) { - let _val = *aliasing; - let _escape_to_raw = rc as *const _; // this must NOT unfreeze - let _val = *aliasing; - let _shr = &*rc; // this must NOT unfreeze - let _val = *aliasing; - } - - let rc = RefCell::new(23); - let bshr = rc.borrow(); - inner(&rc, &*bshr); - assert_eq!(*rc.borrow(), 23); -} - -// Getting a pointer into a union with interior mutability used to be tricky -// business (https://github.com/rust-lang/miri/issues/615), but it should work -// now. -fn into_interior_mutability() { - let mut x: MaybeUninit<(Cell, u32)> = MaybeUninit::uninit(); - x.as_ptr(); - x.write((Cell::new(0), 1)); - let ptr = unsafe { x.assume_init_ref() }; - assert_eq!(ptr.1, 1); -} - -// Two-phase borrows of the pointer returned by UnsafeCell::get() should not -// invalidate aliases. -fn unsafe_cell_2phase() { - unsafe { - let x = &UnsafeCell::new(vec![]); - let x2 = &*x; - (*x.get()).push(0); - let _val = (*x2.get()).get(0); - } -} - -/// Make sure we can deallocate an UnsafeCell that was passed to an active fn call. -/// (This is the fix for https://github.com/rust-lang/rust/issues/55005.) -fn unsafe_cell_deallocate() { - fn f(x: &UnsafeCell) { - let b: Box = unsafe { Box::from_raw(x as *const _ as *mut i32) }; - drop(b) - } - - let b = Box::new(0i32); - f(unsafe { mem::transmute(Box::into_raw(b)) }); -} - -/// As a side-effect of the above, we also allow this -- at least for now. -fn unsafe_cell_invalidate() { - fn f(_x: &UnsafeCell, y: *mut i32) { - // Writing to y invalidates x, but that is okay. - unsafe { - *y += 1; - } - } - - let mut x = 0i32; - let raw1 = &mut x as *mut _; - let ref1 = unsafe { &mut *raw1 }; - let raw2 = ref1 as *mut _; - // Now the borrow stack is: raw1, ref2, raw2. - // So using raw1 invalidates raw2. 
- f(unsafe { mem::transmute(raw2) }, raw1); -} - -fn refcell_basic() { - let c = RefCell::new(42); - { - let s1 = c.borrow(); - let _x: i32 = *s1; - let s2 = c.borrow(); - let _x: i32 = *s1; - let _y: i32 = *s2; - let _x: i32 = *s1; - let _y: i32 = *s2; - } - { - let mut m = c.borrow_mut(); - let _z: i32 = *m; - { - let s: &i32 = &*m; - let _x = *s; - } - *m = 23; - let _z: i32 = *m; - } - { - let s1 = c.borrow(); - let _x: i32 = *s1; - let s2 = c.borrow(); - let _x: i32 = *s1; - let _y: i32 = *s2; - let _x: i32 = *s1; - let _y: i32 = *s2; - } -} - -// Adding a Stacked Borrows protector for `Ref` would break this -fn ref_protector() { - fn break_it(rc: &RefCell, r: Ref<'_, i32>) { - // `r` has a shared reference, it is passed in as argument and hence - // a protector is added that marks this memory as read-only for the entire - // duration of this function. - drop(r); - // *oops* here we can mutate that memory. - *rc.borrow_mut() = 2; - } - - let rc = RefCell::new(0); - break_it(&rc, rc.borrow()) -} - -fn ref_mut_protector() { - fn break_it(rc: &RefCell, r: RefMut<'_, i32>) { - // `r` has a shared reference, it is passed in as argument and hence - // a protector is added that marks this memory as inaccessible for the entire - // duration of this function - drop(r); - // *oops* here we can mutate that memory. - *rc.borrow_mut() = 2; - } - - let rc = RefCell::new(0); - break_it(&rc, rc.borrow_mut()) -} - -/// Make sure we do not have bad enum layout optimizations. -fn rust_issue_68303() { - let optional = Some(RefCell::new(false)); - let mut handle = optional.as_ref().unwrap().borrow_mut(); - assert!(optional.is_some()); - *handle = true; -} diff --git a/src/tools/miri/tests/pass/tree_borrows/read_retag_no_race.rs b/src/tools/miri/tests/pass/tree_borrows/read_retag_no_race.rs index d9897a1033fbe..71b7817a9c700 100644 --- a/src/tools/miri/tests/pass/tree_borrows/read_retag_no_race.rs +++ b/src/tools/miri/tests/pass/tree_borrows/read_retag_no_race.rs @@ -2,7 +2,7 @@ // This test relies on a specific interleaving that cannot be enforced // with just barriers. We must remove preemption so that the execution and the // error messages are deterministic. 
-//@compile-flags: -Zmiri-preemption-rate=0
+//@compile-flags: -Zmiri-deterministic-concurrency
 use std::ptr::addr_of_mut;
 use std::sync::{Arc, Barrier};
 use std::thread;
diff --git a/src/tools/miri/tests/pass/tree_borrows/reserved.rs b/src/tools/miri/tests/pass/tree_borrows/reserved.rs
index f93cac8361e31..c57cd7fcf0abe 100644
--- a/src/tools/miri/tests/pass/tree_borrows/reserved.rs
+++ b/src/tools/miri/tests/pass/tree_borrows/reserved.rs
@@ -43,11 +43,11 @@ unsafe fn read_second(x: &mut T, y: *mut u8) {
 unsafe fn cell_protected_read() {
     print("[interior mut + protected] Foreign Read: Re* -> Frz");
     let base = &mut UnsafeCell::new(0u8);
-    name!(base.get(), "base");
+    name!(base as *mut _, "base");
     let alloc_id = alloc_id!(base.get());
     let x = &mut *(base as *mut UnsafeCell);
-    name!(x.get(), "x");
-    let y = (&mut *base).get();
+    name!(x as *mut _, "x");
+    let y = &mut *base as *mut UnsafeCell as *mut u8;
     name!(y);
     read_second(x, y); // Foreign Read for callee:x
     print_state!(alloc_id);
@@ -57,11 +57,11 @@ unsafe fn cell_unprotected_read() {
     print("[interior mut] Foreign Read: Re* -> Re*");
     let base = &mut UnsafeCell::new(0u64);
-    name!(base.get(), "base");
+    name!(base as *mut _, "base");
     let alloc_id = alloc_id!(base.get());
     let x = &mut *(base as *mut UnsafeCell<_>);
-    name!(x.get(), "x");
-    let y = (&mut *base).get();
+    name!(x as *mut _, "x");
+    let y = &mut *base as *mut UnsafeCell as *mut u64;
     name!(y);
     let _val = *y; // Foreign Read for x
     print_state!(alloc_id);
@@ -72,11 +72,11 @@ unsafe fn cell_unprotected_write() {
     print("[interior mut] Foreign Write: Re* -> Re*");
     let base = &mut UnsafeCell::new(0u64);
-    name!(base.get(), "base");
+    name!(base as *mut _, "base");
     let alloc_id = alloc_id!(base.get());
     let x = &mut *(base as *mut UnsafeCell);
-    name!(x.get(), "x");
-    let y = (&mut *base).get();
+    name!(x as *mut _, "x");
+    let y = &mut *base as *mut UnsafeCell as *mut u64;
     name!(y);
     *y = 1; // Foreign Write for x
     print_state!(alloc_id);
diff --git a/src/tools/miri/tests/pass/tree_borrows/spurious_read.rs b/src/tools/miri/tests/pass/tree_borrows/spurious_read.rs
index 71e93d2f84f57..840832c633cf5 100644
--- a/src/tools/miri/tests/pass/tree_borrows/spurious_read.rs
+++ b/src/tools/miri/tests/pass/tree_borrows/spurious_read.rs
@@ -2,7 +2,7 @@
 // Note that we are *also* using barriers: the barriers enforce the
-// specific interleaving of operations that we want, but only the preemption
-// rate guarantees that the error message is also deterministic.
-//@compile-flags: -Zmiri-preemption-rate=0
+// specific interleaving of operations that we want, but we need to disable
+// preemption to ensure that the error message is also deterministic.
+//@compile-flags: -Zmiri-deterministic-concurrency
 //@compile-flags: -Zmiri-tree-borrows
 
 use std::sync::{Arc, Barrier};
diff --git a/src/tools/miri/tests/pass/tree_borrows/tree-borrows.rs b/src/tools/miri/tests/pass/tree_borrows/tree-borrows.rs
index b9d5ca06ed058..87eb447049d61 100644
--- a/src/tools/miri/tests/pass/tree_borrows/tree-borrows.rs
+++ b/src/tools/miri/tests/pass/tree_borrows/tree-borrows.rs
@@ -1,10 +1,10 @@
-//@revisions: default uniq
 //@compile-flags: -Zmiri-tree-borrows
-//@[uniq]compile-flags: -Zmiri-unique-is-unique
 #![feature(allocator_api)]
 
 use std::{mem, ptr};
 
+// Test various tree-borrows-specific things
+// (i.e., these do not work the same under SB).
fn main() { aliasing_read_only_mutable_refs(); string_as_mut_ptr(); @@ -12,24 +12,6 @@ fn main() { direct_mut_to_const_raw(); local_addr_of_mut(); returned_mut_is_usable(); - - // Stacked Borrows tests - read_does_not_invalidate1(); - read_does_not_invalidate2(); - mut_raw_then_mut_shr(); - mut_shr_then_mut_raw(); - mut_raw_mut(); - partially_invalidate_mut(); - drop_after_sharing(); - two_raw(); - shr_and_raw(); - disjoint_mutable_subborrows(); - raw_ref_to_part(); - array_casts(); - mut_below_shr(); - wide_raw_ptr_in_tuple(); - not_unpin_not_protected(); - write_does_not_invalidate_all_aliases(); } #[allow(unused_assignments)] @@ -111,96 +93,6 @@ fn returned_mut_is_usable() { *y = 1; } -// ----- The tests below were taken from Stacked Borrows ---- - -// Make sure that reading from an `&mut` does, like reborrowing to `&`, -// NOT invalidate other reborrows. -fn read_does_not_invalidate1() { - fn foo(x: &mut (i32, i32)) -> &i32 { - let xraw = x as *mut (i32, i32); - let ret = unsafe { &(*xraw).1 }; - let _val = x.1; // we just read, this does NOT invalidate the reborrows. - ret - } - assert_eq!(*foo(&mut (1, 2)), 2); -} -// Same as above, but this time we first create a raw, then read from `&mut` -// and then freeze from the raw. -fn read_does_not_invalidate2() { - fn foo(x: &mut (i32, i32)) -> &i32 { - let xraw = x as *mut (i32, i32); - let _val = x.1; // we just read, this does NOT invalidate the raw reborrow. - let ret = unsafe { &(*xraw).1 }; - ret - } - assert_eq!(*foo(&mut (1, 2)), 2); -} - -// Escape a mut to raw, then share the same mut and use the share, then the raw. -// That should work. -fn mut_raw_then_mut_shr() { - let mut x = 2; - let xref = &mut x; - let xraw = &mut *xref as *mut _; - let xshr = &*xref; - assert_eq!(*xshr, 2); - unsafe { - *xraw = 4; - } - assert_eq!(x, 4); -} - -// Create first a shared reference and then a raw pointer from a `&mut` -// should permit mutation through that raw pointer. -fn mut_shr_then_mut_raw() { - let xref = &mut 2; - let _xshr = &*xref; - let xraw = xref as *mut _; - unsafe { - *xraw = 3; - } - assert_eq!(*xref, 3); -} - -// Ensure that if we derive from a mut a raw, and then from that a mut, -// and then read through the original mut, that does not invalidate the raw. -// This shows that the read-exception for `&mut` applies even if the `Shr` item -// on the stack is not at the top. -fn mut_raw_mut() { - let mut x = 2; - { - let xref1 = &mut x; - let xraw = xref1 as *mut _; - let _xref2 = unsafe { &mut *xraw }; - let _val = *xref1; - unsafe { - *xraw = 4; - } - // we can now use both xraw and xref1, for reading - assert_eq!(*xref1, 4); - assert_eq!(unsafe { *xraw }, 4); - assert_eq!(*xref1, 4); - assert_eq!(unsafe { *xraw }, 4); - // we cannot use xref2; see `compile-fail/stacked-borrows/illegal_read4.rs` - } - assert_eq!(x, 4); -} - -fn partially_invalidate_mut() { - let data = &mut (0u8, 0u8); - let reborrow = &mut *data as *mut (u8, u8); - let shard = unsafe { &mut (*reborrow).0 }; - data.1 += 1; // the deref overlaps with `shard`, but that is ok; the access does not overlap. - *shard += 1; // so we can still use `shard`. - assert_eq!(*data, (1, 1)); -} - -// Make sure that we can handle the situation where a location is frozen when being dropped. -fn drop_after_sharing() { - let x = String::from("hello!"); - let _len = x.len(); -} - // Make sure that coercing &mut T to *const T produces a writeable pointer. 
fn direct_mut_to_const_raw() { let x = &mut 0; @@ -210,150 +102,3 @@ fn direct_mut_to_const_raw() { } assert_eq!(*x, 1); } - -// Make sure that we can create two raw pointers from a mutable reference and use them both. -fn two_raw() { - unsafe { - let x = &mut 0; - let y1 = x as *mut _; - let y2 = x as *mut _; - *y1 += 2; - *y2 += 1; - } -} - -// Make sure that creating a *mut does not invalidate existing shared references. -fn shr_and_raw() { - unsafe { - let x = &mut 0; - let y1: &i32 = mem::transmute(&*x); // launder lifetimes - let y2 = x as *mut _; - let _val = *y1; - *y2 += 1; - } -} - -fn disjoint_mutable_subborrows() { - struct Foo { - a: String, - b: Vec, - } - - unsafe fn borrow_field_a<'a>(this: *mut Foo) -> &'a mut String { - &mut (*this).a - } - - unsafe fn borrow_field_b<'a>(this: *mut Foo) -> &'a mut Vec { - &mut (*this).b - } - - let mut foo = Foo { a: "hello".into(), b: vec![0, 1, 2] }; - - let ptr = &mut foo as *mut Foo; - - let a = unsafe { borrow_field_a(ptr) }; - let b = unsafe { borrow_field_b(ptr) }; - b.push(4); - a.push_str(" world"); - assert_eq!(format!("{:?} {:?}", a, b), r#""hello world" [0, 1, 2, 4]"#); -} - -fn raw_ref_to_part() { - struct Part { - _lame: i32, - } - - #[repr(C)] - struct Whole { - part: Part, - extra: i32, - } - - let it = Box::new(Whole { part: Part { _lame: 0 }, extra: 42 }); - let whole = ptr::addr_of_mut!(*Box::leak(it)); - let part = unsafe { ptr::addr_of_mut!((*whole).part) }; - let typed = unsafe { &mut *(part as *mut Whole) }; - assert!(typed.extra == 42); - drop(unsafe { Box::from_raw(whole) }); -} - -/// When casting an array reference to a raw element ptr, that should cover the whole array. -fn array_casts() { - let mut x: [usize; 2] = [0, 0]; - let p = &mut x as *mut usize; - unsafe { - *p.add(1) = 1; - } - - let x: [usize; 2] = [0, 1]; - let p = &x as *const usize; - assert_eq!(unsafe { *p.add(1) }, 1); -} - -/// Transmuting &&i32 to &&mut i32 is fine. -fn mut_below_shr() { - let x = 0; - let y = &x; - let p = unsafe { core::mem::transmute::<&&i32, &&mut i32>(&y) }; - let r = &**p; - let _val = *r; -} - -fn wide_raw_ptr_in_tuple() { - let mut x: Box = Box::new("ouch"); - let r = &mut *x as *mut dyn std::any::Any; - // This triggers the visitor-based recursive retagging. It is *not* supposed to retag raw - // pointers, but then the visitor might recurse into the "fields" of a wide raw pointer and - // finds a reference (to a vtable) there that it wants to retag... and that would be Wrong. - let pair = (r, &0); - let r = unsafe { &mut *pair.0 }; - // Make sure the fn ptr part of the vtable is still fine. - r.type_id(); -} - -fn not_unpin_not_protected() { - // `&mut !Unpin`, at least for now, does not get `noalias` nor `dereferenceable`, so we also - // don't add protectors. (We could, but until we have a better idea for where we want to go with - // the self-referential-coroutine situation, it does not seem worth the potential trouble.) - use std::marker::PhantomPinned; - - pub struct NotUnpin(#[allow(dead_code)] i32, PhantomPinned); - - fn inner(x: &mut NotUnpin, f: fn(&mut NotUnpin)) { - // `f` is allowed to deallocate `x`. - f(x) - } - - inner(Box::leak(Box::new(NotUnpin(0, PhantomPinned))), |x| { - let raw = x as *mut _; - drop(unsafe { Box::from_raw(raw) }); - }); -} - -fn write_does_not_invalidate_all_aliases() { - // In TB there are other ways to do that (`addr_of!(*x)` has the same tag as `x`), - // but let's still make sure this SB test keeps working. - - mod other { - /// Some private memory to store stuff in. 
- static mut S: *mut i32 = 0 as *mut i32; - - pub fn lib1(x: &&mut i32) { - unsafe { - S = (x as *const &mut i32).cast::<*mut i32>().read(); - } - } - - pub fn lib2() { - unsafe { - *S = 1337; - } - } - } - - let x = &mut 0; - other::lib1(&x); - *x = 42; // a write to x -- invalidates other pointers? - other::lib2(); - assert_eq!(*x, 1337); // oops, the value changed! I guess not all pointers were invalidated -} diff --git a/src/tools/miri/tests/pass/tree_borrows/unique.default.stderr b/src/tools/miri/tests/pass/tree_borrows/unique.default.stderr deleted file mode 100644 index 6098c855bde72..0000000000000 --- a/src/tools/miri/tests/pass/tree_borrows/unique.default.stderr +++ /dev/null @@ -1,21 +0,0 @@ -────────────────────────────────────────────────── -Warning: this tree is indicative only. Some tags may have been hidden. -0.. 1 -| Act | └─┬── -| Res | └─┬── -| Res | └──── -────────────────────────────────────────────────── -────────────────────────────────────────────────── -Warning: this tree is indicative only. Some tags may have been hidden. -0.. 1 -| Act | └─┬── -| Act | └─┬── -| Act | └──── -────────────────────────────────────────────────── -────────────────────────────────────────────────── -Warning: this tree is indicative only. Some tags may have been hidden. -0.. 1 -| Act | └─┬── -| Act | └─┬── -| Act | └──── -────────────────────────────────────────────────── diff --git a/src/tools/miri/tests/pass/tree_borrows/unique.rs b/src/tools/miri/tests/pass/tree_borrows/unique.rs deleted file mode 100644 index f1ca1b51aa857..0000000000000 --- a/src/tools/miri/tests/pass/tree_borrows/unique.rs +++ /dev/null @@ -1,67 +0,0 @@ -//@revisions: default uniq -// We disable the GC for this test because it would change what is printed. -//@compile-flags: -Zmiri-tree-borrows -Zmiri-provenance-gc=0 -//@[uniq]compile-flags: -Zmiri-unique-is-unique - -#![feature(ptr_internals)] - -#[path = "../../utils/mod.rs"] -#[macro_use] -mod utils; - -use core::ptr::Unique; - -// Check general handling of Unique - -fn main() { - unsafe { - let base = &mut 5u8; - let alloc_id = alloc_id!(base); - name!(base); - - let raw = &mut *base as *mut u8; - name!(raw); - - // We create a `Unique` and expect it to have a fresh tag - // and uninitialized permissions. - let uniq = Unique::new_unchecked(raw); - - // With `-Zmiri-unique-is-unique`, `Unique::as_ptr` (which is called by - // `Vec::as_ptr`) generates pointers with a fresh tag, so to name the actual - // `base` pointer we care about we have to walk up the tree a bit. - // - // We care about naming this specific parent tag because it is the one - // that stays `Active` during the entire execution, unlike the leaves - // that will be invalidated the next time `as_ptr` is called. - // - // (We name it twice so that we have an indicator in the output of - // whether we got the distance correct: - // If the output shows - // - // |- - // '- - // - // then `nth_parent` is not big enough. - // The correct value for `nth_parent` should be the minimum - // integer for which the output shows - // - // '- - // ) - // - // Ultimately we want pointers obtained through independent - // calls of `as_ptr` to be able to alias, which will probably involve - // a new permission that allows aliasing when there is no protector. - let nth_parent = if cfg!(uniq) { 2 } else { 0 }; - name!(uniq.as_ptr()=>nth_parent, "uniq"); - name!(uniq.as_ptr()=>nth_parent, "uniq"); - print_state!(alloc_id); - - // We can activate the Unique and use it mutably. 
diff --git a/src/tools/miri/tests/pass/tree_borrows/unique.uniq.stderr b/src/tools/miri/tests/pass/tree_borrows/unique.uniq.stderr
deleted file mode 100644
index 960c7e216e1d0..0000000000000
--- a/src/tools/miri/tests/pass/tree_borrows/unique.uniq.stderr
+++ /dev/null
@@ -1,24 +0,0 @@
-──────────────────────────────────────────────────
-Warning: this tree is indicative only. Some tags may have been hidden.
-0.. 1
-| Act | └─┬──
-| Res |   └─┬──
-| Res |     └─┬──
-|-----|       └────
-──────────────────────────────────────────────────
-──────────────────────────────────────────────────
-Warning: this tree is indicative only. Some tags may have been hidden.
-0.. 1
-| Act | └─┬──
-| Act |   └─┬──
-| Act |     └─┬──
-| Act |       └────
-──────────────────────────────────────────────────
-──────────────────────────────────────────────────
-Warning: this tree is indicative only. Some tags may have been hidden.
-0.. 1
-| Act | └─┬──
-| Act |   └─┬──
-| Act |     └─┬──
-| Dis |       └────
-──────────────────────────────────────────────────
diff --git a/src/tools/miri/tests/pass/tree_borrows/vec_unique.rs b/src/tools/miri/tests/pass/tree_borrows/vec_unique.rs
deleted file mode 100644
index af4c3b0693191..0000000000000
--- a/src/tools/miri/tests/pass/tree_borrows/vec_unique.rs
+++ /dev/null
@@ -1,69 +0,0 @@
-//@revisions: default uniq
-// We disable the GC for this test because it would change what is printed.
-//@compile-flags: -Zmiri-tree-borrows -Zmiri-provenance-gc=0
-//@[uniq]compile-flags: -Zmiri-unique-is-unique
-
-#![feature(vec_into_raw_parts)]
-
-#[path = "../../utils/mod.rs"]
-#[macro_use]
-mod utils;
-
-// Check general handling of `Unique`:
-// there is no *explicit* `Unique` being used here, but there is one
-// hidden a few layers inside `Vec` that should be reflected in the tree structure.
-
-fn main() {
-    unsafe {
-        let base = vec![0u8, 1];
-        let alloc_id = alloc_id!(base.as_ptr());
-
-        // With `-Zmiri-unique-is-unique`, `Unique::as_ptr` (which is called by
-        // `Vec::as_ptr`) generates pointers with a fresh tag, so to name the actual
-        // `base` pointer we care about we have to walk up the tree a bit.
-        //
-        // We care about naming this specific parent tag because it is the one
-        // that stays `Active` during the entire execution, unlike the leaves
-        // that will be invalidated the next time `as_ptr` is called.
-        //
-        // (We name it twice so that we have an indicator in the output of
-        // whether we got the distance correct:
-        // If the output shows
-        //
-        //  ├─
-        //  └─
-        //
-        // then `nth_parent` is not big enough.
-        // The correct value for `nth_parent` should be the minimum
-        // integer for which the output shows
-        //
-        //  └─
-        // )
-        //
-        // Ultimately we want pointers obtained through independent
-        // calls of `as_ptr` to be able to alias, which will probably involve
-        // a new permission that allows aliasing when there is no protector.
-        let nth_parent = if cfg!(uniq) { 9 } else { 0 };
-        name!(base.as_ptr()=>nth_parent);
-        name!(base.as_ptr()=>nth_parent);
-
-        // Destruct the `Vec`
-        let (ptr, len, cap) = base.into_raw_parts();
-
-        // Expect this to be again the same pointer as the one obtained from `as_ptr`.
-        // Under `-Zmiri-unique-is-unique`, this will be a strict child.
-        name!(ptr, "raw_parts.0");
-
-        // This is where the presence of `Unique` has implications,
-        // because there will be a reborrow here iff the exclusivity of `Unique`
-        // is enforced.
-        let reconstructed = Vec::from_raw_parts(ptr, len, cap);
-
-        // The `as_ptr` here (twice for the same reason as above) return either
-        // the same pointer once more (default) or a strict child (uniq).
-        name!(reconstructed.as_ptr()=>nth_parent);
-        name!(reconstructed.as_ptr()=>nth_parent);
-
-        print_state!(alloc_id, false);
-    }
-}
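The Vec round-trip at the heart of the deleted test, without the tree-naming machinery, looks roughly like this (a hedged standalone sketch, not the test itself; `vec_into_raw_parts` is an unstable feature):

```rust
#![feature(vec_into_raw_parts)]

fn main() {
    let base = vec![0u8, 1];
    let first = base.as_ptr();

    // Destructure the Vec into its raw components.
    let (ptr, len, cap) = base.into_raw_parts();
    // Same address as `as_ptr` returned; whether it was also the same *tag*
    // (or a strict child) is what `-Zmiri-unique-is-unique` used to change.
    assert_eq!(ptr.cast_const(), first);

    // Rebuilding the Vec reborrows through its internal `Unique` iff the
    // exclusivity of `Unique` is enforced.
    let reconstructed = unsafe { Vec::from_raw_parts(ptr, len, cap) };
    assert_eq!(reconstructed, [0, 1]);
}
```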
diff --git a/src/tools/miri/tests/pass/vec.rs b/src/tools/miri/tests/pass/vec.rs
index 4ab2bcb7f2281..3e526813bb457 100644
--- a/src/tools/miri/tests/pass/vec.rs
+++ b/src/tools/miri/tests/pass/vec.rs
@@ -1,7 +1,6 @@
-//@revisions: stack tree tree_uniq
+//@revisions: stack tree
 //@compile-flags: -Zmiri-strict-provenance
 //@[tree]compile-flags: -Zmiri-tree-borrows
-//@[tree_uniq]compile-flags: -Zmiri-tree-borrows -Zmiri-unique-is-unique
 #![feature(iter_advance_by, iter_next_chunk)]
 
 // Gather all references from a mutable iterator and make sure Miri notices if
diff --git a/src/tools/miri/tests/pass/vecdeque.rs b/src/tools/miri/tests/pass/vecdeque.rs
index 9153c428e184d..bdf57f281a08f 100644
--- a/src/tools/miri/tests/pass/vecdeque.rs
+++ b/src/tools/miri/tests/pass/vecdeque.rs
@@ -1,8 +1,6 @@
-//@revisions: stack tree tree_uniq
+//@revisions: stack tree
 //@compile-flags: -Zmiri-strict-provenance
 //@[tree]compile-flags: -Zmiri-tree-borrows
-//@[tree_uniq]compile-flags: -Zmiri-tree-borrows -Zmiri-unique-is-unique
-
 use std::collections::VecDeque;
 
 fn test_all_refs<'a, T: 'a>(dummy: &mut T, iter: impl Iterator<Item = &'a mut T>) {
diff --git a/src/tools/miri/tests/pass/weak_memory/weak.rs b/src/tools/miri/tests/pass/weak_memory/weak.rs
index 5d636431d8675..eeab4ebf129e8 100644
--- a/src/tools/miri/tests/pass/weak_memory/weak.rs
+++ b/src/tools/miri/tests/pass/weak_memory/weak.rs
@@ -1,4 +1,4 @@
-//@compile-flags: -Zmiri-ignore-leaks -Zmiri-preemption-rate=0
+//@compile-flags: -Zmiri-ignore-leaks -Zmiri-fixed-schedule
 
 // Tests showing weak memory behaviours are exhibited. All tests
 // return true when the desired behaviour is seen.
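`weak.rs` swaps `-Zmiri-preemption-rate=0` for the new `-Zmiri-fixed-schedule`, which pins the scheduler while leaving weak-memory emulation active, so the test can still observe stale reads. For context, the kind of behaviour this suite checks for is the classic store-buffering litmus test (a generic sketch, not one of the tests in the file):

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::thread;

static X: AtomicUsize = AtomicUsize::new(0);
static Y: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let t1 = thread::spawn(|| {
        X.store(1, Relaxed);
        Y.load(Relaxed)
    });
    let t2 = thread::spawn(|| {
        Y.store(1, Relaxed);
        X.load(Relaxed)
    });
    let (a, b) = (t1.join().unwrap(), t2.join().unwrap());
    // (0, 0) is impossible under sequential consistency but allowed by the
    // C++ memory model with Relaxed orderings, and Miri's weak-memory
    // emulation can actually exhibit it across repeated runs.
    println!("({a}, {b})");
}
```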