
Commit 9a81177

Another round of tidy / warning fixes
1 parent 4aa62ea commit 9a81177

23 files changed: +119, -102 lines

Cargo.toml

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ exclude = [
   "obj",
 ]
 
-[profile.release.package.rustc-rayon-core]
+[profile.release.package.rustc_thread_pool]
 # The rustc fork of Rayon has deadlock detection code which intermittently
 # causes overflows in the CI (see https://github.com/rust-lang/rust/issues/90227)
 # so we turn overflow checks off for now.

compiler/rustc_thread_pool/src/broadcast/mod.rs

Lines changed: 4 additions & 4 deletions
@@ -6,7 +6,7 @@ use crate::job::{ArcJob, StackJob};
 use crate::latch::{CountLatch, LatchRef};
 use crate::registry::{Registry, WorkerThread};
 
-mod test;
+mod tests;
 
 /// Executes `op` within every thread in the current threadpool. If this is
 /// called from a non-Rayon thread, it will execute in the global threadpool.
@@ -103,18 +103,18 @@ where
     };
 
     let n_threads = registry.num_threads();
-    let current_thread = WorkerThread::current().as_ref();
+    let current_thread = unsafe { WorkerThread::current().as_ref() };
     let tlv = crate::tlv::get();
     let latch = CountLatch::with_count(n_threads, current_thread);
     let jobs: Vec<_> =
         (0..n_threads).map(|_| StackJob::new(tlv, &f, LatchRef::new(&latch))).collect();
-    let job_refs = jobs.iter().map(|job| job.as_job_ref());
+    let job_refs = jobs.iter().map(|job| unsafe { job.as_job_ref() });
 
     registry.inject_broadcast(job_refs);
 
     // Wait for all jobs to complete, then collect the results, maybe propagating a panic.
     latch.wait(current_thread);
-    jobs.into_iter().map(|job| job.into_result()).collect()
+    jobs.into_iter().map(|job| unsafe { job.into_result() }).collect()
 }
 
 /// Execute `op` on every thread in the pool. It will be executed on each

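Most of the `unsafe { ... }` wrappers added in this commit (here and in job.rs, join/mod.rs, and latch.rs below) follow the pattern the `unsafe_op_in_unsafe_fn` lint asks for: inside an `unsafe fn`, each unsafe operation is spelled out in its own block instead of leaning on the function's own `unsafe`. A minimal sketch of that pattern, with an illustrative helper that is not part of the crate:

```rust
// With the lint enabled, the body of an `unsafe fn` is no longer one big
// implicit unsafe block, so each unsafe operation needs an explicit one.
#![warn(unsafe_op_in_unsafe_fn)]

/// Illustrative helper, not from rustc_thread_pool.
unsafe fn read_first(ptr: *const u32) -> u32 {
    // A bare `*ptr` would trigger the lint; the block marks exactly which
    // operation carries the safety obligation.
    unsafe { *ptr }
}

fn main() {
    let xs = [1u32, 2, 3];
    // Safety: the pointer comes from a live, non-empty array.
    let first = unsafe { read_first(xs.as_ptr()) };
    assert_eq!(first, 1);
}
```

The wrapped calls keep their original semantics; only the bookkeeping of where `unsafe` is discharged changes.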
compiler/rustc_thread_pool/src/compile_fail/quicksort_race1.rs

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
 
     let mid = partition(v);
     let (lo, _hi) = v.split_at_mut(mid);
-    rustc_thred_pool::join(|| quick_sort(lo), || quick_sort(lo)); //~ ERROR
+    rustc_thread_pool::join(|| quick_sort(lo), || quick_sort(lo)); //~ ERROR
 }
 
 fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {

compiler/rustc_thread_pool/src/compile_fail/quicksort_race2.rs

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
 
     let mid = partition(v);
     let (lo, _hi) = v.split_at_mut(mid);
-    rustc_thred_pool::join(|| quick_sort(lo), || quick_sort(v)); //~ ERROR
+    rustc_thread_pool::join(|| quick_sort(lo), || quick_sort(v)); //~ ERROR
 }
 
 fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {

compiler/rustc_thread_pool/src/compile_fail/quicksort_race3.rs

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
 
     let mid = partition(v);
     let (_lo, hi) = v.split_at_mut(mid);
-    rustc_thred_pool::join(|| quick_sort(hi), || quick_sort(hi)); //~ ERROR
+    rustc_thread_pool::join(|| quick_sort(hi), || quick_sort(hi)); //~ ERROR
 }
 
 fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {

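These three files are compile-fail doc tests: they exist to prove that `join` rejects closures capturing overlapping mutable borrows of the same slice. With the `rustc_thred_pool` typo they failed for the wrong reason (an unresolved crate name) rather than the intended borrow error. For contrast, a sketch of a version that does compile, assuming `rustc_thread_pool` is available as a dependency; the Lomuto `partition` is added here purely for illustration:

```rust
use rustc_thread_pool::join;

fn quick_sort<T: PartialOrd + Send>(v: &mut [T]) {
    if v.len() <= 1 {
        return;
    }
    let mid = partition(v);
    let (lo, rest) = v.split_at_mut(mid);
    let hi = &mut rest[1..]; // the pivot at `mid` is already in place
    // Disjoint mutable halves may be sorted in parallel; handing the *same*
    // half to both closures is exactly what the compile-fail tests reject.
    join(|| quick_sort(lo), || quick_sort(hi));
}

fn partition<T: PartialOrd + Send>(v: &mut [T]) -> usize {
    let pivot = v.len() - 1;
    let mut i = 0;
    for j in 0..pivot {
        if v[j] <= v[pivot] {
            v.swap(i, j);
            i += 1;
        }
    }
    v.swap(i, pivot);
    i
}

fn main() {
    let mut v = vec![5, 1, 8, 22, 0, 44];
    quick_sort(&mut v);
    assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
}
```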
compiler/rustc_thread_pool/src/compile_fail/rc_return.rs

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@
 
 use std::rc::Rc;
 
-rustc_thred_pool::join(|| Rc::new(22), || ()); //~ ERROR
+rustc_thread_pool::join(|| Rc::new(22), || ()); //~ ERROR
 
 ``` */
 mod left {}
@@ -11,7 +11,7 @@ mod left {}
 
 use std::rc::Rc;
 
-rustc_thred_pool::join(|| (), || Rc::new(23)); //~ ERROR
+rustc_thread_pool::join(|| (), || Rc::new(23)); //~ ERROR
 
 ``` */
 mod right {}

compiler/rustc_thread_pool/src/compile_fail/rc_upvar.rs

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 use std::rc::Rc;
 
 let r = Rc::new(22);
-rustc_thred_pool::join(|| r.clone(), || r.clone());
+rustc_thread_pool::join(|| r.clone(), || r.clone());
 //~^ ERROR
 
 ``` */

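The rc_return and rc_upvar tests pin down that `join` rejects closures that return or capture an `Rc`, because `Rc` is not `Send`. A working counterpart using `Arc`, sketched here (not part of the commit), assuming the crate as a dependency:

```rust
use std::sync::Arc;

use rustc_thread_pool::join;

fn main() {
    // Arc<i32> is Send + Sync, so both closures may borrow and clone it,
    // unlike the Rc in the compile-fail tests above.
    let r = Arc::new(22);
    let (a, b) = join(|| Arc::clone(&r), || Arc::clone(&r));
    assert_eq!((*a, *b), (22, 22));
}
```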
compiler/rustc_thread_pool/src/compile_fail/scope_join_bad.rs

Lines changed: 2 additions & 2 deletions
@@ -3,7 +3,7 @@
 fn bad_scope<F>(f: F)
     where F: FnOnce(&i32) + Send,
 {
-    rustc_thred_pool::scope(|s| {
+    rustc_thread_pool::scope(|s| {
         let x = 22;
         s.spawn(|_| f(&x)); //~ ERROR `x` does not live long enough
     });
@@ -13,7 +13,7 @@ fn good_scope<F>(f: F)
     where F: FnOnce(&i32) + Send,
 {
     let x = 22;
-    rustc_thred_pool::scope(|s| {
+    rustc_thread_pool::scope(|s| {
         s.spawn(|_| f(&x));
     });
 }

compiler/rustc_thread_pool/src/job.rs

Lines changed: 17 additions & 13 deletions
@@ -61,7 +61,7 @@ impl JobRef {
 
     #[inline]
     pub(super) unsafe fn execute(self) {
-        (self.execute_fn)(self.pointer)
+        unsafe { (self.execute_fn)(self.pointer) }
     }
 }
 
@@ -97,7 +97,7 @@ where
     }
 
     pub(super) unsafe fn as_job_ref(&self) -> JobRef {
-        JobRef::new(self)
+        unsafe { JobRef::new(self) }
    }
 
     pub(super) unsafe fn run_inline(self, stolen: bool) -> R {
@@ -116,12 +116,16 @@ where
     R: Send,
 {
     unsafe fn execute(this: *const ()) {
-        let this = &*(this as *const Self);
+        let this = unsafe { &*(this as *const Self) };
         tlv::set(this.tlv);
         let abort = unwind::AbortIfPanic;
-        let func = (*this.func.get()).take().unwrap();
-        (*this.result.get()) = JobResult::call(func);
-        Latch::set(&this.latch);
+        let func = unsafe { (*this.func.get()).take().unwrap() };
+        unsafe {
+            (*this.result.get()) = JobResult::call(func);
+        }
+        unsafe {
+            Latch::set(&this.latch);
+        }
         mem::forget(abort);
     }
 }
@@ -152,7 +156,7 @@ where
     /// lifetimes, so it is up to you to ensure that this JobRef
     /// doesn't outlive any data that it closes over.
     pub(super) unsafe fn into_job_ref(self: Box<Self>) -> JobRef {
-        JobRef::new(Box::into_raw(self))
+        unsafe { JobRef::new(Box::into_raw(self)) }
     }
 
     /// Creates a static `JobRef` from this job.
@@ -169,7 +173,7 @@ where
     BODY: FnOnce() + Send,
 {
     unsafe fn execute(this: *const ()) {
-        let this = Box::from_raw(this as *mut Self);
+        let this = unsafe { Box::from_raw(this as *mut Self) };
         tlv::set(this.tlv);
         (this.job)();
     }
@@ -196,7 +200,7 @@ where
     /// lifetimes, so it is up to you to ensure that this JobRef
     /// doesn't outlive any data that it closes over.
     pub(super) unsafe fn as_job_ref(this: &Arc<Self>) -> JobRef {
-        JobRef::new(Arc::into_raw(Arc::clone(this)))
+        unsafe { JobRef::new(Arc::into_raw(Arc::clone(this))) }
     }
 
     /// Creates a static `JobRef` from this job.
@@ -213,7 +217,7 @@ where
     BODY: Fn() + Send + Sync,
 {
     unsafe fn execute(this: *const ()) {
-        let this = Arc::from_raw(this as *mut Self);
+        let this = unsafe { Arc::from_raw(this as *mut Self) };
         (this.job)();
     }
 }
@@ -254,17 +258,17 @@ impl JobFifo {
         // jobs in a thread's deque may be popped from the back (LIFO) or stolen from the front
         // (FIFO), but either way they will end up popping from the front of this queue.
         self.inner.push(job_ref);
-        JobRef::new(self)
+        unsafe { JobRef::new(self) }
     }
 }
 
 impl Job for JobFifo {
     unsafe fn execute(this: *const ()) {
         // We "execute" a queue by executing its first job, FIFO.
-        let this = &*(this as *const Self);
+        let this = unsafe { &*(this as *const Self) };
         loop {
             match this.inner.steal() {
-                Steal::Success(job_ref) => break job_ref.execute(),
+                Steal::Success(job_ref) => break unsafe { job_ref.execute() },
                 Steal::Empty => panic!("FIFO is empty"),
                 Steal::Retry => {}
             }

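Every hunk in job.rs wraps an existing unsafe operation in an explicit block; the underlying pattern, a type-erased job handle whose `execute` function reconstitutes the original pointer, is unchanged. A self-contained sketch of that pattern written in the new style (the names are illustrative, not the crate's actual types):

```rust
// A heap-allocated closure is erased to a raw pointer plus a function pointer;
// `execute` rebuilds the Box and runs the closure exactly once.
struct HeapTask<F: FnOnce() + Send> {
    body: F,
}

struct TaskRef {
    pointer: *const (),
    execute_fn: unsafe fn(*const ()),
}

impl<F: FnOnce() + Send> HeapTask<F> {
    fn into_task_ref(self: Box<Self>) -> TaskRef {
        TaskRef { pointer: Box::into_raw(self) as *const (), execute_fn: Self::execute }
    }

    unsafe fn execute(ptr: *const ()) {
        // Under unsafe_op_in_unsafe_fn, the reconstruction gets its own block.
        let this = unsafe { Box::from_raw(ptr as *mut Self) };
        (this.body)();
    }
}

fn main() {
    let job = Box::new(HeapTask { body: || println!("ran a type-erased job") }).into_task_ref();
    // Safety: `pointer` came from `into_task_ref` above and is consumed exactly once.
    unsafe { (job.execute_fn)(job.pointer) };
}
```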
compiler/rustc_thread_pool/src/join/mod.rs

Lines changed: 3 additions & 3 deletions
@@ -7,7 +7,7 @@ use crate::tlv::{self, Tlv};
 use crate::{FnContext, unwind};
 
 #[cfg(test)]
-mod test;
+mod tests;
 
 /// Takes two closures and *potentially* runs them in parallel. It
 /// returns a pair of the results from those closures.
@@ -41,7 +41,7 @@ mod test;
 /// [the `par_sort` method]: ../rayon/slice/trait.ParallelSliceMut.html#method.par_sort
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// let mut v = vec![5, 1, 8, 22, 0, 44];
 /// quick_sort(&mut v);
 /// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
@@ -192,7 +192,7 @@ unsafe fn join_recover_from_panic(
     err: Box<dyn Any + Send>,
     tlv: Tlv,
 ) -> ! {
-    worker_thread.wait_until(job_b_latch);
+    unsafe { worker_thread.wait_until(job_b_latch) };
 
     // Restore the TLV since we might have run some jobs overwriting it when waiting for job b.
     tlv::set(tlv);

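The doc-comment change here matters because the hidden `# use rustc_thread_pool as rayon;` line is what lets the example compile as a doc test while reading like ordinary Rayon code; with the typo, the example fails with an unresolved-crate error before the interesting part runs. Outside a doc comment, the same aliasing looks like this (a sketch, assuming the crate as a dependency):

```rust
// Alias the fork under the familiar name so examples stay readable.
use rustc_thread_pool as rayon;

fn main() {
    let (a, b) = rayon::join(|| 1 + 1, || 2 + 2);
    assert_eq!((a, b), (2, 4));
}
```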
compiler/rustc_thread_pool/src/latch.rs

Lines changed: 16 additions & 16 deletions
@@ -116,7 +116,7 @@ impl CoreLatch {
     /// latch code.
     #[inline]
     unsafe fn set(this: *const Self) -> bool {
-        let old_state = (*this).state.swap(SET, Ordering::AcqRel);
+        let old_state = unsafe { (*this).state.swap(SET, Ordering::AcqRel) };
         old_state == SLEEPING
     }
 
@@ -185,26 +185,26 @@ impl<'r> Latch for SpinLatch<'r> {
     unsafe fn set(this: *const Self) {
         let cross_registry;
 
-        let registry: &Registry = if (*this).cross {
+        let registry: &Registry = if unsafe { (*this).cross } {
             // Ensure the registry stays alive while we notify it.
             // Otherwise, it would be possible that we set the spin
             // latch and the other thread sees it and exits, causing
             // the registry to be deallocated, all before we get a
             // chance to invoke `registry.notify_worker_latch_is_set`.
-            cross_registry = Arc::clone((*this).registry);
+            cross_registry = Arc::clone(unsafe { (*this).registry });
             &cross_registry
         } else {
             // If this is not a "cross-registry" spin-latch, then the
             // thread which is performing `set` is itself ensuring
             // that the registry stays alive. However, that doesn't
             // include this *particular* `Arc` handle if the waiting
             // thread then exits, so we must completely dereference it.
-            (*this).registry
+            unsafe { (*this).registry }
         };
-        let target_worker_index = (*this).target_worker_index;
+        let target_worker_index = unsafe { (*this).target_worker_index };
 
         // NOTE: Once we `set`, the target may proceed and invalidate `this`!
-        if CoreLatch::set(&(*this).core_latch) {
+        if unsafe { CoreLatch::set(&(*this).core_latch) } {
             // Subtle: at this point, we can no longer read from
             // `self`, because the thread owning this spin latch may
             // have awoken and deallocated the latch. Therefore, we
@@ -249,9 +249,9 @@ impl LockLatch {
 impl Latch for LockLatch {
     #[inline]
     unsafe fn set(this: *const Self) {
-        let mut guard = (*this).m.lock().unwrap();
+        let mut guard = unsafe { (*this).m.lock().unwrap() };
         *guard = true;
-        (*this).v.notify_all();
+        unsafe { (*this).v.notify_all() };
     }
 }
 
@@ -286,7 +286,7 @@ impl OnceLatch {
         registry: &Registry,
         target_worker_index: usize,
     ) {
-        if CoreLatch::set(&(*this).core_latch) {
+        if unsafe { CoreLatch::set(&(*this).core_latch) } {
             registry.notify_worker_latch_is_set(target_worker_index);
         }
     }
@@ -384,17 +384,17 @@ impl CountLatch {
 impl Latch for CountLatch {
     #[inline]
     unsafe fn set(this: *const Self) {
-        if (*this).counter.fetch_sub(1, Ordering::SeqCst) == 1 {
+        if unsafe { (*this).counter.fetch_sub(1, Ordering::SeqCst) == 1 } {
             // NOTE: Once we call `set` on the internal `latch`,
             // the target may proceed and invalidate `this`!
-            match (*this).kind {
-                CountLatchKind::Stealing { ref latch, ref registry, worker_index } => {
+            match unsafe { &(*this).kind } {
+                CountLatchKind::Stealing { latch, registry, worker_index } => {
                     let registry = Arc::clone(registry);
-                    if CoreLatch::set(latch) {
-                        registry.notify_worker_latch_is_set(worker_index);
+                    if unsafe { CoreLatch::set(latch) } {
+                        registry.notify_worker_latch_is_set(*worker_index);
                     }
                 }
-                CountLatchKind::Blocking { ref latch } => LockLatch::set(latch),
+                CountLatchKind::Blocking { latch } => unsafe { LockLatch::set(latch) },
             }
         }
     }
@@ -426,6 +426,6 @@ impl<L> Deref for LatchRef<'_, L> {
 impl<L: Latch> Latch for LatchRef<'_, L> {
     #[inline]
     unsafe fn set(this: *const Self) {
-        L::set((*this).inner);
+        unsafe { L::set((*this).inner) };
     }
 }

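The latch edits wrap each raw-pointer read in an explicit unsafe block and switch the CountLatch match to borrow `(*this).kind` rather than using `ref` patterns, but the invariant in the existing comments is untouched: once a latch is set, the thread that owns it may wake and free it, so anything needed after the store has to be read out of `*this` first. A small sketch of that discipline with illustrative types (not the crate's API):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

struct Flag {
    set: AtomicBool,
    wake_target: usize,
}

/// Sets the flag and returns which worker to wake. After the store, `this`
/// must be treated as potentially dangling, so the index is read first.
unsafe fn set_flag(this: *const Flag) -> usize {
    let target = unsafe { (*this).wake_target };
    unsafe { (*this).set.store(true, Ordering::Release) };
    target
}

fn main() {
    let flag = Flag { set: AtomicBool::new(false), wake_target: 3 };
    // Safety: `flag` outlives the call in this single-threaded demo.
    let target = unsafe { set_flag(&flag) };
    assert_eq!(target, 3);
    assert!(flag.set.load(Ordering::Acquire));
}
```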
compiler/rustc_thread_pool/src/lib.rs

Lines changed: 8 additions & 7 deletions
@@ -61,6 +61,7 @@
 //! conflicting requirements will need to be resolved before the build will
 //! succeed.
 
+#![cfg_attr(test, allow(unused_crate_dependencies))]
 #![warn(rust_2018_idioms)]
 
 use std::any::Any;
@@ -85,7 +86,7 @@ mod unwind;
 mod worker_local;
 
 mod compile_fail;
-mod test;
+mod tests;
 
 pub mod tlv;
 
@@ -152,14 +153,14 @@ enum ErrorKind {
 /// The following creates a thread pool with 22 threads.
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// let pool = rayon::ThreadPoolBuilder::new().num_threads(22).build().unwrap();
 /// ```
 ///
 /// To instead configure the global thread pool, use [`build_global()`]:
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// rayon::ThreadPoolBuilder::new().num_threads(22).build_global().unwrap();
 /// ```
 ///
@@ -315,7 +316,7 @@ impl ThreadPoolBuilder {
     /// A scoped pool may be useful in combination with scoped thread-local variables.
     ///
     /// ```
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     ///
     /// scoped_tls::scoped_thread_local!(static POOL_DATA: Vec<i32>);
     ///
@@ -382,7 +383,7 @@ impl<S> ThreadPoolBuilder<S> {
     /// A minimal spawn handler just needs to call `run()` from an independent thread.
     ///
     /// ```
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
     ///     let pool = rayon::ThreadPoolBuilder::new()
     ///         .spawn_handler(|thread| {
@@ -400,7 +401,7 @@
     /// any errors from the thread builder.
     ///
     /// ```
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
     ///     let pool = rayon::ThreadPoolBuilder::new()
     ///         .spawn_handler(|thread| {
@@ -429,7 +430,7 @@
     /// [`std::thread::scope`]: https://doc.rust-lang.org/std/thread/fn.scope.html
     ///
     /// ```
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
     ///     std::thread::scope(|scope| {
     ///         let pool = rayon::ThreadPoolBuilder::new()

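Two things change in lib.rs: the crate-name typo in the builder doc examples, and a new `#![cfg_attr(test, allow(unused_crate_dependencies))]`, which applies only when the crate is compiled for its own test harness. A compact, runnable version of what the fixed examples demonstrate (a sketch that assumes `rustc_thread_pool` as a dependency and its rayon-core style `ThreadPool::install` API):

```rust
use rustc_thread_pool as rayon;

fn main() -> Result<(), rayon::ThreadPoolBuildError> {
    // Build a dedicated pool rather than configuring the global one.
    let pool = rayon::ThreadPoolBuilder::new().num_threads(4).build()?;
    let sum = pool.install(|| (1..=10).sum::<i32>());
    assert_eq!(sum, 55);
    Ok(())
}
```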