Skip to content

Commit

Permalink
Put some more pieces into place
Browse files Browse the repository at this point in the history
  • Loading branch information
spacejam committed Mar 15, 2021
1 parent d3e053b commit 96927dc
Show file tree
Hide file tree
Showing 31 changed files with 6,121 additions and 0 deletions.
5 changes: 5 additions & 0 deletions build.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
extern crate lalrpop;

fn main() {
    // Build-script entry point: compile every .lalrpop grammar under the
    // crate root into Rust parser modules. A grammar error fails the build
    // via the unwrap, which is the desired behavior for a build script.
    lalrpop::process_root().unwrap();
}
58 changes: 58 additions & 0 deletions src/args.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
/// Usage text printed when argument parsing fails.
///
/// Fix: the summary line previously advertised `--reorder`, but the parser
/// in `Args::parse` only accepts `--scramble` (which is also what the
/// option description below documents). The summary also omitted the
/// `--fail-io=<#>` value form and the `--fuzz` option entirely.
const USAGE: &str = "Usage: puke [--max-machines=<#>] [--scramble] [--fail-io=<#>] [--fuzz=<corpus path>] [module file.pk]
Falls back to repl if no module is provided.
Options:
    --max-machines=<#>    Maximum concurrency bound [default: number of cores * 1.2].
    --scramble            Scramble in-flight messages and scheduling orders.
    --fail-io=<#>         Causes random IO failures on 1 out of every N IO operations.
    --fuzz=<corpus path>  Feeds the provided module fuzzed inputs derived from the provided corpus directory.
";

/// Args for the puke `Interpreter`.
#[derive(Debug, Clone)]
pub struct Args {
    /// Maximum number of concurrently executing machines.
    pub max_machines: usize,
    /// When true, scramble in-flight messages and scheduling orders.
    pub scramble: bool,
    /// When `Some(n)`, inject a random IO failure on roughly 1 out of
    /// every `n` IO operations.
    pub fail_io: Option<usize>,
    /// When `Some(path)`, feed the module fuzzed inputs derived from the
    /// corpus directory at `path`.
    pub fuzz: Option<String>,
    /// Module file to execute; `None` falls back to the repl (per `USAGE`).
    pub module: Option<String>,
}

impl Default for Args {
    /// Defaults: no scrambling, no fault injection, no fuzzing, no module
    /// (repl mode), and a concurrency bound derived from the host.
    fn default() -> Args {
        Args {
            // Oversubscribe physical cores by 20%, matching the
            // "number of cores * 1.2" default documented in USAGE.
            max_machines: ((num_cpus::get_physical() as f64) * 1.2) as usize,
            scramble: false,
            fail_io: None,
            fuzz: None,
            module: None,
        }
    }
}

/// Pulls the next token out of `iter` and parses it into a `T`.
///
/// Panics with the usage string when the token is absent or cannot be
/// parsed — argument errors are fatal in this CLI.
fn parse<'a, I, T>(mut iter: I) -> T
where
    I: Iterator<Item = &'a str>,
    T: std::str::FromStr,
    <T as std::str::FromStr>::Err: std::fmt::Debug,
{
    let token = iter.next().expect(USAGE);
    token.parse().expect(USAGE)
}

impl Args {
    /// Parses the process's command-line arguments into an `Args`,
    /// starting from `Args::default()` and overriding with any flags
    /// provided.
    ///
    /// Flags begin with `--`; a bare argument is taken as the positional
    /// module path, matching the `[module file.pk]` form in `USAGE`.
    ///
    /// # Panics
    /// Panics with the usage string on an unknown option or a malformed
    /// option value.
    pub fn parse() -> Args {
        let mut args = Args::default();
        for raw_arg in std::env::args().skip(1) {
            // Fix: the original sliced `raw_arg[2..]` unconditionally,
            // which panics on arguments shorter than two bytes and makes
            // the documented positional module argument unreachable
            // (`args.module` was never assigned anywhere).
            let flag = match raw_arg.strip_prefix("--") {
                Some(flag) => flag,
                None => {
                    args.module = Some(raw_arg);
                    continue;
                }
            };
            let mut splits = flag.split('=');
            match splits.next().unwrap() {
                "max-machines" => args.max_machines = parse(&mut splits),
                "scramble" => args.scramble = true,
                "fail-io" => args.fail_io = Some(parse(&mut splits)),
                "fuzz" => args.fuzz = Some(parse(&mut splits)),
                other => panic!("unknown option: {}, {}", other, USAGE),
            }
        }
        args
    }
}
56 changes: 56 additions & 0 deletions src/ast.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
/// A parsed puke source module: its declared behavior plus its top-level
/// items.
pub struct Module {
    behavior: Behavior,
    items: Vec<Item>,
}

/// A named product type with typed fields.
pub struct Struct {
    name: String,
    fields: Vec<Field>,
}

/// A named sum type; each variant is modeled as a `Struct`.
pub struct Enum {
    name: String,
    variants: Vec<Struct>,
}

/// One named, typed slot in a struct definition or argument list.
pub struct Field {
    pub name: String,
    pub ty: Type,
}

/// Placeholder for type information — carries no data yet.
pub struct Type;

/// A function, represented as the set of its clauses (sub-functions).
pub struct Function {
    subfunctions: Vec<SubFunction>,
}

/// A single clause of a function: name, return type, arguments and body.
pub struct SubFunction {
    // implicit arity in arguments length
    name: String,
    ret: Type,
    arguments: Vec<Field>,
    statements: Vec<Statement>,
}

/// A statement in a function body.
pub enum Statement {
    /// Bind the result of an expression to a name.
    Assign(String, Expression),
    /// Evaluate an expression for its value or effects.
    Expression(Expression),
}

/// An expression. Only call and case forms are modeled so far, and
/// neither variant carries payload data yet.
pub enum Expression {
    Call,
    Case,
}

/// A top-level item in a module.
pub enum Item {
    /// Bring another module into scope by path.
    Import { path: String },
    /// Expose a function, identified by name and arity, to other modules.
    Export { name: String, arity: usize },
    /// A struct definition.
    Struct(Struct),
    /// A function definition.
    Function(Function),
}

/// The high-level behavior a module implements.
pub enum Behavior {
    Http,
    Grpc,
    StateMachine,
}
66 changes: 66 additions & 0 deletions src/cache_padded.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
/// Vendored and simplified from crossbeam-utils.
use core::fmt;
use core::ops::{Deref, DerefMut};

// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
//
// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128 byte cache line size
// Sources:
// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
//
#[cfg_attr(
    any(target_arch = "x86_64", target_arch = "aarch64"),
    repr(align(128))
)]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
/// Over-aligns the wrapped value to a cache-line boundary (128 bytes on
/// x86_64/aarch64, 64 elsewhere) so adjacent values don't share a cache
/// line, avoiding false sharing between threads.
pub struct CachePadded<T> {
    value: T,
}

#[allow(unsafe_code)]
// SAFETY: CachePadded adds only alignment padding around `value` and holds
// no other state, so it is Send exactly when the wrapped T is Send.
unsafe impl<T: Send> Send for CachePadded<T> {}

#[allow(unsafe_code)]
// SAFETY: as above — shared references expose nothing beyond the inner T,
// so Sync follows from T: Sync.
unsafe impl<T: Sync> Sync for CachePadded<T> {}

impl<T> CachePadded<T> {
    /// Pads and aligns a value to the length of a cache line.
    ///
    /// `const`, so padded values can live in statics.
    pub const fn new(t: T) -> CachePadded<T> {
        CachePadded { value: t }
    }
}

impl<T> Deref for CachePadded<T> {
    type Target = T;

    /// Borrows the padded inner value.
    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T> DerefMut for CachePadded<T> {
    /// Mutably borrows the padded inner value.
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
    /// Formats as `CachePadded { value: ... }`, delegating to the inner
    /// value's `Debug` impl.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CachePadded").field("value", &self.value).finish()
    }
}

impl<T> From<T> for CachePadded<T> {
    /// Wraps `t` in cache-line padding; equivalent to `CachePadded::new(t)`.
    fn from(t: T) -> Self {
        CachePadded::new(t)
    }
}
105 changes: 105 additions & 0 deletions src/debug_delay.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
#![allow(clippy::float_arithmetic)]

use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

use crate::Lazy;

/// This function is useful for inducing random jitter into our atomic
/// operations, shaking out more possible interleavings quickly. It gets
/// fully eliminated by the compiler in non-test code.
/// This function is useful for inducing random jitter into our atomic
/// operations, shaking out more possible interleavings quickly. It gets
/// fully eliminated by the compiler in non-test code.
// NOTE(review): nothing visible in this file gates the body on a test
// build (no cfg), so the "fully eliminated in non-test code" claim rests
// on call sites or optimizer behavior — confirm.
pub fn debug_delay() {
    use std::thread;
    use std::time::Duration;

    // Total number of debug_delay calls observed across all threads.
    static GLOBAL_DELAYS: AtomicUsize = AtomicUsize::new(0);

    // Exclusive upper bound on the random sleep, in microseconds;
    // read from the environment once, defaulting to 100.
    static INTENSITY: Lazy<u32, fn() -> u32> = Lazy::new(|| {
        std::env::var("SLED_LOCK_FREE_DELAY_INTENSITY")
            .unwrap_or_else(|_| "100".into())
            .parse()
            .expect(
                "SLED_LOCK_FREE_DELAY_INTENSITY must be set to a \
                 non-negative integer (ideally below 1,000,000)",
            )
    });

    // When nonzero, each call has a 1-in-CRASH_CHANCE chance of
    // simulating a hard crash via process::exit; defaults to 0 (off).
    static CRASH_CHANCE: Lazy<u32, fn() -> u32> = Lazy::new(|| {
        std::env::var("SLED_CRASH_CHANCE")
            .unwrap_or_else(|_| "0".into())
            .parse()
            .expect(
                "SLED_CRASH_CHANCE must be set to a \
                 non-negative integer (ideally below 50,000)",
            )
    });

    // This thread's view of the delay counter, used below to detect
    // whether any other thread has called debug_delay since we last did.
    thread_local!(
        static LOCAL_DELAYS: std::cell::RefCell<usize> = std::cell::RefCell::new(0)
    );

    if cfg!(feature = "miri_optimizations") {
        // Each interaction with LOCAL_DELAYS adds more stacked borrows
        // tracking information, and Miri is single-threaded anyway.
        return;
    }

    // Bump the global counter, then fold the new high-water mark into the
    // thread-local counter while remembering this thread's previous value.
    let global_delays = GLOBAL_DELAYS.fetch_add(1, Relaxed);
    let local_delays = LOCAL_DELAYS.with(|ld| {
        let mut ld = ld.borrow_mut();
        let old = *ld;
        *ld = std::cmp::max(global_delays + 1, *ld + 1);
        old
    });

    // Simulated crash for crash-recovery testing; exit code 9 marks it.
    if *CRASH_CHANCE > 0 && random(*CRASH_CHANCE) == 0 {
        std::process::exit(9)
    }

    if global_delays == local_delays {
        // no other threads seem to be
        // calling this, so we may as
        // well skip it
        return;
    }

    // Roughly 0.1% of calls sleep for a random duration below INTENSITY
    // microseconds to perturb thread timing.
    if random(1000) == 1 {
        let duration = random(*INTENSITY);

        #[allow(clippy::cast_possible_truncation)]
        #[allow(clippy::cast_sign_loss)]
        thread::sleep(Duration::from_micros(u64::from(duration)));
    }

    // Half of all calls yield to the scheduler to encourage interleavings.
    if random(2) == 0 {
        thread::yield_now();
    }
}

/// Generates a random number in `0..n`.
fn random(n: u32) -> u32 {
use std::cell::Cell;
use std::num::Wrapping;

thread_local! {
static RNG: Cell<Wrapping<u32>> = Cell::new(Wrapping(1_406_868_647));
}

#[allow(clippy::cast_possible_truncation)]
RNG.try_with(|rng| {
// This is the 32-bit variant of Xorshift.
//
// Source: https://en.wikipedia.org/wiki/Xorshift
let mut x = rng.get();
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
rng.set(x);

// This is a fast alternative to `x % n`.
//
// Author: Daniel Lemire
// Source: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
(u64::from(x.0).wrapping_mul(u64::from(n)) >> 32) as u32
})
.unwrap_or(0)
}
Loading

0 comments on commit 96927dc

Please sign in to comment.