Remove C interface for now pending a full rewrite; add address sanitizer to CI.

Deukhoofd 2022-06-16 18:25:25 +02:00
parent ff541b0696
commit bd646df225
Signed by: Deukhoofd
GPG Key ID: F63E044490819F6F
11 changed files with 11 additions and 895 deletions


@@ -20,6 +20,15 @@ steps:
- cargo miri test --release --color=always
depends_on:
- test-debug-linux
- name: test-address-sanitizer
image: deukhoofd/linux64builder
environment:
RUSTFLAGS: -Zsanitizer=address
RUSTDOCFLAGS: -Zsanitizer=address
commands:
- cargo test -Zbuild-std --target x86_64-unknown-linux-gnu --color=always
depends_on:
- test-debug-linux
- name: test-coverage
image: deukhoofd/linux64builder
commands:
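
The new step runs the test suite under AddressSanitizer; `-Zsanitizer=address` is a nightly-only flag, and `-Zbuild-std` rebuilds the standard library so it is instrumented as well. A minimal, hypothetical test (not part of this crate) illustrating the class of memory bug the step is meant to catch:

// Hypothetical: under `RUSTFLAGS=-Zsanitizer=address`, reading freed memory
// makes the test binary abort with a heap-use-after-free report.
#[test]
fn asan_catches_use_after_free() {
    let ptr: *mut u32 = Box::into_raw(Box::new(42));
    unsafe { drop(Box::from_raw(ptr)) }; // the allocation is freed here
    // Uncommenting the read below fails the sanitizer step:
    // let _ = unsafe { *ptr };
}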


@@ -1,3 +1,5 @@
cargo-features = ["profile-rustflags"]
[package]
name = "pkmn_lib"
version = "0.1.0"
@@ -46,8 +48,6 @@ chrono = "0.4.19"
# Used for RNG
rand = "0.8.5"
rand_pcg = "0.3.1"
failure = "0.1.8"
failure_derive = "0.1.8"
hashbrown = "0.12.1"
indexmap = "1.8.2"
parking_lot = "0.12.1"


@@ -1,209 +0,0 @@
#![allow(dead_code)]
use std::{
marker::PhantomData,
panic::{RefUnwindSafe, UnwindSafe},
ptr, slice,
};
pub(crate) mod thread_bound;
use self::thread_bound::ThreadBound;
use super::is_null::IsNull;
/*
The handles here are wrappers for a shared `&T` and an exclusive `&mut T`.
They protect from data races, but don't protect from use-after-free bugs.
The caller is expected to maintain that invariant, which in .NET can be
achieved using `SafeHandle`s.
*/
/**
A shared handle that can be accessed concurrently by multiple threads.
The interior value can be treated like `&T`.
Consumers must ensure a handle is not used again after it has been deallocated.
*/
#[repr(transparent)]
pub struct HandleShared<'a, T: ?Sized>(*const T, PhantomData<&'a T>);
unsafe_impl!("The handle is semantically `&T`" => impl<'a, T: ?Sized> Send for HandleShared<'a, T> where &'a T: Send {});
unsafe_impl!("The handle is semantically `&T`" => impl<'a, T: ?Sized> Sync for HandleShared<'a, T> where &'a T: Sync {});
impl<'a, T: ?Sized + RefUnwindSafe> UnwindSafe for HandleShared<'a, T> {}
impl<'a, T> HandleShared<'a, T>
where
HandleShared<'a, T>: Send + Sync,
{
pub(super) fn alloc(value: T) -> Self
where
T: 'static,
{
let v = Box::new(value);
HandleShared(Box::into_raw(v), PhantomData)
}
pub(super) fn as_ref(&self) -> &T {
unsafe_block!("We own the interior value" => &*self.0)
}
unsafe_fn!("There are no other live references and the handle won't be used again" =>
pub(super) fn dealloc<R>(handle: Self, f: impl FnOnce(T) -> R) -> R {
let v = Box::from_raw(handle.0 as *mut T);
f(*v)
});
}
/**
A non-shared handle that cannot be accessed by multiple threads.
The interior value can be treated like `&mut T`.
The handle is bound to the thread that it was created on to ensure
there's no possibility of data races. Note that if reverse P/Invoke is supported,
it's possible to mutably alias the handle from the same thread when the reverse
call re-enters the FFI using the same handle. This is technically undefined behaviour.
The handle _can_ be deallocated from a different thread than the one that created it.
Consumers must ensure a handle is not used again after it has been deallocated.
*/
#[repr(transparent)]
pub struct HandleExclusive<'a, T: ?Sized>(*mut ThreadBound<T>, PhantomData<&'a mut T>);
unsafe_impl!("The handle is semantically `&mut T`" => impl<'a, T: ?Sized> Send for HandleExclusive<'a, T> where &'a mut ThreadBound<T>: Send {});
unsafe_impl!("The handle uses `ThreadBound` for synchronization" => impl<'a, T: ?Sized> Sync for HandleExclusive<'a, T> where &'a mut ThreadBound<T>: Sync {});
impl<'a, T: ?Sized + RefUnwindSafe> UnwindSafe for HandleExclusive<'a, T> {}
impl<'a, T> HandleExclusive<'a, T>
where
HandleExclusive<'a, T>: Send + Sync,
{
pub(super) fn alloc(value: T) -> Self
where
T: 'static,
{
let v = Box::new(ThreadBound::new(value));
HandleExclusive(Box::into_raw(v), PhantomData)
}
pub(super) fn as_mut(&mut self) -> &mut T {
unsafe_block!("We own the interior value" => &mut *(*self.0).get_raw())
}
unsafe_fn!("There are no other live references and the handle won't be used again" =>
pub(super) fn dealloc<R>(handle: Self, f: impl FnOnce(T) -> R) -> R
where
T: Send,
{
let v = Box::from_raw(handle.0);
f(v.into_inner())
});
}
/**
An initialized parameter passed by shared reference.
*/
#[repr(transparent)]
pub struct Ref<'a, T: ?Sized>(*const T, PhantomData<&'a T>);
impl<'a, T: ?Sized + RefUnwindSafe> UnwindSafe for Ref<'a, T> {}
unsafe_impl!("The handle is semantically `&mut T`" => impl<'a, T: ?Sized> Send for Ref<'a, T> where &'a T: Send {});
unsafe_impl!("The handle uses `ThreadBound` for synchronization" => impl<'a, T: ?Sized> Sync for Ref<'a, T> where &'a T: Sync {});
impl<'a, T: ?Sized> Ref<'a, T> {
unsafe_fn!("The pointer must be nonnull and will remain valid" => pub fn as_ref(&self) -> &T {
&*self.0
});
}
impl<'a> Ref<'a, u8> {
unsafe_fn!("The pointer must be nonnull, the length is correct, and will remain valid" => pub fn as_bytes(&self, len: usize) -> &[u8] {
slice::from_raw_parts(self.0, len)
});
}
/**
An initialized parameter passed by exclusive reference.
*/
#[repr(transparent)]
pub struct RefMut<'a, T: ?Sized>(*mut T, PhantomData<&'a mut T>);
impl<'a, T: ?Sized + RefUnwindSafe> UnwindSafe for RefMut<'a, T> {}
unsafe_impl!("The handle is semantically `&mut T`" => impl<'a, T: ?Sized> Send for RefMut<'a, T> where &'a mut T: Send {});
unsafe_impl!("The handle uses `ThreadBound` for synchronization" => impl<'a, T: ?Sized> Sync for RefMut<'a, T> where &'a mut T: Sync {});
impl<'a, T: ?Sized> RefMut<'a, T> {
unsafe_fn!("The pointer must be nonnull and will remain valid" => pub fn as_mut(&mut self) -> &mut T {
&mut *self.0
});
}
impl<'a> RefMut<'a, u8> {
unsafe_fn!("The pointer must be nonnull, the length is correct, and will remain valid" => pub fn as_bytes_mut(&mut self, len: usize) -> &mut [u8] {
slice::from_raw_parts_mut(self.0, len)
});
}
/**
An uninitialized, assignable out parameter.
*/
#[repr(transparent)]
pub struct Out<'a, T: ?Sized>(*mut T, PhantomData<&'a mut T>);
impl<'a, T: ?Sized + RefUnwindSafe> UnwindSafe for Out<'a, T> {}
unsafe_impl!("The handle is semantically `&mut T`" => impl<'a, T: ?Sized> Send for Out<'a, T> where &'a mut T: Send {});
unsafe_impl!("The handle uses `ThreadBound` for synchronization" => impl<'a, T: ?Sized> Sync for Out<'a, T> where &'a mut T: Sync {});
impl<'a, T> Out<'a, T> {
unsafe_fn!("The pointer must be nonnull and valid for writes" => pub fn init(&mut self, value: T) {
ptr::write(self.0, value);
});
}
impl<'a> Out<'a, u8> {
unsafe_fn!("The pointer must be nonnull, not overlap the slice, must be valid for the length of the slice, and valid for writes" => pub fn init_bytes(&mut self, value: &[u8]) {
ptr::copy_nonoverlapping(value.as_ptr(), self.0, value.len());
});
unsafe_fn!("The slice must never be read from and must be valid for the length of the slice" => pub fn as_uninit_bytes_mut(&mut self, len: usize) -> &mut [u8] {
slice::from_raw_parts_mut(self.0, len)
});
}
impl<'a, T: ?Sized> IsNull for HandleExclusive<'a, T> {
fn is_null(&self) -> bool {
self.0.is_null()
}
}
impl<'a, T: ?Sized + Sync> IsNull for HandleShared<'a, T> {
fn is_null(&self) -> bool {
self.0.is_null()
}
}
impl<'a, T: ?Sized> IsNull for Ref<'a, T> {
fn is_null(&self) -> bool {
self.0.is_null()
}
}
impl<'a, T: ?Sized + Sync> IsNull for RefMut<'a, T> {
fn is_null(&self) -> bool {
self.0.is_null()
}
}
impl<'a, T: ?Sized> IsNull for Out<'a, T> {
fn is_null(&self) -> bool {
self.0.is_null()
}
}
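
For context, a minimal crate-internal sketch of the lifecycle these handles are designed around (hypothetical usage; the real callers are the generated FFI functions):

// A shared handle is allocated once, read through `as_ref`, and must be
// deallocated exactly once, after which the consumer may never touch it again.
fn example_shared_handle_lifecycle() {
    let handle: HandleShared<String> = HandleShared::alloc(String::from("pikachu"));
    assert_eq!(handle.as_ref(), "pikachu");
    unsafe_block!("No other references exist and the handle is never used again" => {
        HandleShared::dealloc(handle, drop)
    });
}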


@@ -1,260 +0,0 @@
use std::{
cell::UnsafeCell,
collections::HashMap,
marker::PhantomData,
mem,
ops::{Deref, DerefMut},
panic::{RefUnwindSafe, UnwindSafe},
sync::{
atomic::{AtomicUsize, Ordering},
Mutex,
},
};
type ThreadId = usize;
type ValueId = usize;
static GLOBAL_ID: AtomicUsize = AtomicUsize::new(0);
thread_local!(static THREAD_ID: usize = next_thread_id());
fn next_thread_id() -> usize {
GLOBAL_ID.fetch_add(1, Ordering::SeqCst)
}
fn get_thread_id() -> usize {
THREAD_ID.with(|x| *x)
}
thread_local!(static VALUE_ID: UnsafeCell<usize> = UnsafeCell::new(0));
fn next_value_id() -> usize {
VALUE_ID.with(|x| {
unsafe_block!("The value never has overlapping mutable aliases" => {
let x = x.get();
let next = *x;
*x += 1;
next
})
})
}
struct Registry(HashMap<ValueId, (UnsafeCell<*mut ()>, Box<dyn Fn(&UnsafeCell<*mut ()>)>)>);
impl Drop for Registry {
fn drop(&mut self) {
// Remove this thread from the garbage list
let thread_id = get_thread_id();
{
let mut garbage = GARBAGE.lock().expect("failed to lock garbage queue");
let _ = garbage.remove(&thread_id);
}
// Drop any remaining values in the registry
for (_, value) in self.0.iter() {
(value.1)(&value.0);
}
}
}
thread_local!(static REGISTRY: UnsafeCell<Registry> = UnsafeCell::new(Registry(Default::default())));
lazy_static::lazy_static! {
static ref GARBAGE: Mutex<HashMap<ThreadId, Vec<ValueId>>> = Mutex::new(HashMap::new());
}
/**
A value that's bound to the thread it's created on.
*/
pub struct ThreadBound<T: ?Sized> {
thread_id: ThreadId,
inner: UnsafeCell<T>,
}
impl<T> ThreadBound<T> {
pub(super) fn new(inner: T) -> Self {
ThreadBound {
thread_id: get_thread_id(),
inner: UnsafeCell::new(inner),
}
}
}
/*
We don't need to check the thread id when moving out of the inner
value so long as the inner value is itself `Send`. This allows
the .NET runtime to potentially finalize a value on another thread.
*/
impl<T: Send> ThreadBound<T> {
pub(super) fn into_inner(self) -> T {
self.inner.into_inner()
}
}
impl<T: ?Sized> ThreadBound<T> {
fn check(&self) {
let current = get_thread_id();
if self.thread_id != current {
panic!("attempted to access resource from a different thread");
}
}
pub(super) fn get_raw(&self) -> *mut T {
self.check();
self.inner.get()
}
}
impl<T: ?Sized + UnwindSafe> UnwindSafe for ThreadBound<T> {}
impl<T: ?Sized + RefUnwindSafe> RefUnwindSafe for ThreadBound<T> {}
unsafe_impl!("The inner value is safe to send to another thread" => impl<T: ?Sized + Send> Send for ThreadBound<T> {});
unsafe_impl!("The inner value can't actually be accessed concurrently" => impl<T: ?Sized> Sync for ThreadBound<T> {});
/**
A thread-bound value that can be safely dropped from a different thread.
The value is allocated in thread-local storage. When dropping, if the value
is being accessed from a different thread it will be put onto a garbage queue
for cleanup instead of being moved onto the current thread.
*/
// NOTE: We require `T: 'static` because the value may live as long
// as the current thread
pub(crate) struct DeferredCleanup<T: 'static> {
thread_id: ThreadId,
value_id: ValueId,
_m: PhantomData<*mut T>,
}
impl<T: 'static> Drop for DeferredCleanup<T> {
fn drop(&mut self) {
if mem::needs_drop::<T>() {
if self.is_valid() {
unsafe_block!("The value exists on the current thread" => {
self.into_inner_unchecked();
});
} else {
let mut garbage = GARBAGE.lock().expect("failed to lock garbage queue");
let garbage = garbage.entry(self.thread_id).or_insert_with(Vec::new);
garbage.push(self.value_id);
}
}
}
}
impl<T: 'static> DeferredCleanup<T> {
pub fn new(value: T) -> Self {
let thread_id = get_thread_id();
let value_id = next_value_id();
// Check for any garbage that needs cleaning up
// If we can't acquire a lock to the global queue
// then we just continue on.
let garbage = {
GARBAGE
.try_lock()
.ok()
.and_then(|mut garbage| garbage.remove(&thread_id))
};
if let Some(garbage) = garbage {
let remove = |value_id: ValueId| {
REGISTRY.with(|registry| {
unsafe_block!("The value never has overlapping mutable aliases" => {
let registry = &mut (*registry.get()).0;
registry.remove(&value_id)
})
})
};
for value_id in garbage {
if let Some((data, drop)) = remove(value_id) {
drop(&data);
}
}
}
REGISTRY.with(|registry| {
unsafe_block!("The value never has overlapping mutable aliases" => {
(*registry.get()).0.insert(
value_id,
(
UnsafeCell::new(Box::into_raw(Box::new(value)) as *mut _),
Box::new(|cell| {
let b: Box<T> = Box::from_raw(*(cell.get() as *mut *mut T));
mem::drop(b);
}),
),
);
})
});
DeferredCleanup {
thread_id,
value_id,
_m: PhantomData,
}
}
fn with_value<F: FnOnce(&UnsafeCell<Box<T>>) -> R, R>(&self, f: F) -> R {
let current_thread = get_thread_id();
if current_thread != self.thread_id {
panic!("attempted to access resource from a different thread");
}
REGISTRY.with(|registry| {
unsafe_block!("There are no active mutable references" => {
let registry = &(*registry.get()).0;
if let Some(item) = registry.get(&self.value_id) {
f(mem::transmute(&item.0))
} else {
panic!("attempted to access resource from a different thread");
}
})
})
}
fn is_valid(&self) -> bool {
let current_thread = get_thread_id();
let has_value = unsafe_block!("There are no active mutable references" => {
REGISTRY
.try_with(|registry| (*registry.get()).0.contains_key(&self.value_id))
.unwrap_or(false)
});
self.thread_id == current_thread && has_value
}
unsafe_fn!("The value must originate on the current thread" => fn into_inner_unchecked(&mut self) -> T {
let ptr = REGISTRY
.with(|registry| (*registry.get()).0.remove(&self.value_id))
.unwrap()
.0
.into_inner();
let value = Box::from_raw(ptr as *mut T);
*value
});
}
unsafe_impl!(
"The inner value is pinned to the current thread and isn't actually sent. \
Dropping from another thread will signal cleanup on the original" =>
impl<T: 'static> Send for DeferredCleanup<T> {});
impl<T: 'static> Deref for DeferredCleanup<T> {
type Target = T;
fn deref(&self) -> &T {
self.with_value(|value| unsafe_block!("The borrow of self protects the inner value" => &*value.get()))
}
}
impl<T: 'static> DerefMut for DeferredCleanup<T> {
fn deref_mut(&mut self) -> &mut T {
self.with_value(|value| unsafe_block!("The borrow of self protects the inner value" => &mut *value.get()))
}
}
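
A short sketch of the behaviour this buys (hypothetical, crate-internal): on the owning thread a `DeferredCleanup<T>` dereferences to `T`, while dropping it from another thread only queues its id in `GARBAGE`; the owning thread drains that queue on its next allocation, and `Registry`'s own `Drop` sweeps anything left when the thread exits.

fn example_deferred_cleanup() {
    let wrapped = DeferredCleanup::new(vec![1, 2, 3]);
    assert_eq!(wrapped.len(), 3); // `Deref` succeeds on the creating thread
    // Dropping on another thread defers cleanup instead of freeing in place:
    std::thread::spawn(move || drop(wrapped)).join().unwrap();
}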


@@ -1,55 +0,0 @@
use failure_derive::*;
#[derive(Debug, Fail)]
#[fail(display = "argument `{}` was null", arg)]
pub(super) struct Error {
pub(super) arg: &'static str,
}
/**
Whether or not a value passed across an FFI boundary is null.
*/
pub(super) trait IsNull {
fn is_null(&self) -> bool;
}
macro_rules! never_null {
($($t:ty),*) => {
$(
impl IsNull for $t {
fn is_null(&self) -> bool {
false
}
}
)*
}
}
impl<T: ?Sized> IsNull for *const T {
fn is_null(&self) -> bool {
<*const T>::is_null(*self)
}
}
impl<T: ?Sized> IsNull for *mut T {
fn is_null(&self) -> bool {
<*mut T>::is_null(*self)
}
}
never_null!(
usize,
isize,
u8,
u16,
u32,
u64,
u128,
i8,
i16,
i32,
i64,
i128,
crate::static_data::items::item_category::ItemCategory,
crate::static_data::items::item_category::BattleItemCategory
);


@@ -1,86 +0,0 @@
macro_rules! ffi {
($(fn $name:ident ( $( $arg_ident:ident : $arg_ty:ty),* ) -> PkmnResult $body:expr)*) => {
$(
#[allow(unsafe_code, unused_attributes)]
#[no_mangle]
pub unsafe extern "C" fn $name( $($arg_ident : $arg_ty),* ) -> PkmnResult {
#[allow(unused_mut)]
fn call( $(mut $arg_ident: $arg_ty),* ) -> PkmnResult {
$(
if $crate::c_interface::is_null::IsNull::is_null(&$arg_ident) {
return PkmnResult::argument_null().context($crate::c_interface::is_null::Error { arg: stringify!($arg_ident) });
}
)*
$body
PkmnResult::ok()
}
PkmnResult::catch(move || call( $($arg_ident),* ))
}
)*
};
}
// macro_rules! ffi_no_catch {
// ($(fn $name:ident ( $( $arg_ident:ident : $arg_ty:ty),* ) -> PkmnResult $body:expr)*) => {
// $(
// #[allow(unsafe_code, unused_attributes)]
// #[no_mangle]
// pub unsafe extern "cdecl" fn $name( $($arg_ident : $arg_ty),* ) -> PkmnResult {
// #[allow(unused_mut)]
// fn call( $(mut $arg_ident: $arg_ty),* ) -> PkmnResult {
// $(
// if $crate::c_interface::is_null::IsNull::is_null(&$arg_ident) {
// return PkmnResult::argument_null().context($crate::c_interface::is_null::Error { arg: stringify!($arg_ident) });
// }
// )*
//
// $body
// }
//
// call( $($arg_ident),* )
// }
// )*
// };
// }
/**
Allow a block of `unsafe` code with a reason.
The macro will expand to an `unsafe` block.
*/
macro_rules! unsafe_block {
($reason:tt => $body:expr) => {{
#[allow(unsafe_code)]
let __result = unsafe { $body };
__result
}};
}
/**
Allow an `unsafe` function with a reason.
The macro will expand to an `unsafe fn`.
*/
macro_rules! unsafe_fn {
($reason: tt => fn $name:ident $($body:tt)*) => {
unsafe_fn!($reason => pub(self) fn $name $($body)*);
};
($reason: tt => $publicity:vis fn $name:ident $($body:tt)*) => {
#[allow(unsafe_code)]
$publicity unsafe fn $name $($body)*
};
}
/**
Allow an `unsafe` trait implementation with a reason.
The macro will expand to an `unsafe impl`.
*/
macro_rules! unsafe_impl {
($reason: tt => impl $($body:tt)*) => {
#[allow(unsafe_code)]
unsafe impl $($body)*
};
}
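
For reference, a rough sketch of what `ffi!` generates for a single one-argument function (the function name and body here are hypothetical):

// ffi! { fn pkmnlib_example(value: *const u8) -> PkmnResult { ... } } expands to:
#[allow(unsafe_code, unused_attributes)]
#[no_mangle]
pub unsafe extern "C" fn pkmnlib_example(value: *const u8) -> PkmnResult {
    #[allow(unused_mut)]
    fn call(mut value: *const u8) -> PkmnResult {
        // One generated null guard per argument:
        if crate::c_interface::is_null::IsNull::is_null(&value) {
            return PkmnResult::argument_null()
                .context(crate::c_interface::is_null::Error { arg: "value" });
        }
        { /* user-supplied body */ }
        PkmnResult::ok()
    }
    // `catch` turns panics into an internal error instead of unwinding across the FFI boundary.
    PkmnResult::catch(move || call(value))
}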


@@ -1,7 +0,0 @@
#[macro_use]
mod macros;
pub(super) mod handle;
pub(super) mod is_null;
pub(super) mod pkmn_result;
mod static_data;


@@ -1,224 +0,0 @@
#![allow(dead_code)]
use failure::Fail;
use std::{
any::Any,
cell::RefCell,
fmt::Write,
panic::{catch_unwind, UnwindSafe},
sync::atomic::{AtomicU32, Ordering},
};
static LAST_ERR_ID: AtomicU32 = AtomicU32::new(0);
fn next_err_id() -> u32 {
LAST_ERR_ID.fetch_add(1, Ordering::SeqCst)
}
thread_local! {
static LAST_RESULT: RefCell<Option<LastResult>> = RefCell::new(None);
}
#[repr(C)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PkmnResult {
kind: Kind,
id: u32,
}
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Kind {
Ok,
ArgumentNull,
InternalError,
}
impl PkmnResult {
pub(super) fn ok() -> Self {
PkmnResult { kind: Kind::Ok, id: 0 }
}
pub(super) fn argument_null() -> Self {
PkmnResult {
kind: Kind::ArgumentNull,
id: next_err_id(),
}
}
pub(super) fn internal_error() -> Self {
PkmnResult {
kind: Kind::InternalError,
id: next_err_id(),
}
}
pub fn as_err(&self) -> Option<&'static str> {
match self.kind {
Kind::Ok => None,
Kind::ArgumentNull => Some("a required argument was null"),
Kind::InternalError => Some("an internal error occurred"),
}
}
pub(super) fn context(self, e: impl Fail) -> Self {
assert!(self.as_err().is_some(), "context can only be attached to errors");
let err = Some(format_error(&e));
LAST_RESULT.with(|last_result| {
*last_result.borrow_mut() = Some(LastResult { value: self, err });
});
self
}
pub(super) fn catch(f: impl FnOnce() -> Self + UnwindSafe) -> Self {
LAST_RESULT.with(|last_result| {
{
*last_result.borrow_mut() = None;
}
match catch_unwind(f) {
Ok(pkmn_result) => {
let extract_err = || pkmn_result.as_err().map(Into::into);
// Always set the last result so it matches what's returned.
// This `Ok` branch doesn't necessarily mean the result is ok,
// only that there wasn't a panic.
last_result
.borrow_mut()
.map_mut(|last_result| {
last_result.value = pkmn_result;
last_result.err.or_else_mut(extract_err);
})
.get_or_insert_with(|| LastResult {
value: pkmn_result,
err: extract_err(),
})
.value
}
Err(e) => {
let extract_panic = || extract_panic(&e).map(|s| format!("internal panic with '{}'", s));
// Set the last error to the panic message if it's not already set
last_result
.borrow_mut()
.map_mut(|last_result| {
last_result.err.or_else_mut(extract_panic);
})
.get_or_insert_with(|| LastResult {
value: PkmnResult::internal_error(),
err: extract_panic(),
})
.value
}
}
})
}
pub(super) fn with_last_result<R>(f: impl FnOnce(Option<(PkmnResult, Option<&str>)>) -> R) -> R {
LAST_RESULT.with(|last_result| {
let last_result = last_result.borrow();
let last_result = last_result.as_ref().map(|last_result| {
let msg = last_result
.value
.as_err()
.and_then(|_| last_result.err.as_ref().map(|msg| msg.as_ref()));
(last_result.value, msg)
});
f(last_result)
})
}
}
impl<E> From<E> for PkmnResult
where
E: Fail,
{
fn from(e: E) -> Self {
PkmnResult::internal_error().context(e)
}
}
#[derive(Debug)]
struct LastResult {
value: PkmnResult,
err: Option<String>,
}
fn format_error(err: &dyn Fail) -> String {
let mut error_string = String::new();
let mut causes = Some(err).into_iter().chain(err.iter_causes());
if let Some(cause) = causes.next() {
let _ = writeln!(error_string, "{}.", cause);
}
for cause in causes {
    let _ = writeln!(error_string, "   caused by: {}", cause);
}
if let Some(backtrace) = err.backtrace() {
let _ = writeln!(error_string, "backtrace: {}", backtrace);
}
error_string
}
fn extract_panic(err: &Box<dyn Any + Send + 'static>) -> Option<String> {
if let Some(err) = err.downcast_ref::<String>() {
Some(err.clone())
} else if let Some(err) = err.downcast_ref::<&'static str>() {
Some((*err).to_owned())
} else {
None
}
}
trait OptionMutExt<T> {
/**
Map and mutate an option in place.
*/
fn map_mut<F>(&mut self, f: F) -> &mut Self
where
F: FnOnce(&mut T);
/**
Replace an option if it doesn't contain a value.
*/
fn or_else_mut<F>(&mut self, f: F) -> &mut Self
where
F: FnOnce() -> Option<T>;
}
impl<T> OptionMutExt<T> for Option<T> {
fn map_mut<F>(&mut self, f: F) -> &mut Self
where
F: FnOnce(&mut T),
{
if let Some(ref mut t) = *self {
f(t)
}
self
}
fn or_else_mut<F>(&mut self, f: F) -> &mut Self
where
F: FnOnce() -> Option<T>,
{
if self.is_none() {
*self = f();
}
self
}
}
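
A crate-internal sketch of how the thread-local last result is meant to be surfaced to callers, for example through an error-message accessor (the function below is hypothetical):

// Copies the most recent error message, if any, into a caller-supplied buffer
// and returns the full message length so the caller can detect truncation.
pub(super) fn last_error_message(buf: &mut [u8]) -> usize {
    PkmnResult::with_last_result(|last| match last {
        Some((_, Some(msg))) => {
            let bytes = msg.as_bytes();
            let n = bytes.len().min(buf.len());
            buf[..n].copy_from_slice(&bytes[..n]);
            bytes.len()
        }
        _ => 0,
    })
}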


@@ -1,48 +0,0 @@
use super::super::handle::Out;
use super::super::pkmn_result::PkmnResult;
use crate::c_interface::handle::{thread_bound, HandleExclusive};
use crate::static_data::items::item::Item;
use std::collections::HashSet;
use std::ffi::CStr;
use std::os::raw::c_char;
use std::slice;
use thread_bound::DeferredCleanup;
#[repr(C)]
pub struct ItemC {
inner: DeferredCleanup<Item>,
}
type ItemHandle<'a> = HandleExclusive<'a, ItemC>;
ffi! {
fn pkmnlib_item_new(
name: *const c_char,
category: u8,
battle_category: u8,
price: i32,
flags: *mut *const c_char,
flags_length: usize,
out: Out<ItemHandle>
) -> PkmnResult {
unsafe {
let v = slice::from_raw_parts(flags, flags_length).to_vec();
let mut flags_map = HashSet::new();
for flag in v {
flags_map.insert(CStr::from_ptr(flag).to_str().unwrap().into());
}
let handle = ItemHandle::alloc(ItemC {
inner: thread_bound::DeferredCleanup::new(Item::new(
&CStr::from_ptr(name).to_str().unwrap().into(),
std::mem::transmute(category),
std::mem::transmute(battle_category),
price,
flags_map,
)),
});
out.init(handle);
}
}
}


@@ -1 +0,0 @@
mod item;


@@ -9,9 +9,6 @@ extern crate core;
use crate::dynamic_data::libraries::script_resolver::ScriptCategory;
#[cfg(feature = "c_interface")]
mod c_interface;
pub mod defines;
pub mod dynamic_data;
pub mod static_data;