Skip to content
Open
18 changes: 14 additions & 4 deletions include/eurydice/core.h
Original file line number Diff line number Diff line change
Expand Up @@ -173,31 +173,37 @@ static inline size_t core_num__usize__wrapping_mul(size_t x, size_t y) {
}

// Rotate x0 left by x1 bits, matching Rust's u64::rotate_left, which accepts
// any count and takes it modulo 64 (so rotate_left(64) is the identity).
// A C shift by >= 64 is undefined behavior, so both shift counts are masked;
// ((-x1) & 63) maps a count of 0 (mod 64) to a harmless right shift by 0.
static inline uint64_t core_num__u64__rotate_left(uint64_t x0, uint32_t x1) {
  return (x0 << (x1 & 63U)) | (x0 >> ((-x1) & 63U));
}

// Rotate x0 left by x1 bits, matching Rust's u32::rotate_left, which accepts
// any count and takes it modulo 32. A C shift by >= 32 is undefined behavior
// (the issue raised in review), so both shift counts are masked instead of
// asserting; ((-x1) & 31) maps a count of 0 (mod 32) to a right shift by 0.
static inline uint32_t core_num__u32__rotate_left(uint32_t x0, uint32_t x1) {
  return (x0 << (x1 & 31U)) | (x0 >> ((-x1) & 31U));
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if this function can be called with x1 == 32 (and I think it can), then your implementation exercises undefined behavior in C (which is bad)

for instance, a well-defined Rust program such as fn main () { assert_eq!(1u32.rotate_left(64), 1) } will generate undefined behavior here

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh, is it because of the <<? Would it help to assert(x1 < 32)?

And the same would apply to core_num__u64__rotate_left as well, I assume (where x1 should be less than 64).

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh, is it because of the <<?

yes, shifting by widths greater or equal than the bit width of the type is undefined behavior in C

consider the following Rust program:

fn main() {
    assert_eq!(1u32.rotate_left(32), 1);
}

if you compile and run it in Rust, you get a successful execution

Would it help to assert(x1 < 32)?

Unfortunately no, because if you compile the program above to C via Eurydice, using your suggestion of adding an assert, the program will start crashing.

And the same would apply to core_num__u64__rotate_left as well, I assume (where x1 should be less than 64).

You cannot assume that x1 is < 64 because callers are legally allowed to pick x1 == 64; instead, the implementation must be able to deal with any x1 (because Rust's rotate_left behaves that way).

There are ways to implement a rotate macro efficiently for cryptographic libraries. I think there's one called rotl in OpenSSL. Let's see what they do.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There's also an intrinsic apparently on some toolchains/platforms: https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/rotl-rotl64-rotr-rotr64?view=msvc-170

}

// In-place addition: Rust's AddAssign for i32 through a mutable borrow.
static inline void core_ops_arith__i32__add_assign(int32_t *x0, int32_t *x1) {
  *x0 += *x1;
}

// Bitwise AND of the byte read through a shared borrow with an immediate.
// The pointer is const-qualified: the pointee is never written.
static inline uint8_t Eurydice_bitand_pv_u8(const uint8_t *p, uint8_t v) {
  return (*p) & v;
}
// Right shift of the byte read through a shared borrow.
// NOTE(review): v is promoted-int shift, so it must be non-negative and small
// (callers generated from Rust u8 >> i32 should guarantee 0 <= v) — confirm.
static inline uint8_t Eurydice_shr_pv_u8(const uint8_t *p, int32_t v) {
  return (*p) >> v;
}
// Smaller of two u32 values (mirrors Rust's u32::min).
static inline uint32_t Eurydice_min_u32(uint32_t x, uint32_t y) {
  if (y < x) {
    return y;
  }
  return x;
}

// Monomorphized trait impl BitAnd<u8> for &u8 — forwards to the shared-borrow
// AND helper; the pointer parameter is const to match Eurydice_bitand_pv_u8.
static inline uint8_t
core_ops_bit__core__ops__bit__BitAnd_u8__u8__for__0__u8___bitand(
    const uint8_t *x0, uint8_t x1) {
  return Eurydice_bitand_pv_u8(x0, x1);
}

// Monomorphized trait impl Shr<i32> for &u8 — forwards to the shared-borrow
// shift helper; the pointer parameter is const to match Eurydice_shr_pv_u8.
static inline uint8_t
core_ops_bit__core__ops__bit__Shr_i32__u8__for__0__u8___shr(
    const uint8_t *x0, int32_t x1) {
  return Eurydice_shr_pv_u8(x0, x1);
}
Expand All @@ -209,6 +215,10 @@ core_num_nonzero_private___core__clone__Clone_for_core__num__nonzero__private__N
return *x0;
}

// Option::is_some for the tag-discriminated Option struct: tag 1 == Some.
// X is a pointer to the Option; the _0/_1 trait-clause arguments are unused.
#define core_option__core__option__Option_T__TraitClause_0___is_some(X, _0, \
_1) \
((X)->tag == 1)

#if defined(__cplusplus) && !defined(KRML_CXX17_COMPAT)
}
#endif
Expand Down
8 changes: 8 additions & 0 deletions include/eurydice/slice.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

// SLICES, ARRAYS, ETC.

Expand Down Expand Up @@ -42,6 +43,9 @@ typedef struct Eurydice_mut_borrow_slice_i16_s {
(KRML_CLITERAL(ret_t){EURYDICE_CFIELD(.ptr =)(ptr_)->data, \
EURYDICE_CFIELD(.meta =) len_})

// as_mut_slice is identical to as_slice at the C level: both build the same
// {ptr, meta} view over the array storage; mutability is Rust-level only.
#define core_array___Array_T__N___as_mut_slice(len_, ptr_, t, ret_t) \
core_array___Array_T__N___as_slice(len_, ptr_, t, ret_t)

// Clone for [T; N] is a plain value copy: dereference the source pointer and
// return the whole array struct by value (elements are assumed Copy-like
// at the C level — the element Clone impl is not invoked).
#define core_array__core__clone__Clone_for__Array_T__N___clone( \
len, src, elem_type, _ret_t) \
(*(src))
Expand All @@ -59,6 +63,10 @@ typedef struct Eurydice_mut_borrow_slice_i16_s {
// Comparing an array against a &mut slice is the same comparison as against
// a shared slice; mutability does not affect equality.
#define Eurydice_array_eq_slice_mut(sz, a1, s2, t, _) \
Eurydice_array_eq_slice_shared(sz, a1, s2, t, _)

// Structural equality of two shared slices: equal lengths and equal bytes.
// The zero-length short-circuit avoids passing a (possibly NULL) data
// pointer of an empty slice to memcmp, which is undefined behavior even
// with a size of 0. NOTE: s1 and s2 are each evaluated more than once —
// pass simple lvalues, not expressions with side effects.
#define Eurydice_slice_eq_shared(s1, s2, t, _) \
  ((s1)->meta == (s2)->meta && \
   ((s1)->meta == 0 || \
    memcmp((s1)->ptr, (s2)->ptr, (s1)->meta * sizeof(t)) == 0))

// DEPRECATED -- should no longer be generated
#define core_array_equality__core__cmp__PartialEq__Array_U__N___for__Array_T__N___eq( \
sz, a1, a2, t, _, _ret_t) \
Expand Down
Loading