-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathshared_buffer.rs
183 lines (159 loc) · 6 KB
/
shared_buffer.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
use std::os::fd::{AsRawFd, OwnedFd};
use std::ptr;
use std::sync::atomic::{AtomicI8, Ordering};
use std::ffi::c_void;
use nix::sys::mman::{mmap, munmap, shm_open, shm_unlink, MapFlags, ProtFlags};
use nix::sys::stat::Mode;
use nix::unistd::{close,ftruncate};
use crate::matrix;
/// A class to perform matrix interchange between processes.
///
/// This buffer uses shared memory as the most efficient IPC within a
/// node. I use socket communication only for the initial connection.
/// After that initial "sync" the Server creates an instance of this
/// class and all the following communications between the two processes
/// use this shared memory region which is more than two times faster.
///
/// Using shared memory also avoids the bottleneck in the socket when
/// more than one client is connected.
///
/// The communication process is pretty simple. There is an atomic
/// memory region in the beginning of the buffer (ready_flag).
///
/// 0. When the server receives a connection request (in the socket),
///    it creates a new thread and that thread attempts to construct this buffer.
/// 1. The server's thread initializes the flag to FALSE after the reserve.
/// 2. Then it informs the client (using the socket).
/// 3. The client with the id information creates a "mirror" buffer sharing the memory.
/// 4. The client sets the information in the payload and sets the flag to TRUE.
/// 5. When the server's thread finds that the flag is on true, it starts the
///    read process and writes back the information in the same place when done.
/// 6. Then it sets the flag to false again.
#[derive(Debug)]
pub struct SharedBuffer<'a> {
    // POSIX shared-memory object name; derived from the id pair in `new()`.
    shm_name: String,
    // This endpoint's id (0 for the server side).
    id: i8,
    // The remote peer's id.
    rid: i8,
    // Total mapping size: flag slot (align_of::<u128>() bytes) + payload.
    shm_full_size: usize,
    // Owned descriptor of the shared-memory object; closed automatically on drop.
    shm_fd: OwnedFd,
    // Base address of the mmap'ed region.
    ptr: ptr::NonNull<c_void>,
    // Atomic hand-over flag living in the first byte of the mapping.
    ready_flag: &'a AtomicI8,
    // Start of the payload area, right after the (padded) flag slot.
    payload: *mut c_void,
}
impl SharedBuffer<'_> {
    /// Create (server side, `id == 0`) or attach to (client side) the shared
    /// memory region and map it into this process.
    ///
    /// The first `align_of::<u128>()` bytes of the mapping hold the atomic
    /// ready flag; the payload starts immediately after, so it stays
    /// 16-byte aligned.
    ///
    /// # Errors
    /// Propagates any `nix` error from `shm_open`, `ftruncate` or `mmap`.
    /// (The previous version `expect`ed on these calls, aborting the thread
    /// on conditions a caller could handle — e.g. a client racing the
    /// server's `shm_open`.)
    pub fn new(id: i8, rid: i8, payload_size: usize) -> nix::Result<Self>
    {
        // Both peers derive the same object name from the id pair.
        let shm_name: String = format!("/rust_ipc_shm_{}", std::cmp::max(id, rid));
        let shm_full_size: usize = align_of::<u128>() + payload_size;
        let shm_fd: OwnedFd = if id == 0 {
            // Server: create the object and give it its final size.
            let fd = shm_open(
                shm_name.as_str(),
                nix::fcntl::OFlag::O_CREAT | nix::fcntl::OFlag::O_RDWR,
                Mode::S_IRWXU,
            )?;
            ftruncate(&fd, shm_full_size as i64)?;
            fd
        } else {
            // Client: the object must have been created by the server already.
            shm_open(shm_name.as_str(), nix::fcntl::OFlag::O_RDWR, Mode::empty())?
        };
        // SAFETY: `shm_fd` is a valid shared-memory descriptor and the object
        // is at least `shm_full_size` bytes long (ftruncate above for the
        // server; for a client the server sized it before the socket "sync").
        let ptr = unsafe {
            mmap(
                None,
                std::num::NonZeroUsize::new(shm_full_size)
                    .expect("shm_full_size >= align_of::<u128>() > 0"),
                ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
                MapFlags::MAP_SHARED,
                &shm_fd,
                0,
            )?
        };
        // SAFETY: the mapping is page-aligned, which satisfies AtomicI8's
        // alignment, and the flag byte lies inside the mapped region.
        let ready_flag: &AtomicI8 = unsafe { &*(ptr.as_ptr() as *mut AtomicI8) };
        // SAFETY: the payload offset is within the mapping
        // (shm_full_size = align_of::<u128>() + payload_size).
        let payload: *mut c_void = unsafe { ptr.byte_add(align_of::<u128>()).as_ptr() };
        // Protocol step 1: only the server initializes the flag on creation.
        if id == 0 {
            ready_flag.store(0, Ordering::SeqCst);
        }
        Ok(Self { shm_name, id, rid, shm_full_size, shm_fd, ptr, ready_flag, payload })
    }

    /// Get the buffer id (same as client id).
    // NOTE(review): this returns the *remote* id (`rid`), which on the server
    // side is indeed the client's id; confirm the intent for client instances.
    pub fn id(&self) -> i8
    {
        self.rid
    }

    /// Get the payload start address (first byte after the flag slot).
    pub fn payload(&self) -> *mut c_void
    {
        self.payload
    }

    /// Change the flag value to notify the peer we are done.
    ///
    /// Atomically flips the flag from the remote id to our id. Returns
    /// `true` on success, `false` when the flag did not hold the remote id
    /// (e.g. the peer stored a negative error/action value instead).
    pub fn notify(&self) -> bool
    {
        self.ready_flag
            .compare_exchange(
                self.rid,         // expected: the remote wrote last
                self.id,          // new value: hand the turn to us
                Ordering::SeqCst, // success memory ordering
                Ordering::SeqCst, // failure memory ordering
            )
            .is_ok()
    }

    /// Notify the peer of an out-of-band action (e.g. disconnect).
    ///
    /// Unconditionally stores `action` in the flag; by convention action
    /// codes are negative so they cannot collide with peer ids.
    ///
    /// # Panics
    /// Panics if `action` is not negative.
    pub fn notify_action(&self, action: i8)
    {
        assert!(action < 0, "Action notification value must be negative");
        self.ready_flag.store(action, Ordering::SeqCst);
    }

    /// Effectively write the matrix to the shared payload.
    pub fn send<T, S>(&self, matrix: &matrix::MatrixTemp<T, S>)
    where
        T: matrix::Numeric64,
        S: matrix::SliceOrVec<T>,
        rand::distributions::Standard: rand::prelude::Distribution<T>,
    {
        matrix.to_buffer(self.payload);
    }

    /// Poll the flag until the peer replies.
    ///
    /// Returns `true` when the remote has written (flag == rid) and there is
    /// work to do, `false` when the remote signalled an error/disconnect
    /// (negative flag) or the flag holds an unexpected value.
    pub fn wait_response(&self) -> bool
    {
        loop {
            let ready: i8 = self.ready_flag.load(Ordering::SeqCst);
            if ready == self.id {
                // We wrote last; the peer has not answered yet, so yield.
                std::thread::yield_now();
            } else if ready == self.rid {
                // The remote wrote, so we have work to do.
                return true;
            } else if ready < 0 {
                // The remote sets negative values on error, so close
                // this connection.
                return false;
            } else {
                // We should never be here: no other client is
                // intended to put its id here.
                return false;
            }
        }
    }

    /// Effectively read the matrix from the shared payload.
    pub fn receive(&mut self) -> matrix::MatrixBorrow::<f64>
    {
        matrix::MatrixBorrow::<f64>::from_buffer(self.payload)
    }
}
impl Drop for SharedBuffer<'_> {
    /// Unmap the shared region; the server additionally unlinks the
    /// shared-memory object name.
    ///
    /// `shm_fd` is an `OwnedFd`, so its descriptor is closed automatically
    /// when this struct is dropped. The previous explicit `close()` call
    /// double-closed the fd: the second close (from `OwnedFd::drop`) could
    /// hit EBADF — or worse, an unrelated, freshly reused descriptor in a
    /// multithreaded server.
    ///
    /// Cleanup failures are logged instead of `unwrap`ed: panicking inside
    /// `drop` during an unwind aborts the whole process.
    fn drop(&mut self) {
        // SAFETY: `ptr`/`shm_full_size` describe the mapping created in
        // `new()`, and it is unmapped exactly once, here.
        if let Err(e) = unsafe { munmap(self.ptr, self.shm_full_size) } {
            eprintln!("SharedBuffer: munmap of {} failed: {}", self.shm_name, e);
        }
        // Only the server (id == 0) owns the name and removes it.
        if self.id == 0 {
            if let Err(e) = shm_unlink(self.shm_name.as_str()) {
                eprintln!("SharedBuffer: shm_unlink of {} failed: {}", self.shm_name, e);
            }
        }
        // `self.shm_fd` (OwnedFd) closes its descriptor right after this body.
    }
}