
Commit 5b61b84

Modify the Dockerfile to install Eigen and change the working directory; add a simple Eigen example folder; experiment in simple_scatterv_gatherv.
1 parent: 34aaf42

File tree: 4 files changed, +71 -10 lines


Dockerfile (+3)
@@ -2,4 +2,7 @@ FROM alpine:latest
 RUN apk upgrade && apk update && apk add --no-cache g++ openmpi openmpi-dev openssh make
 ENV OMPI_ALLOW_RUN_AS_ROOT=1
 ENV OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1
+WORKDIR /usr/include
+RUN wget -O Eigen.zip https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.zip && unzip Eigen.zip && rm Eigen.zip
+WORKDIR /usr/code
 CMD /bin/sh
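Two implementation notes on this change: the first WORKDIR means the Eigen archive unpacks to /usr/include/eigen-3.4.0, which is exactly the EigenPath the new Makefile below points at, while the final WORKDIR /usr/code leaves the container shell in the directory where the project code is presumably mounted. Also note that wget and unzip are never installed by the apk add line; the build relies on the BusyBox applets that ship with the Alpine base image.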

code/simple_eigen/Makefile (+10, new file)
@@ -0,0 +1,10 @@
+CC=g++ # compiler for MPI c++ program
+files=simple_eigen.cpp # files to compile
+exe=./run.exe # name of executable
+EigenPath = /usr/include/eigen-3.4.0
+
+all:
+	$(CC) -I $(EigenPath) $(files) -o $(exe)
+
+clean:
+	rm $(exe)
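Usage follows the standard make workflow: make compiles simple_eigen.cpp with the -I flag pointing at the unpacked Eigen headers and produces ./run.exe, and make clean removes it. Because Eigen is header-only, the include path is all that is required; no linker flags are needed. As in any Makefile, the recipe lines under all: and clean: must be indented with a tab.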

code/simple_eigen/simple_eigen.cpp (+25, new file)
@@ -0,0 +1,25 @@
+#include <iostream>
+#include <Eigen/Dense>
+
+using Eigen::MatrixXd;
+
+int main()
+{
+    MatrixXd m(2,2);
+    m(0,0) = 3;
+    m(1,0) = 2.5;
+    m(0,1) = -1;
+    m(1,1) = m(1,0) + m(0,1);
+    std::cout << m << std::endl;
+}
+
+// int main(int argc, char **argv)
+// {
+
+
+// // wget -O Eigen.zip https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.zip
+// // unzip Eigen.zip
+// // rm Eigen.zip
+// // /usr/include
+// return 0;
+// }
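This is essentially Eigen's introductory example. Since m(1,1) = m(1,0) + m(0,1) = 2.5 + (-1) = 1.5, the program should print

      3  -1
    2.5 1.5

The same matrix can be filled in one statement with Eigen's comma initializer; a small equivalent sketch:

    // equivalent initialization, filling the matrix row by row
    MatrixXd m(2,2);
    m << 3.0, -1.0,
         2.5,  1.5;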

code/simple_scatterv_gatherv/scatterv_gatherv.cpp (+33 -10)
@@ -16,10 +16,10 @@ using namespace std;
 
 
 // function declarations
-vector<int> get_master_data(int& N, int& my_rank);
+vector<int> get_master_data(int N, int my_rank);
 tuple<int, int> initialize_MPI(int argc, char **argv);
-void print_rank_vector(vector<int> vec, int position, int my_rank);
-
+void print_rank_vector(vector<int>& vec, int position, int my_rank);
+vector<int> scatter_data(vector<int>& data, int my_rank, int num_proc);
 
 int main(int argc, char **argv)
 {
@@ -32,15 +32,13 @@ int main(int argc, char **argv)
     // construct data that will be scattered
     auto master_data = get_master_data(N, my_rank);
 
-    // create vector that will hold the scatter data
-    vector<int> data(2);
-
-    MPI_Barrier(MPI_COMM_WORLD); // blocking call
 
-    MPI_Scatter( master_data.data() , 2, MPI_INT, data.data() , 2, MPI_INT , 0, MPI_COMM_WORLD);
 
     print_rank_vector(data, 2, my_rank);
 
+
+    // MPI_Barrier(MPI_COMM_WORLD); // blocking call
+
     // // flag to say what to print
     // int print_flag = stoi(argv[2]);
 
@@ -101,6 +99,10 @@
 }
 
 tuple<int, int> initialize_MPI(int argc, char **argv){
+    /**
+     * Initializes MPI and returns the number of
+     * processes and the rank of the running process.
+     */
 
     // initialize MPI
     MPI_Init(&argc , &argv);
@@ -116,7 +118,12 @@ tuple<int, int> initialize_MPI(int argc, char **argv){
     return {num_proc,my_rank};
 }
 
-void print_rank_vector(vector<int> vec, int position, int my_rank){
+void print_rank_vector(vector<int>& vec, int position, int my_rank){
+    /**
+     * A simple helper function that will print the rank of the
+     * running process and then print the input vector up until
+     * the given position.
+     */
 
     for (int j = 0; j < position; j++)
     {
@@ -132,7 +139,7 @@ void print_rank_vector(vector<int> vec, int position, int my_rank){
     }
 
 
-vector<int> get_master_data(int& N, int& my_rank){
+vector<int> get_master_data(int N, int my_rank){
     /**
      * Constructs an array filled with values 1 to N,
      * if my_rank is zero, else creates an empty array,
@@ -154,4 +161,20 @@ vector<int> get_master_data(int& N, int& my_rank){
     }
 
     return master_data;
+}
+
+vector<int> scatter_data(vector<int>& input_data, int my_rank, int num_proc){
+    /**
+     * A function that scatters the input data to all
+     * processes in MPI_COMM_WORLD.
+     */
+
+
+
+    // create vector that will hold the scatter data
+    vector<int> local_data(2);
+
+    MPI_Scatter( input_data.data() , 2, MPI_INT, local_data.data() , 2, MPI_INT , 0, MPI_COMM_WORLD);
+
+    return local_data;
 }
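Note that as committed this file should no longer compile: the hunk in main removes the local data vector and the inline MPI_Scatter call, yet main still calls print_rank_vector(data, 2, my_rank). A minimal sketch of the presumably intended call site, wiring in the new scatter_data helper (this wiring is an assumption, not part of the commit):

    // hypothetical replacement for the deleted lines in main (not in this commit):
    // let the new helper perform the scatter and return this rank's chunk
    auto data = scatter_data(master_data, my_rank, num_proc);
    print_rank_vector(data, 2, my_rank);

With send and receive counts fixed at 2, rank 0 hands out master_data in contiguous two-int chunks, so the root's vector must hold at least 2 * num_proc elements for the scatter to be well defined.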
