diff --git a/example/test_problem/ELBDM/UniformGranule/Input__Parameter b/example/test_problem/ELBDM/UniformGranule/Input__Parameter new file mode 100644 index 0000000000..c92652f388 --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/Input__Parameter @@ -0,0 +1,355 @@ + + +# ================================================================================================================= +# NOTE: +# 1. Comment symbol: # +# 2. [*]: defaults +# 3. Parameters set to "auto" (usually by setting to a negative value) do not have deterministic default values +# and will be set according to the adopted compilation options and/or other runtime parameters +# 4. To add new parameters, please edit "Init/Init_Load_Parameter.cpp" +# 5. All dimensional variables should be set consistently with the code units (set by UNIT_L/M/T/V/D) unless +# otherwise specified (e.g., SF_CREATE_STAR_MIN_GAS_DENS & SF_CREATE_STAR_MIN_STAR_MASS) +# 6. For boolean options: 0/1 -> off/on +# ================================================================================================================= + + +# simulation scale +BOX_SIZE 0.08 # box size along the longest side (in Mpc/h if COMOVING is adopted) +NX0_TOT_X 256 # number of base-level cells along x +NX0_TOT_Y 256 # number of base-level cells along y +NX0_TOT_Z 256 # number of base-level cells along z +OMP_NTHREAD -1 # number of OpenMP threads (<=0=auto) [-1] ##OPENMP ONLY## +END_T 2.0e-3 # end physical time (<0=auto -> must be set by test problems or restart) [-1.0] +END_STEP -1 # end step (<0=auto -> must be set by test problems or restart) [-1] + + +# test problems +TESTPROB_ID 1014 # test problem ID [0] + # 1014: ELBDM uniform granule (+GRAVITY) + + +# code units (in cgs) +OPT__UNIT 1 # specify code units -> must set exactly 3 basic units below [0] ##USELESS FOR COMOVING## +UNIT_L 3.0856800e+21 # length unit (<=0 -> set to UNIT_V*UNIT_T or (UNIT_M/UNIT_D)^(1/3)) [-1.0] +UNIT_M 1.9890000e+43 # mass unit (<=0 -> set to UNIT_D*UNIT_L^3) [-1.0] +UNIT_T -1.0 # time unit (<=0 -> set to UNIT_L/UNIT_V) [-1.0] +UNIT_V 1.0000000e+05 # velocity unit (<=0 -> set to UNIT_L/UNIT_T) [-1.0] +UNIT_D -1.0 # mass density unit (<=0 -> set to UNIT_M/UNIT_L^3) [-1.0] + + +# boundary conditions +OPT__BC_FLU_XM 1 # fluid boundary condition at the -x face: (1=periodic, 2=outflow, 3=reflecting, 4=user, 5=diode) ##2/3/5 for HYDRO ONLY## +OPT__BC_FLU_XP 1 # fluid boundary condition at the +x face: (1=periodic, 2=outflow, 3=reflecting, 4=user, 5=diode) ##2/3/5 for HYDRO ONLY## +OPT__BC_FLU_YM 1 # fluid boundary condition at the -y face: (1=periodic, 2=outflow, 3=reflecting, 4=user, 5=diode) ##2/3/5 for HYDRO ONLY## +OPT__BC_FLU_YP 1 # fluid boundary condition at the +y face: (1=periodic, 2=outflow, 3=reflecting, 4=user, 5=diode) ##2/3/5 for HYDRO ONLY## +OPT__BC_FLU_ZM 1 # fluid boundary condition at the -z face: (1=periodic, 2=outflow, 3=reflecting, 4=user, 5=diode) ##2/3/5 for HYDRO ONLY## +OPT__BC_FLU_ZP 1 # fluid boundary condition at the +z face: (1=periodic, 2=outflow, 3=reflecting, 4=user, 5=diode) ##2/3/5 for HYDRO ONLY## +OPT__BC_POT 1 # gravity boundary condition: (1=periodic, 2=isolated) +GFUNC_COEFF0 -1.0 # Green's function coefficient at the origin for the isolated BC (<0=auto) [-1.0] + + +# particle (PARTICLE only) +PAR_NPAR 0 # total number of particles (must be set for PAR_INIT==1/3; must be an integer) +PAR_INIT 1 # initialization option for particles: (1=FUNCTION, 2=RESTART, 3=FILE->"PAR_IC") +PAR_IC_FORMAT 1 # data format of PAR_IC: (1=[attribute][id], 
2=[id][attribute]; row-major) [1] +PAR_IC_FLOAT8 -1 # floating-point precision for PAR_IC (<0: default, 0: single, 1: double) [default: same as FLOAT8_PAR] +PAR_IC_MASS -1.0 # mass of all particles for PAR_INIT==3 (<0=off) [-1.0] +PAR_IC_TYPE -1 # type of all particles for PAR_INIT==3 (<0=off) [-1] +PAR_INTERP 3 # particle interpolation scheme: (1=NGP, 2=CIC, 3=TSC) [2] +PAR_INTEG 2 # particle integration scheme: (1=Euler, 2=KDK) [2] +PAR_TR_INTERP 3 # tracer particle interpolation scheme: (1=NGP, 2=CIC, 3=TSC) [3] +PAR_TR_INTEG 2 # tracer particle integration scheme: (1=Euler, 2=RK2) [2] +PAR_IMPROVE_ACC 1 # improve force accuracy at patch boundaries [1] ##STORE_POT_GHOST and PAR_INTERP=2/3 ONLY## +PAR_PREDICT_POS 1 # predict particle position during mass assignment [1] +PAR_REMOVE_CELL -1.0 # remove particles X-root-cells from the boundaries (non-periodic BC only; <0=auto) [-1.0] +OPT__FREEZE_PAR 0 # do not update particles (except for tracers) [0] +PAR_TR_VEL_CORR 0 # correct tracer particle velocities in regions of discontinuous flow [0] + + +# time-step +DT__MAX -1.0 # dt criterion: maximum allowed dt (<0=off) [-1.0] +DT__FLUID -1.0 # dt criterion: fluid solver CFL factor (<0=auto) [-1.0] +DT__FLUID_INIT -1.0 # dt criterion: DT__FLUID at the first step (<0=auto) [-1.0] +DT__SPEED_OF_LIGHT 0 # dt criterion: speed of light [0] ##SRHD ONLY## +DT__GRAVITY -1.0 # dt criterion: gravity solver safety factor (<0=auto) [-1.0] +DT__PHASE 0.0 # dt criterion: phase rotation safety factor (0=off) [0.0] ##ELBDM ONLY## +DT__HYBRID_CFL -1.0 # dt criterion: hybrid solver CFL factor (<0=auto) (diffusion) [-1.0] ## ELBDM_HYBRID ONLY## +DT__HYBRID_CFL_INIT -1.0 # dt criterion: DT__HYBRID_CFL in the first step (<0=auto) [-1.0] ## ELBDM_HYBRID ONLY## +DT__HYBRID_VELOCITY -1.0 # dt criterion: hybrid solver CFL factor (<0=auto) (Hamilton-Jacobi) [-1.0] ## ELBDM_HYBRID ONLY## +DT__HYBRID_VELOCITY_INIT -1.0 # dt criterion: DT__HYBRID_VELOCITY in the first step (<0=auto) [-1.0] ## ELBDM_HYBRID ONLY## +DT__PARVEL 0.5 # dt criterion: particle velocity safety factor [0.5] +DT__PARVEL_MAX -1.0 # dt criterion: maximum allowed dt from particle velocity (<0=off) [-1.0] +DT__PARACC 0.5 # dt criterion: particle acceleration safety factor (0=off) [0.5] ##STORE_PAR_ACC ONLY## +DT__CR_DIFFUSION 0.3 # dt criterion: cosmic-ray diffusion CFL factor [0.3] ##CR_DIFFUSION only## +DT__MAX_DELTA_A 0.01 # dt criterion: maximum variation of the cosmic scale factor [0.01] +DT__SYNC_PARENT_LV 0.1 # dt criterion: allow dt to adjust by (1.0+DT__SYNC_PARENT) in order to synchronize + # with the parent level (for OPT__DT_LEVEL==3 only) [0.1] +DT__SYNC_CHILDREN_LV 0.1 # dt criterion: allow dt to adjust by (1.0-DT__SYNC_CHILDREN) in order to synchronize + # with the children level (for OPT__DT_LEVEL==3 only; 0=off) [0.1] +OPT__DT_USER 0 # dt criterion: user-defined -> edit "Mis_GetTimeStep_UserCriteria.cpp" [0] +OPT__DT_LEVEL 3 # dt at different AMR levels (1=shared, 2=differ by two, 3=flexible) [3] +OPT__RECORD_DT 1 # record info of the dt determination [1] +AUTO_REDUCE_DT 1 # reduce dt automatically when the program fails (for OPT__DT_LEVEL==3 only) [1] +AUTO_REDUCE_DT_FACTOR 1.0 # reduce dt by a factor of AUTO_REDUCE_DT_FACTOR when the program fails [1.0] +AUTO_REDUCE_DT_FACTOR_MIN 0.1 # minimum allowed AUTO_REDUCE_DT_FACTOR after consecutive failures [0.1] +AUTO_REDUCE_MINMOD_FACTOR 0.8 # reduce MINMOD_COEFF by this factor together with AUTO_REDUCE_DT (1.0=off) [0.8] ##HYDRO ONLY## +AUTO_REDUCE_MINMOD_MIN 1.0e-2 # minimum allowed 
MINMOD_COEFF after consecutive failures [1.0e-2] ##HYDRO ONLY## +AUTO_REDUCE_INT_MONO_FACTOR 0.8 # reduce INT_MONO_COEFF(_B) by this factor together with AUTO_REDUCE_DT (1.0=off) [0.8] +AUTO_REDUCE_INT_MONO_MIN 1.0e-2 # minimum allowed INT_MONO_COEFF(_B) after consecutive failures [1.0e-2] + + +# grid refinement (examples of Input__Flag_XXX tables are put at "example/input/") +REGRID_COUNT 4 # refine every REGRID_COUNT sub-step [4] +REFINE_NLEVEL 1 # number of new AMR levels to be created at once during refinement [1] +FLAG_BUFFER_SIZE -1 # number of buffer cells for the flag operation (0~PATCH_SIZE; <0=auto -> PATCH_SIZE) [-1] +FLAG_BUFFER_SIZE_MAXM1_LV -1 # FLAG_BUFFER_SIZE at the level MAX_LEVEL-1 (<0=auto -> REGRID_COUNT) [-1] +FLAG_BUFFER_SIZE_MAXM2_LV -1 # FLAG_BUFFER_SIZE at the level MAX_LEVEL-2 (<0=auto) [-1] +MAX_LEVEL 0 # maximum refinement level (0~NLEVEL-1) [NLEVEL-1] +OPT__FLAG_RHO 0 # flag: density (Input__Flag_Rho) [0] +OPT__FLAG_RHO_GRADIENT 0 # flag: density gradient (Input__Flag_RhoGradient) [0] +OPT__FLAG_PRES_GRADIENT 0 # flag: pressure gradient (Input__Flag_PresGradient) [0] ##HYDRO ONLY## +OPT__FLAG_LRTZ_GRADIENT 0 # flag: Lorentz factor gradient (Input__Flag_LrtzGradient) [0] ##SRHD ONLY## +OPT__FLAG_VORTICITY 0 # flag: vorticity (Input__Flag_Vorticity) [0] ##HYDRO ONLY## +OPT__FLAG_JEANS 0 # flag: Jeans length (Input__Flag_Jeans) [0] ##HYDRO ONLY## +OPT__FLAG_CURRENT 0 # flag: current density in MHD (Input__Flag_Current) [0] ##MHD ONLY## +OPT__FLAG_CRAY 0 # flag: cosmic-ray energy (Input__Flag_CRay) [0] ##COSMIC_RAY ONLY## +OPT__FLAG_ENGY_DENSITY 0 # flag: energy density (Input_Flag_EngyDensity) [0] ##ELBDM ONLY## +OPT__FLAG_INTERFERENCE 0 # flag: interference level (Input__Flag_Interference) [0] ##ELBDM ONLY## +OPT__FLAG_SPECTRAL 0 # flag: spectral refinement (Input__Flag_Spectral) [0] ##ELBDM ONLY## +OPT__FLAG_SPECTRAL_N 2 # number of pol. 
coefficients to use for spectral refinement [2] ##ELBDM ONLY## +OPT__FLAG_LOHNER_DENS 0 # flag: Lohner for mass density (Input__Flag_Lohner) [0] ##BOTH HYDRO AND ELBDM## +OPT__FLAG_LOHNER_ENGY 0 # flag: Lohner for energy density (Input__Flag_Lohner) [0] ##HYDRO ONLY## +OPT__FLAG_LOHNER_PRES 0 # flag: Lohner for pressure (Input__Flag_Lohner) [0] ##HYDRO ONLY## +OPT__FLAG_LOHNER_TEMP 0 # flag: Lohner for temperature (Input__Flag_Lohner) [0] ##HYDRO ONLY## +OPT__FLAG_LOHNER_ENTR 0 # flag: Lohner for entropy (Input__Flag_Lohner) [0] ##HYDRO ONLY## +OPT__FLAG_LOHNER_CRAY 0 # flag: Lohner for cosmic-ray energy (Input__Flag_Lohner) [0] ##COSMIC_RAY ONLY## +OPT__FLAG_LOHNER_FORM 2 # form of Lohner: (1=FLASH-1, 2=FLASH-2, 3=form-invariant-1, 4=form-invariant-2) [2] +OPT__FLAG_USER 0 # flag: user-defined (Input__Flag_User) -> edit "Flag_User.cpp" [0] +OPT__FLAG_USER_NUM 1 # number of threshold values in user-defined table (Input__Flag_User) [1] +OPT__FLAG_REGION 0 # flag: specify the regions **allowed** to be refined -> edit "Flag_Region.cpp" [0] +OPT__FLAG_NPAR_PATCH 0 # flag: # of particles per patch (Input__Flag_NParPatch): (0=off, 1=itself, 2=itself+siblings) [0] +OPT__FLAG_NPAR_CELL 0 # flag: # of particles per cell (Input__Flag_NParCell) [0] +OPT__FLAG_PAR_MASS_CELL 0 # flag: total particle mass per cell (Input__Flag_ParMassCell) [0] +OPT__NO_FLAG_NEAR_BOUNDARY 0 # flag: disallow refinement near the boundaries [0] +OPT__PATCH_COUNT 1 # record the # of patches at each level: (0=off, 1=every step, 2=every sub-step) [1] +OPT__PARTICLE_COUNT 1 # record the # of particles at each level: (0=off, 1=every step, 2=every sub-step) [1] +OPT__REUSE_MEMORY 2 # reuse patch memory to reduce memory fragmentation: (0=off, 1=on, 2=aggressive) [2] +OPT__MEMORY_POOL 0 # preallocate patches for OPT__REUSE_MEMORY=1/2 (Input__MemoryPool) [0] + + +# load balance (LOAD_BALANCE only) +LB_INPUT__WLI_MAX 0.1 # weighted-load-imbalance (WLI) threshold for redistributing all patches [0.1] +LB_INPUT__PAR_WEIGHT 0.0 # load-balance weighting of one particle over one cell [0.0] +OPT__RECORD_LOAD_BALANCE 1 # record the load-balance info [1] +OPT__MINIMIZE_MPI_BARRIER 0 # minimize MPI barriers to improve load balance, especially with particles [0] + # (STORE_POT_GHOST, PAR_IMPROVE_ACC=1, OPT__TIMING_BARRIER=0 only; recommend AUTO_REDUCE_DT=0) +OPT__LB_EXCHANGE_FATHER 1 # exchange all cells of all father patches during load balancing (must enable for hybrid scheme + MPI) [0 usually, 1 for ELBDM_HYBRID] ## ELBDM_HYBRID ONLY### + + +# source terms +SRC_DELEPTONIZATION 0 # deleptonization (for simulations of stellar core collapse) [0] ##HYDRO ONLY## +SRC_USER 0 # user-defined source terms -> edit "Src_User.cpp" [0] +SRC_GPU_NPGROUP -1 # number of patch groups sent into the CPU/GPU source-term solver (<=0=auto) [-1] + + +# fluid solver in ELBDM (MODEL==ELBDM only) +ELBDM_MASS 3.0e-19 # particle mass in ev/c^2 (input unit is fixed even when OPT__UNIT or COMOVING is on) +ELBDM_PLANCK_CONST 1.0 # reduced Planck constant (will be overwritten if OPT__UNIT or COMOVING is on) +ELBDM_LAMBDA 1.0 # quartic self-interaction coefficient [1.0] ##QUARTIC_SELF_INTERACTION ONLY## +ELBDM_TAYLOR3_COEFF 0.166666667 # 3rd Taylor expansion coefficient [1.0/6.0] ##USELESS if ELBDM_TAYLOR3_AUTO is on## +ELBDM_TAYLOR3_AUTO 0 # Optimize ELBDM_TAYLOR3_COEFF automatically to minimize the damping at kmax [0] +ELBDM_REMOVE_MOTION_CM 0 # remove the motion of center-of-mass (must enable OPT__CK_CONSERVATION): + # (0=off, 1=init, 2=every step) [0] 
+ELBDM_BASE_SPECTRAL 1 # adopt the spectral method to evolve base-level wave function (must enable SUPPORT_FFTW) [0] +ELBDM_MATCH_PHASE 1 # match child phases with father phases during data restriction [1] ##ELBDM_HYBRID ONLY## +ELBDM_FIRST_WAVE_LEVEL -1 # level at which to switch to the wave solver (must >=1) [-1] ##ELBDM_HYBRID ONLY## + +# fluid solvers in all models +FLU_GPU_NPGROUP -1 # number of patch groups sent into the CPU/GPU fluid solver (<=0=auto) [-1] +GPU_NSTREAM -1 # number of CUDA streams for the asynchronous memory copy in GPU (<=0=auto) [-1] +OPT__FIXUP_FLUX 1 # correct coarse grids by the fine-grid boundary fluxes [1] ##HYDRO and ELBDM ONLY## +OPT__FIXUP_ELECTRIC 1 # correct coarse grids by the fine-grid boundary electric field [1] ##MHD ONLY## +OPT__FIXUP_RESTRICT 1 # correct coarse grids by averaging the fine-grid data [1] +OPT__CORR_AFTER_ALL_SYNC -1 # apply various corrections after all levels are synchronized (see "Flu_CorrAfterAllSync"): + # (-1=auto, 0=off, 1=every step, 2=before dump) [-1] +OPT__NORMALIZE_PASSIVE 0 # ensure "sum(passive_scalar_density) == gas_density" [1] +OPT__INT_FRAC_PASSIVE_LR 1 # convert specified passive scalars to mass fraction during data reconstruction [1] +OPT__OVERLAP_MPI 0 # overlap MPI communication with CPU/GPU computations [0] ##NOT SUPPORTED YET## +OPT__RESET_FLUID 0 # reset fluid variables after each update -> edit "Flu_ResetByUser.cpp" [0] +OPT__RESET_FLUID_INIT -1 # reset fluid variables during initialization (<0=auto -> OPT__RESET_FLUID, 0=off, 1=on) [-1] +OPT__FREEZE_FLUID 0 # do not evolve fluid at all [0] +OPT__CHECK_PRES_AFTER_FLU -1 # check unphysical pressure at the end of the fluid solver (<0=auto) [-1] +OPT__LAST_RESORT_FLOOR 1 # apply floor values as the last resort when the fluid solver fails [1] ##HYDRO and MHD ONLY## +MIN_DENS 0.0 # minimum mass density (must >= 0.0) [0.0] ##HYDRO, MHD, and ELBDM ONLY## +MIN_PRES 0.0 # minimum pressure (must >= 0.0) [0.0] ##HYDRO and MHD ONLY## +MIN_EINT 0.0 # minimum internal energy (must >= 0.0) [0.0] ##HYDRO and MHD ONLY## +MIN_TEMP 0.0 # minimum temperature in K (must >= 0.0) [0.0] ##HYDRO and MHD ONLY## +MIN_ENTR 0.0 # minimum entropy (must >= 0.0) [0.0] ##HYDRO and MHD ONLY## +JEANS_MIN_PRES 0 # minimum pressure estimated from the Jeans length [0] ##HYDRO/MHD and GRAVITY ONLY## +JEANS_MIN_PRES_LEVEL -1 # for JEANS_MIN_PRES; ensure Jeans length is resolved by JEANS_MIN_PRES_NCELL*dh[JEANS_MIN_PRES_LEVEL] + # (<0=auto -> MAX_LEVEL) [-1] +JEANS_MIN_PRES_NCELL 4 # for JEANS_MIN_PRES; see JEANS_MIN_PRES_LEVEL [4] + + +# gravity solvers in all models +NEWTON_G 1.0 # gravitational constant (will be overwritten if OPT__UNIT or COMOVING is on) +SOR_OMEGA -1.0 # over-relaxation parameter in SOR: (<0=auto) [-1.0] +SOR_MAX_ITER -1 # maximum number of iterations in SOR: (<0=auto) [-1] +SOR_MIN_ITER -1 # minimum number of iterations in SOR: (<0=auto) [-1] +MG_MAX_ITER -1 # maximum number of iterations in multigrid: (<0=auto) [-1] +MG_NPRE_SMOOTH -1 # number of pre-smoothing steps in multigrid: (<0=auto) [-1] +MG_NPOST_SMOOTH -1 # number of post-smoothing steps in multigrid: (<0=auto) [-1] +MG_TOLERATED_ERROR -1.0 # maximum tolerated error in multigrid (<0=auto) [-1.0] +POT_GPU_NPGROUP -1 # number of patch groups sent into the CPU/GPU Poisson solver (<=0=auto) [-1] +OPT__GRA_P5_GRADIENT 0 # 5-points gradient in the Gravity solver (must have GRA/USG_GHOST_SIZE_G>=2) [0] +OPT__SELF_GRAVITY 1 # add self-gravity [1] +OPT__EXT_ACC 0 # add external acceleration (0=off, 1=function, 2=table) [0] 
##HYDRO ONLY## + # --> 2 (table) is not supported yet +OPT__EXT_POT 0 # add external potential (0=off, 1=function, 2=table) [0] + # --> for 2 (table), edit the corresponding parameters below too + +# tabular external potential +EXT_POT_TABLE_NAME ExtPotTable # external potential table: filename +EXT_POT_TABLE_NPOINT_X 129 # ... : table size (i.e., number of data points) along x/y/z +EXT_POT_TABLE_NPOINT_Y 129 # +EXT_POT_TABLE_NPOINT_Z 129 # +EXT_POT_TABLE_DH_X 0.0078125 # ... : spatial interval between adjacent data points +EXT_POT_TABLE_DH_Y 0.0078125 # +EXT_POT_TABLE_DH_Z 0.0078125 # +EXT_POT_TABLE_EDGEL_X 0.0 # ... : starting x/y/z coordinates +EXT_POT_TABLE_EDGEL_Y 0.0 # +EXT_POT_TABLE_EDGEL_Z 0.0 # +EXT_POT_TABLE_FLOAT8 -1 # ... : double precision (<0=auto -> FLOAT8, 0=off, 1=on) [-1] + # --> not supported yet; use -1 for now +OPT__GRAVITY_EXTRA_MASS 0 # add extra mass source when computing gravity [0] + + +# initialization +OPT__INIT 3 # initialization option: (1=FUNCTION, 2=RESTART, 3=FILE->"UM_IC") +OPT__INIT_BFIELD_BYVECPOT 0 # initialize the magnetic field from vector potential + # (0=off, 1=external disk file named "B_IC", see tool/inits/gen_vec_pot.py for example, 2=function) [0] ##MHD ONLY## +RESTART_LOAD_NRANK 4 # number of parallel I/O (i.e., number of MPI ranks) for restart [1] +OPT__RESTART_RESET 0 # reset some simulation status parameters (e.g., current step and time) during restart [0] +OPT__UM_IC_LEVEL 0 # starting AMR level in UM_IC [0] +OPT__UM_IC_NLEVEL 1 # number of AMR levels UM_IC [1] --> edit "Input__UM_IC" if >1 +OPT__UM_IC_NVAR 2 # number of variables in UM_IC: (1~NCOMP_TOTAL; <=0=auto) [HYDRO=5+passive/ELBDM=2] +OPT__UM_IC_FORMAT 1 # data format of UM_IC: (1=vzyx, 2=zyxv; row-major and v=field) [1] +OPT__UM_IC_FLOAT8 -1 # floating-point precision for UM_IC (<0: default, 0: single, 1: double) [default: same as FLOAT8] +OPT__UM_IC_DOWNGRADE 0 # downgrade UM_IC from level OPT__UM_IC_LEVEL to 0 [1] +OPT__UM_IC_REFINE 0 # refine UM_IC from level OPT__UM_IC_LEVEL to MAX_LEVEL [1] +OPT__UM_IC_LOAD_NRANK 4 # number of parallel I/O (i.e., number of MPI ranks) for loading UM_IC [1] +OPT__INIT_RESTRICT 1 # restrict all data during the initialization [1] +OPT__INIT_GRID_WITH_OMP 1 # enable OpenMP when assigning the initial condition of each grid patch [1] +OPT__GPUID_SELECT -1 # GPU ID selection mode: (-3=Laohu, -2=CUDA, -1=MPI rank, >=0=input) [-1] +INIT_SUBSAMPLING_NCELL 0 # perform sub-sampling during initialization: (0=off, >0=# of sub-sampling cells) [0] +OPT__FFTW_STARTUP -1 # initialise fftw plans: (-1=auto, 0=ESTIMATE, 1=MEASURE, 2=PATIENT (only FFTW3)) [-1] + +# interpolation schemes: (-1=auto, 1=MinMod-3D, 2=MinMod-1D, 3=vanLeer, 4=CQuad, 5=Quad, 6=CQuar, 7=Quar, 8=Spectral (##ELBDM & SUPPORT_SPECTRAL_INT ONLY##)) +OPT__INT_TIME 1 # perform "temporal" interpolation for OPT__DT_LEVEL == 2/3 [1] +OPT__INT_PRIM 1 # switch to primitive variables when the interpolation on conserved variables fails [1] ##HYDRO ONLY## +OPT__INT_PHASE 1 # interpolation on phase (does not support MinMod-1D) [1] ##ELBDM ONLY## +OPT__RES_PHASE 0 # restriction on phase [0] ##ELBDM ONLY## +OPT__FLU_INT_SCHEME -1 # ghost-zone fluid variables for the fluid solver [-1] +OPT__REF_FLU_INT_SCHEME -1 # newly allocated fluid variables during grid refinement [-1] +OPT__MAG_INT_SCHEME 4 # ghost-zone magnetic field for the MHD solver (2,3,4,6 only) [4] +OPT__REF_MAG_INT_SCHEME 4 # newly allocated magnetic field during grid refinement (2,3,4,6 only) [4] +OPT__POT_INT_SCHEME 4 # ghost-zone potential for 
the Poisson solver (only supports 4 & 5) [4] +OPT__RHO_INT_SCHEME 4 # ghost-zone mass density for the Poisson solver [4] +OPT__GRA_INT_SCHEME 4 # ghost-zone potential for the gravity solver (for UNSPLIT_GRAVITY as well) [4] +OPT__REF_POT_INT_SCHEME 4 # newly allocated potential during grid refinement [4] +INT_MONO_COEFF 2.0 # coefficient for ensuring the interpolation monotonicity (1.0~4.0) [2.0] +INT_MONO_COEFF_B 2.0 # coefficient for ensuring the interpolation monotonicity of B field (1.0~4.0) [2.0] ##MHD ONLY## +MONO_MAX_ITER 10 # maximum number of iterations to reduce INT_MONO_COEFF when interpolation fails (0=off) [10] +INT_OPP_SIGN_0TH_ORDER 1 # switch to 0th-order interpolation if adjacent values change signs [HYDRO:1; ELBDM:0] +SPEC_INT_TABLE_PATH ./ # path to tables for spectral interpolation ##ELBDM & SUPPORT_SPECTRAL_INT ONLY## +SPEC_INT_XY_INSTEAD_DEPHA 1 # interpolate x = density^0.5*cos( phase/SPEC_INT_WAVELENGTH_MAGNIFIER ), + # y = density^0.5*sin( phase/SPEC_INT_WAVELENGTH_MAGNIFIER ) instead of density and phase + # for the spectral interpolation, which has the advantage of being well-defined across vortices [1] +SPEC_INT_VORTEX_THRESHOLD 0.1 # vortex detection threshold for SPEC_INT_XY_INSTEAD_DEPHA [0.1] ##ELBDM & SUPPORT_SPECTRAL_INT ONLY## +SPEC_INT_GHOST_BOUNDARY 4 # ghost boundary size for spectral interpolation [4] ##ELBDM & SUPPORT_SPECTRAL_INT ONLY## + + +# data dump +OPT__OUTPUT_TOTAL 1 # output the simulation snapshot: (0=off, 1=HDF5, 2=C-binary) [1] +OPT__OUTPUT_PART 0 # output a single line or slice: (0=off, 1=xy, 2=yz, 3=xz, 4=x, 5=y, 6=z, 7=diag) [0] +OPT__OUTPUT_TEXT_FORMAT_FLT %24.16e # string format of output text files [%24.16e] +OPT__OUTPUT_USER 0 # output the user-specified data -> edit "Output_User.cpp" [0] +OPT__OUTPUT_PAR_MODE 0 # output the particle data: (0=off, 1=text-file, 2=C-binary) [0] ##PARTICLE ONLY## +OPT__OUTPUT_BASEPS 1 # output the base-level power spectrum [0] +OPT__OUTPUT_BASE 0 # only output the base-level data [0] ##OPT__OUTPUT_PART ONLY## +OPT__OUTPUT_POT 1 # output gravitational potential [1] ##OPT__OUTPUT_TOTAL ONLY## +OPT__OUTPUT_PAR_DENS 0 # output the particle or total mass density on grids: + # (0=off, 1=particle mass density, 2=total mass density) [1] ##OPT__OUTPUT_TOTAL ONLY## +OPT__OUTPUT_CC_MAG 1 # output **cell-centered** magnetic field (necessary for yt analysis) [1] ##MHD ONLY## +OPT__OUTPUT_PRES 0 # output gas pressure [0] ##HYDRO ONLY## +OPT__OUTPUT_TEMP 0 # output gas temperature [0 (HD) or 1 (SRHD)] ##HYDRO ONLY## +OPT__OUTPUT_ENTR 0 # output gas entropy [0] ##HYDRO ONLY## +OPT__OUTPUT_CS 0 # output sound speed [0] ##HYDRO ONLY## +OPT__OUTPUT_DIVVEL 0 # output divergence(velocity) [0] ##HYDRO ONLY## +OPT__OUTPUT_MACH 0 # output mach number [0] ##HYDRO ONLY## +OPT__OUTPUT_DIVMAG 0 # output |divergence(B)*dh/|B|| [0] ##MHD ONLY## +OPT__OUTPUT_LORENTZ 0 # output Lorentz factor [0] ##SRHD ONLY## +OPT__OUTPUT_3VELOCITY 0 # output 3-velocities [0] ##SRHD ONLY## +OPT__OUTPUT_ENTHALPY 1 # output reduced enthalpy [1] ##SRHD ONLY## +OPT__OUTPUT_USER_FIELD 0 # output user-defined derived fields [0] -> edit "Flu_DerivedField_User.cpp" +OPT__OUTPUT_MODE 2 # (1=const step, 2=const dt, 3=dump table) -> edit "Input__DumpTable" for 3 +OPT__OUTPUT_RESTART 0 # output data immediately after restart [0] +OUTPUT_STEP 5 # output data every OUTPUT_STEP step ##OPT__OUTPUT_MODE==1 ONLY## +OUTPUT_DT 4.0e-5 # output data every OUTPUT_DT time interval ##OPT__OUTPUT_MODE==2 ONLY## +OUTPUT_WALLTIME -1.0 # output data every OUTPUT_WALLTIME 
walltime (<=0.0=off) [-1.0] +OUTPUT_WALLTIME_UNIT 0 # unit of OUTPUT_WALLTIME (0=second, 1=minute, 2=hour, 3=day) [0] +OUTPUT_PART_X -1.0 # x coordinate for OPT__OUTPUT_PART [-1.0] +OUTPUT_PART_Y -1.0 # y coordinate for OPT__OUTPUT_PART [-1.0] +OUTPUT_PART_Z -1.0 # z coordinate for OPT__OUTPUT_PART [-1.0] +INIT_DUMPID -1 # set the first dump ID (<0=auto) [-1] + + +# yt inline analysis (SUPPORT_LIBYT only) +YT_SCRIPT yt_inline # yt inline analysis script (do not include the ".py" file extension) +YT_VERBOSE 1 # verbose level of yt (0=off, 1=info, 2=warning, 3=debug) [1] +YT_FIG_BASENAME Fig # figure basename [Fig] +YT_JUPYTER_USE_CONNECTION_FILE 0 # use user-provided connection file when using libyt Jupyter UI [0] + + +# miscellaneous +OPT__VERBOSE 0 # output the simulation progress in detail [0] +OPT__TIMING_BARRIER -1 # synchronize before timing -> more accurate, but may slow down the run (<0=auto) [-1] +OPT__TIMING_BALANCE 0 # record the max/min elapsed time in various code sections for checking load balance [0] +OPT__TIMING_MPI 0 # record the MPI bandwidth achieved in various code sections [0] ##LOAD_BALANCE ONLY## +OPT__RECORD_NOTE 1 # take notes for the general simulation info [1] +OPT__RECORD_UNPHY 1 # record the number of cells with unphysical results being corrected [1] +OPT__RECORD_MEMORY 1 # record the memory consumption [1] +OPT__RECORD_PERFORMANCE 1 # record the code performance [1] +OPT__MANUAL_CONTROL 1 # support manually dump data, stop run, or pause run during the runtime + # (by generating the file DUMP_GAMER_DUMP, STOP_GAMER_STOP, PAUSE_GAMER_PAUSE, respectively) [1] +OPT__RECORD_CENTER 0 # record the position of maximum density, minimum potential, and center of mass [0] +COM_CEN_X -1.0 # x coordinate as an initial guess for determining center of mass (if one of COM_CEN_X/Y/Z < 0 -> peak density position x) [-1.0] +COM_CEN_Y -1.0 # y coordinate as an initial guess for determining center of mass (if one of COM_CEN_X/Y/Z < 0 -> peak density position y) [-1.0] +COM_CEN_Z -1.0 # z coordinate as an initial guess for determining center of mass (if one of COM_CEN_X/Y/Z < 0 -> peak density position z) [-1.0] +COM_MAX_R -1.0 # maximum radius for determining center of mass (<0=auto -> __FLT_MAX__) [-1.0] +COM_MIN_RHO 0.0 # minimum density for determining center of mass (must >= 0.0) [0.0] +COM_TOLERR_R -1.0 # maximum tolerated error of deviation in radius during the iterations of determining the center of mass (<0=auto -> amr->dh[MAX_LEVEL]) [-1.0] +COM_MAX_ITER 10 # maximum number of iterations for determining the center of mass (must >= 1) [10] +OPT__RECORD_USER 1 # record the user-specified info -> edit "Aux_Record_User.cpp" [0] +OPT__OPTIMIZE_AGGRESSIVE 0 # apply aggressive optimizations (experimental) [0] +OPT__SORT_PATCH_BY_LBIDX 1 # sort patches to improve bitwise reproducibility [SERIAL:0, LOAD_BALACNE:1] + + +# checks +OPT__CK_REFINE 0 # check the grid refinement [0] +OPT__CK_PROPER_NESTING 0 # check the proper-nesting condition [0] +OPT__CK_CONSERVATION 1 # check the conservation law [0] +ANGMOM_ORIGIN_X -1.0 # x coordinate of the origin for angular momentum (<0=auto -> BoxCenter) [-1.0] +ANGMOM_ORIGIN_Y -1.0 # y coordinate of the origin for angular momentum (<0=auto -> BoxCenter) [-1.0] +ANGMOM_ORIGIN_Z -1.0 # z coordinate of the origin for angular momentum (<0=auto -> BoxCenter) [-1.0] +OPT__CK_NORMALIZE_PASSIVE 0 # check the normalization of passive scalars [0] ##OPT__NORMALIZE_PASSIVE ONLY## +OPT__CK_RESTRICT 0 # check the data restriction [0] +OPT__CK_FINITE 0 # 
check if all variables are finite [0]
+OPT__CK_PATCH_ALLOCATE 0 # check if all patches are properly allocated [0]
+OPT__CK_FLUX_ALLOCATE 0 # check if all flux arrays are properly allocated [0] ##HYDRO and ELBDM ONLY##
+OPT__CK_NEGATIVE 0 # check the negative values: (0=off, 1=density, 2=pressure and entropy, 3=both) [0] ##HYDRO ONLY##
+OPT__CK_MEMFREE 1.0 # check the free memory in GB (0=off, >0=threshold) [1.0]
+OPT__CK_PARTICLE 0 # check the particle allocation [0]
+OPT__CK_INTERFACE_B 0 # check the consistency of patch interface B field [0] ##MHD ONLY##
+OPT__CK_DIVERGENCE_B 0 # check the divergence-free constraint on B field (0=off, 1=on, 2=on+verbose) [0] ##MHD ONLY##
+OPT__CK_INPUT_FLUID 0 # check the input data of the fluid solver [0]
diff --git a/example/test_problem/ELBDM/UniformGranule/Input__TestProb b/example/test_problem/ELBDM/UniformGranule/Input__TestProb new file mode 100644 index 0000000000..e39dd9d5d0 --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/Input__TestProb @@ -0,0 +1,20 @@
+# problem-specific runtime parameters
+ComputeCorrelation 1 # compute correlation function (must set OPT__RECORD_USER=1) [0]
+ReComputeCorrelation 0 # use the simulation time of RESTART as the initial time for computing the time correlation; only available for RESTART [0]
+FilePath_corr ./Record__Correlation # output path for correlation function text files
+MinLv_corr 0 # do statistics from MinLv to MaxLv [0]
+MaxLv_corr 0 # do statistics from MinLv to MaxLv [MAX_LEVEL]
+StepInitial_corr 0 # initial step for recording correlation function [0]
+StepInterval_corr 1 # interval for recording correlation function (only for OutputCorrelationMode=0) [1]
+StepEnd_corr 200 # end step for recording correlation function (only for OutputCorrelationMode=0) [NoMax_int]
+
+# parameters for correlation function profile (only useful when ComputeCorrelation=1)
+RadiusMax_corr 0.04 # maximum radius of correlation profile [0.5*BoxSize]
+dr_min_corr 0.00234224 # minimum bin size [1e-2*0.5*BoxSize]
+LogBin_corr 1 # use log bin [0]
+LogBinRatio_corr 1.1940617521534918 # ratio of adjacent log bins [2]
+RemoveEmpty_corr 0 # remove bins with 0 samples; false: Data[empty_bin]=Weight[empty_bin]=NCell[empty_bin]=0 [0]
+
+# parameters for initial density profile (only useful when ComputeCorrelation=1)
+dr_min_prof 0.00058556 # minimum bin size [0.25*dr_min_corr]
+
diff --git a/example/test_problem/ELBDM/UniformGranule/README b/example/test_problem/ELBDM/UniformGranule/README new file mode 100644 index 0000000000..1d16892abc --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/README @@ -0,0 +1,32 @@
+Compilation flags:
+========================================
+Enable : MODEL=ELBDM, GRAVITY, SUPPORT_HDF5, NCOMP_PASSIVE_USER=1
+Disable : COMOVING
+
+
+Default setup:
+=======================================
+0. Copy "generate_make.sh" to the directory "src/", execute "sh generate_make.sh" to generate "Makefile",
+   and then execute "make clean" and "make -j 4" to generate the executable "gamer"
+
+1. Code units
+   (1) UNIT_L = kpc
+   (2) UNIT_V = km/s
+   (3) UNIT_M = 1.0e10 Msun
+
+2. ELBDM_MASS            3.0e-19
+   ELBDM_TAYLOR3_AUTO    0
+   ELBDM_BASE_SPECTRAL   1
+
+3. OPT__BC_FLU_*         1  (periodic)
+   OPT__BC_POT           1  (periodic)
+
+4. To enable ComputeCorrelation in Input__TestProb,
+   one must turn on OPT__RECORD_USER in Input__Parameter.
+ +Default UM_IC info: +======================================= +BoxSize = 0.08 kpc +dimension = 256^3 +average density = 0.25e10 Msun/kpc^3 +1-D velocity dispersion = 6.0 km/s (the actual computed dispersion ~5.75 km/s) diff --git a/example/test_problem/ELBDM/UniformGranule/Record__Correlation/.empty b/example/test_problem/ELBDM/UniformGranule/Record__Correlation/.empty new file mode 100644 index 0000000000..e69de29bb2 diff --git a/example/test_problem/ELBDM/UniformGranule/clean.sh b/example/test_problem/ELBDM/UniformGranule/clean.sh new file mode 100644 index 0000000000..9787f8357c --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/clean.sh @@ -0,0 +1,3 @@ +rm -rf Record__* Data_* PowerSpec_* log +mkdir Record__Correlation +touch Record__Correlation/.empty diff --git a/example/test_problem/ELBDM/UniformGranule/download_ic.sh b/example/test_problem/ELBDM/UniformGranule/download_ic.sh new file mode 100644 index 0000000000..783ae524d4 --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/download_ic.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +LOCAL_FILENAME="uniform-granule" +FILE_ID="67da7961ef64ad0f8e84e795" + +# 1. download +curl https://hub.yt/api/v1/item/${FILE_ID}/download -o "${LOCAL_FILENAME}.tar.gz" + +# 2. unzip +tar -zxvf ${LOCAL_FILENAME}.tar.gz +rm ${LOCAL_FILENAME}.tar.gz + diff --git a/example/test_problem/ELBDM/UniformGranule/generate_make.sh b/example/test_problem/ELBDM/UniformGranule/generate_make.sh new file mode 100644 index 0000000000..e68d6cf616 --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/generate_make.sh @@ -0,0 +1,6 @@ +# This script should run in the same directory as configure.py + +PYTHON=python3 + +${PYTHON} configure.py --mpi=true --hdf5=true --fftw=FFTW3 --gpu=true \ + --model=ELBDM --gravity=true --passive=1 "$@" diff --git a/example/test_problem/ELBDM/UniformGranule/python_script/get_vel_dispersion.py b/example/test_problem/ELBDM/UniformGranule/python_script/get_vel_dispersion.py new file mode 100644 index 0000000000..bf26239f11 --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/python_script/get_vel_dispersion.py @@ -0,0 +1,88 @@ +import yt +import numpy as np +import argparse +import sys + +#------------------------------------------------------------------------------------------------------------------------- +# load the command-line parameters +parser = argparse.ArgumentParser( description='Get average velocity dispersion of the entire box' ) + +parser.add_argument( '-s', action='store', required=True, type=int, dest='idx_start', + help='first data index' ) +parser.add_argument( '-e', action='store', required=True, type=int, dest='idx_end', + help='last data index' ) +parser.add_argument( '-d', action='store', required=False, type=int, dest='didx', + help='delta data index [%(default)d]', default=1 ) + +args=parser.parse_args() + +idx_start = args.idx_start +idx_end = args.idx_end +didx = args.didx + +# print command-line parameters +print( '\nCommand-line arguments:' ) +print( '-------------------------------------------------------------------' ) +for t in range( len(sys.argv) ): + print( str(sys.argv[t])) +print( '' ) +print( '-------------------------------------------------------------------\n' ) + +yt.enable_parallelism() +ts = yt.DatasetSeries( [ '../Data_%06d'%idx for idx in range(idx_start, idx_end+1, didx) ] ) + +for ds in ts.piter(): + idx = ds.parameters["DumpID"] + time = ds.parameters["Time"][0] + N = ds.parameters["NX0"] + N_tot = N[0]*N[1]*N[2] + UNIT_L = ds.parameters["Unit_L"] + ma = 
ds.parameters["ELBDM_Mass"] + hbar = ds.parameters["ELBDM_PlanckConst"] + fac = hbar/ma + + grad_Real = ds.add_gradient_fields(("Real")) + grad_Imag = ds.add_gradient_fields(("Imag")) + + dd = ds.all_data() + + if ( N_tot != len(dd["Dens"])): + print('Data_%06d file size not matched!'%idx) + sys.exit(1) + + dens = np.array(dd["Dens"]) + real = np.array(dd["Real"]) + imag = np.array(dd["Imag"]) + avedens = np.mean(dens) + + grad_real = np.array([dd["Real_gradient_x"], dd["Real_gradient_y"], dd["Real_gradient_z"]])*UNIT_L + grad_imag = np.array([dd["Imag_gradient_x"], dd["Imag_gradient_y"], dd["Imag_gradient_z"]])*UNIT_L + + vx_bk = fac*(imag*grad_real[0] - real*grad_imag[0])/dens + vy_bk = fac*(imag*grad_real[1] - real*grad_imag[1])/dens + vz_bk = fac*(imag*grad_real[2] - real*grad_imag[2])/dens + + vx_qp = fac*(imag*grad_imag[0] + real*grad_real[0])/dens + vy_qp = fac*(imag*grad_imag[1] + real*grad_real[1])/dens + vz_qp = fac*(imag*grad_imag[2] + real*grad_real[2])/dens + + sigma_x_square_bk = np.average(dens*vx_bk**2)/avedens - (np.average(dens*vx_bk))**2/avedens**2 + sigma_y_square_bk = np.average(dens*vy_bk**2)/avedens - (np.average(dens*vy_bk))**2/avedens**2 + sigma_z_square_bk = np.average(dens*vz_bk**2)/avedens - (np.average(dens*vz_bk))**2/avedens**2 + + sigma_x_square_qp = np.average(dens*vx_qp**2)/avedens - (np.average(dens*vx_qp))**2/avedens**2 + sigma_y_square_qp = np.average(dens*vy_qp**2)/avedens - (np.average(dens*vy_qp))**2/avedens**2 + sigma_z_square_qp = np.average(dens*vz_qp**2)/avedens - (np.average(dens*vz_qp))**2/avedens**2 + + sigma_bk = ((sigma_x_square_bk + sigma_y_square_bk +sigma_z_square_bk)/3.)**0.5 + sigma_qp = ((sigma_x_square_qp + sigma_y_square_qp +sigma_z_square_qp)/3.)**0.5 + sigma_total = (sigma_bk**2+sigma_qp**2)**0.5 + + d = 0.35*2*np.pi*hbar/(ma*sigma_total) + + print('\nDumpID = %06d, time = %13.7e\n'%(idx, time) + + 'average density = %13.7e\n'%avedens + + 'minimum density = %13.7e\n'%(min(dens)) + + 'velocity dispersion (bulk, thermal, total) = (%13.7e, %13.7e, %13.7e)\n'%(sigma_bk, sigma_qp, sigma_total) + + 'estimated granule diameter = %13.7e\n'%d) + diff --git a/example/test_problem/ELBDM/UniformGranule/python_script/plot_correlation.py b/example/test_problem/ELBDM/UniformGranule/python_script/plot_correlation.py new file mode 100644 index 0000000000..58751ffb36 --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/python_script/plot_correlation.py @@ -0,0 +1,73 @@ +import numpy as np +import matplotlib.pyplot as plt +import glob +import re +import sys +import yt + +# ------------------------------------------------------------------------------------------------------------------------- +# user-specified parameters +sigma = 6.0 # velocity dispersion in km/s +# ------------------------------------------------------------------------------------------------------------------------- +ds = yt.load( '../Data_000000' ) +UNIT_L = ds.parameters["Unit_L"] # code length unit (default is kpc) +UNIT_V = ds.parameters["Unit_V"] # code velocity unit (default is km/s) +UNIT_T = ds.parameters["Unit_T"] +ma = ds.parameters["ELBDM_Mass"] # FDM particle mass in code unit +h_bar = ds.parameters["ELBDM_PlanckConst"] # reduced planck constant in code unit +d = 0.35*2.0*np.pi*h_bar/ma/sigma # granule diameter in code uniti (kpc) + +# C(t) decay time scale, C(t_corr)=(1+2*0.35**2)**(-1.5)*C(t=0) = 0.72*C(t=0) +t_corr = d/(2**0.5*np.pi*sigma)*UNIT_T/(3600.*24.*365.*1e6) # expected correlation time scale in Myr + +print("velocity dispersion = %g km/s"%sigma) 
+print("estimated granule size = %g kpc"%d) +print("expected correlation time scale = %g Myr"%t_corr) +print("") + +file_dir = '../Record__Correlation/' +files = glob.glob(file_dir + 'correlation_function_t=*.txt') + +filename = np.array(files) + +r = [] +C = [] +t = [] +for f in filename: + Corr = np.loadtxt(f, skiprows=1, dtype=float) + if not r: + r.append(Corr[:,0]) + else: + if not np.array_equal(r[0], Corr[:,0]): + print('radius bin not matched!! filename=\"%s\"'%f) + sys.exit(1) + + C.append(Corr[:,1]) + match = re.search(r'correlation_function_t=(\d+\.\d+e[+-]\d+)', f) + if match: + time = float(match.group(1)) + t.append(time) + else: + print('time pattern not matched!! filename=\"%s\"'%f) + sys.exit(1) + +if not r: + print("no file loaded, check ../Record__Correlation/ !!") + +t = np.array(t)*UNIT_T/(3600.*24.*365.*1e6) +C = np.array(C) + +color = plt.cm.turbo(np.linspace(0.1, 0.9, len(r[0]))) + +plt.figure() +for i in range(len(r[0])): + plt.plot(t, C[:,i], c=color[i], label = r'$r$ = %1.3e kpc'%(r[0][i])) +plt.axvline(x=t_corr, ls='--', lw = '0.9', color = '0.7', label = 'expected correlation time scale') +plt.xlabel('$t$ (Myr)') +plt.ylabel(r'$C(t)$') +plt.xlim(0, t[-1]) +plt.legend(bbox_to_anchor=(1.03,0.03), loc='lower left') +plt.savefig('fig_correlation.png', dpi = 150, bbox_inches="tight") +plt.close() + + diff --git a/example/test_problem/ELBDM/UniformGranule/python_script/plot_powerspectrum.py b/example/test_problem/ELBDM/UniformGranule/python_script/plot_powerspectrum.py new file mode 100644 index 0000000000..178cd91726 --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/python_script/plot_powerspectrum.py @@ -0,0 +1,87 @@ +import numpy as np +import matplotlib.pyplot as plt +import yt +import argparse +import sys + +#------------------------------------------------------------------------------------------------------------------------- +# load the command-line parameters +parser = argparse.ArgumentParser( description='Get power spectrum' ) + +parser.add_argument( '-s', action='store', required=True, type=int, dest='idx_start', + help='first data index' ) +parser.add_argument( '-e', action='store', required=True, type=int, dest='idx_end', + help='last data index' ) +parser.add_argument( '-d', action='store', required=False, type=int, dest='didx', + help='delta data index [%(default)d]', default=1 ) + +args=parser.parse_args() + +idx_start = args.idx_start +idx_end = args.idx_end +didx = args.didx + +# print command-line parameters +print( '\nCommand-line arguments:' ) +print( '-------------------------------------------------------------------' ) +for t in range( len(sys.argv) ): + print( str(sys.argv[t])) +print( '' ) +print( '-------------------------------------------------------------------\n' ) + +# plot GAMER generated power spectrum as reference +GAMER_PS = True +print('GAMER_PS='+str(GAMER_PS)+'\n') + +yt.enable_parallelism() +ts = yt.DatasetSeries( [ '../Data_%06d'%idx for idx in range(idx_start, idx_end+1, didx) ] ) + +for ds in ts.piter(): + idx = ds.parameters["DumpID"] + time = ds.parameters["Time"][0] + N = ds.parameters["NX0"][0] + BoxSize = ds.parameters["BoxSize"][0] + dh = ds.parameters["CellSize"][0] + dd = ds.covering_grid(level=0, left_edge=[0, 0, 0], dims=ds.domain_dimensions) + rho = dd["Dens"].d + rhok = np.fft.rfftn( rho ) + + kx = 2*np.pi*np.fft.fftfreq( N, dh ) + ky = 2*np.pi*np.fft.fftfreq( N, dh ) + kz = 2*np.pi*np.fft.rfftfreq( N, dh ) + + Pk3d = abs(rhok)**2 + Pk_total = np.zeros(N//2+1) + count = np.zeros(N//2+1) + + for i in 
range( N ): + for j in range( N ): + for k in range( N//2+1 ): + l = round((kx[i]**2+ky[j]**2+kz[k]**2)**0.5*BoxSize/(2.0*np.pi)) + if (l < N//2+1): + Pk_total[l] = Pk_total[l] + Pk3d[i,j,k] + count[l] = count[l] + 1 + Pk1d = np.divide(Pk_total, count, out=np.zeros_like(Pk_total), where=count!=0) + k3Pk = Pk1d*kz**3 + d = 2*np.pi/kz[np.argmax(k3Pk)] + print('estimated granule diameter = %13.7e, time = %13.7e\n'%(d, time)) + + if(GAMER_PS): + gamer_ps = np.loadtxt('../PowerSpec_%06d'%idx, skiprows=1, dtype=float) + gamer_k = gamer_ps[:,0] + gamer_Pk1d = gamer_ps[:,1] + gamer_k3Pk = gamer_k**3*gamer_Pk1d + + plt.figure() + plt.title("Dimensionless Power Spectrum") + plt.plot(kz[1:], k3Pk[1:]/max(k3Pk), label = 'numpy') + if(GAMER_PS): + plt.plot(gamer_k, gamer_k3Pk/max(gamer_k3Pk), label = 'gamer') + plt.xlabel('$k$') + plt.ylabel(r'$k^3P(k)$') + plt.yscale('log') + plt.legend(loc='lower right') + plt.savefig('fig_powerspectrum_dimensionless_%06d.png'%idx, dpi = 150, bbox_inches="tight") + plt.close() + + diff --git a/example/test_problem/ELBDM/UniformGranule/python_script/plot_slice.py b/example/test_problem/ELBDM/UniformGranule/python_script/plot_slice.py new file mode 100644 index 0000000000..aeb20be654 --- /dev/null +++ b/example/test_problem/ELBDM/UniformGranule/python_script/plot_slice.py @@ -0,0 +1,68 @@ +import yt +import numpy as np +import argparse +import sys + +#------------------------------------------------------------------------------------------------------------------------- +# load the command-line parameters +parser = argparse.ArgumentParser( description='Get disk properties' ) + +parser.add_argument( '-s', action='store', required=True, type=int, dest='idx_start', + help='first data index' ) +parser.add_argument( '-e', action='store', required=True, type=int, dest='idx_end', + help='last data index' ) +parser.add_argument( '-d', action='store', required=False, type=int, dest='didx', + help='delta data index [%(default)d]', default=1 ) + +args=parser.parse_args() + +idx_start = args.idx_start +idx_end = args.idx_end +didx = args.didx + +# print command-line parameters +print( '\nCommand-line arguments:' ) +print( '-------------------------------------------------------------------' ) +for t in range( len(sys.argv) ): + print( str(sys.argv[t])) +print( '' ) +print( '-------------------------------------------------------------------\n' ) + +field = 'Dens' +colormap = 'algae' +#colormap = 'magma' +dpi = 300 + +yt.enable_parallelism() +ts = yt.DatasetSeries( [ '../Data_%06d'%idx for idx in range(idx_start, idx_end+1, didx) ] ) + +for ds in ts.piter(): + dd = ds.all_data() + dens = np.array(dd["Dens"]) + avedens = np.mean(dens) + + plt = yt.SlicePlot( ds, 0, fields = field, center = 'c') + plt.set_zlim( field, avedens*1.0e-4, avedens*1.0e+1, dynamic_range=None) + plt.set_axes_unit( 'kpc' ) + plt.set_unit( field, 'Msun/kpc**3') + plt.set_cmap( field, colormap ) + plt.annotate_timestamp( time_unit='Myr', corner='upper_right', text_args={'color':'k'} ) + plt.save( mpl_kwargs={"dpi":dpi} ) + + plt = yt.SlicePlot( ds, 1, fields = field, center = 'c') + plt.set_zlim( field, avedens*1.0e-4, avedens*1.0e+1, dynamic_range=None) + plt.set_axes_unit( 'kpc' ) + plt.set_unit( field, 'Msun/kpc**3') + plt.set_cmap( field, colormap ) + plt.annotate_timestamp( time_unit='Myr', corner='upper_right', text_args={'color':'k'} ) + plt.save( mpl_kwargs={"dpi":dpi} ) + + plt = yt.SlicePlot( ds, 2, fields = field, center = 'c') + plt.set_zlim( field, avedens*1.0e-4, avedens*1.0e+1, 
dynamic_range=None) + plt.set_axes_unit( 'kpc' ) + plt.set_unit( field, 'Msun/kpc**3') + plt.set_cmap( field, colormap ) + plt.annotate_timestamp( time_unit='Myr', corner='upper_right', text_args={'color':'k'} ) + plt.save( mpl_kwargs={"dpi":dpi} ) + + diff --git a/include/Profile.h b/include/Profile.h index f912b5db7a..1ac7f12392 100644 --- a/include/Profile.h +++ b/include/Profile.h @@ -20,6 +20,7 @@ void Aux_Error( const char *File, const int Line, const char *Func, const char * // Allocated : Whether or not member arrays such as Radius[] have been allocated // Radius : Radial coordinate at each bin // Data : Profile data at each bin +// Data_Sigma : Standard deviation of Data at each bin // Weight : Total weighting at each bin // NCell : Number of cells at each bin // @@ -42,6 +43,7 @@ struct Profile_t double *Radius; double *Data; + double *Data_Sigma; double *Weight; long *NCell; @@ -57,12 +59,13 @@ struct Profile_t Profile_t() { - NBin = -1; - Allocated = false; - Radius = NULL; - Data = NULL; - Weight = NULL; - NCell = NULL; + NBin = -1; + Allocated = false; + Radius = NULL; + Data = NULL; + Data_Sigma = NULL; + Weight = NULL; + NCell = NULL; } // METHOD : Profile_t @@ -103,10 +106,11 @@ struct Profile_t // --> allows the same Profile_t object to be reused without the need to manually free memory first if ( Allocated ) FreeMemory(); - Radius = new double [NBin]; - Data = new double [NBin]; - Weight = new double [NBin]; - NCell = new long [NBin]; + Radius = new double [NBin]; + Data = new double [NBin]; + Data_Sigma = new double [NBin]; + Weight = new double [NBin]; + NCell = new long [NBin]; Allocated = true; @@ -127,10 +131,11 @@ struct Profile_t void FreeMemory() { - delete [] Radius; Radius = NULL; - delete [] Data; Data = NULL; - delete [] Weight; Weight = NULL; - delete [] NCell; NCell = NULL; + delete [] Radius; Radius = NULL; + delete [] Data; Data = NULL; + delete [] Data_Sigma; Data_Sigma = NULL; + delete [] Weight; Weight = NULL; + delete [] NCell; NCell = NULL; Allocated = false; diff --git a/include/Prototype.h b/include/Prototype.h index af1ba95c9b..f45ee37035 100644 --- a/include/Prototype.h +++ b/include/Prototype.h @@ -41,7 +41,7 @@ int Aux_CountRow( const char *FileName ); void Aux_ComputeProfile( Profile_t *Prof[], const double Center[], const double r_max_input, const double dr_min, const bool LogBin, const double LogBinRatio, const bool RemoveEmpty, const long TVarBitIdx[], const int NProf, const int MinLv, const int MaxLv, const PatchType_t PatchType, - const double PrepTimeIn ); + const double PrepTimeIn, const bool GetSigma ); void Aux_FindExtrema( Extrema_t *Extrema, const ExtremaMode_t Mode, const int MinLv, const int MaxLv, const PatchType_t PatchType ); void Aux_FindWeightedAverageCenter( double WeightedAverageCenter[], const double Center_ref[], const double MaxR, const double MinWD, diff --git a/include/Typedef.h b/include/Typedef.h index cd91b4fda2..49dd5880f3 100644 --- a/include/Typedef.h +++ b/include/Typedef.h @@ -110,7 +110,9 @@ const TestProbID_t TESTPROB_ELBDM_PLANE_WAVE = 1010, TESTPROB_ELBDM_PERTURBATION = 1011, TESTPROB_ELBDM_HALO_MERGER = 1012, - TESTPROB_ELBDM_DISK_HEATING = 1013; + TESTPROB_ELBDM_DISK_HEATING = 1013, + TESTPROB_ELBDM_UNIFORM_GRANULE = 1014; + // program initialization options typedef int OptInit_t; diff --git a/src/Auxiliary/Aux_ComputeProfile.cpp b/src/Auxiliary/Aux_ComputeProfile.cpp index b408e6a61e..b417172a9f 100644 --- a/src/Auxiliary/Aux_ComputeProfile.cpp +++ b/src/Auxiliary/Aux_ComputeProfile.cpp @@ -11,11 +11,12 
@@ extern void SetTempIntPara( const int lv, const int Sg0, const double PrepTime, // Description : Compute the average radial profile of target field(s) // // Note : 1. Results will be stored in the input "Prof" object -// --> Prof->Radius[]: Radial coordinate at each bin -// Prof->Data []: Profile data at each bin -// Prof->Weight[]: Total weighting at each bin -// Prof->NCell []: Number of cells at each bin -// Prof->NBin : Total number of bins +// --> Prof->Radius []: Radial coordinate at each bin +// Prof->Data []: Profile data at each bin +// Prof->Data_Sigma[]: Standard deviation of profile data at each bin +// Prof->Weight []: Total weighting at each bin +// Prof->NCell []: Number of cells at each bin +// Prof->NBin : Total number of bins // --> See the "Profile_t" structure defined in "include/Profile.h" for details // --> These arrays will be free'd when deleting "Prof" // 2. The exact maximum radius adopted may be slightly larger than the input "r_max" @@ -44,7 +45,7 @@ extern void SetTempIntPara( const int lv, const int Sg0, const double PrepTime, // --> Right edge of log bin n = dr_min*LogBinRatio^n // RemoveEmpty : true --> remove empty bins from the data // false --> these empty bins will still be in the profile arrays with -// Data[empty_bin]=Weight[empty_bin]=NCell[empty_bin]=0 +// Data[empty_bin]=Data_Sigma[empty_bin]=Weight[empty_bin]=NCell[empty_bin]=0 // TVarBitIdx : Bitwise indices of target variables for computing the profiles // --> Supported indices (defined in Macro.h): // HYDRO : _DENS, _MOMX, _MOMY, _MOMZ, _ENGY, _VELX, _VELY, _VELZ, _VELR, @@ -63,6 +64,8 @@ extern void SetTempIntPara( const int lv, const int Sg0, const double PrepTime, // (i.e., MinLv ~ MaxLv) and non-leaf patches only on MaxLv // PrepTimeIn : Target physical time to prepare data // --> If PrepTimeIn<0, turn off temporal interpolation and always use the most recent data +// GetSigma : true --> compute the standard deviation of profile data +// false --> set Data_Sigma=0 // // Example : const double Center[3] = { amr->BoxCenter[0], amr->BoxCenter[1], amr->BoxCenter[2] }; // const double MaxRadius = 0.5*amr->BoxSize[0]; @@ -76,12 +79,13 @@ extern void SetTempIntPara( const int lv, const int Sg0, const double PrepTime, // const int MaxLv = MAX_LEVEL; // const PatchType_t PatchType = PATCH_LEAF_PLUS_MAXNONLEAF; // const double PrepTime = -1.0; +// const bool GetSigma = false; // // Profile_t Prof_Dens, Prof_Pres; // Profile_t *Prof[] = { &Prof_Dens, &Prof_Pres }; // // Aux_ComputeProfile( Prof, Center, MaxRadius, MinBinSize, LogBin, LogBinRatio, RemoveEmptyBin, -// TVar, NProf, MinLv, MaxLv, PatchType, PrepTime ); +// TVar, NProf, MinLv, MaxLv, PatchType, PrepTime, GetSigma ); // // if ( MPI_Rank == 0 ) // { @@ -90,10 +94,10 @@ extern void SetTempIntPara( const int lv, const int Sg0, const double PrepTime, // char Filename[MAX_STRING]; // sprintf( Filename, "Profile%d.txt", p ); // FILE *File = fopen( Filename, "w" ); -// fprintf( File, "#%19s %21s %21s %10s\n", "Radius", "Data", "Weight", "Cells" ); +// fprintf( File, "#%19s %21s %21s %21s %10s\n", "Radius", "Data", "Data_Sigma", "Weight", "Cells" ); // for (int b=0; bNBin; b++) -// fprintf( File, "%20.14e %21.14e %21.14e %10ld\n", -// Prof[p]->Radius[b], Prof[p]->Data[b], Prof[p]->Weight[b], Prof[p]->NCell[b] ); +// fprintf( File, "%20.14e %21.14e %21.14e %21.14e %10ld\n", +// Prof[p]->Radius[b], Prof[p]->Data[b], Prof[p]->Data_Sigma[b], Prof[p]->Weight[b], Prof[p]->NCell[b] ); // fclose( File ); // } // } @@ -103,7 +107,7 @@ extern void 
SetTempIntPara( const int lv, const int Sg0, const double PrepTime, void Aux_ComputeProfile( Profile_t *Prof[], const double Center[], const double r_max_input, const double dr_min, const bool LogBin, const double LogBinRatio, const bool RemoveEmpty, const long TVarBitIdx[], const int NProf, const int MinLv, const int MaxLv, const PatchType_t PatchType, - const double PrepTimeIn ) + const double PrepTimeIn, const bool GetSigma ) { // check @@ -213,21 +217,23 @@ void Aux_ComputeProfile( Profile_t *Prof[], const double Center[], const double const int NT = 1; # endif - double ***OMP_Data=NULL, ***OMP_Weight=NULL; + double ***OMP_Data=NULL, ***OMP_Data_Sigma=NULL, ***OMP_Weight=NULL; long ***OMP_NCell=NULL; - Aux_AllocateArray3D( OMP_Data, NProf, NT, Prof[0]->NBin ); - Aux_AllocateArray3D( OMP_Weight, NProf, NT, Prof[0]->NBin ); - Aux_AllocateArray3D( OMP_NCell, NProf, NT, Prof[0]->NBin ); + Aux_AllocateArray3D( OMP_Data, NProf, NT, Prof[0]->NBin ); + Aux_AllocateArray3D( OMP_Data_Sigma, NProf, NT, Prof[0]->NBin ); + Aux_AllocateArray3D( OMP_Weight, NProf, NT, Prof[0]->NBin ); + Aux_AllocateArray3D( OMP_NCell, NProf, NT, Prof[0]->NBin ); // initialize profile arrays for (int p=0; pNBin; b++) { - OMP_Data [p][t][b] = 0.0; - OMP_Weight[p][t][b] = 0.0; - OMP_NCell [p][t][b] = 0; + OMP_Data [p][t][b] = 0.0; + OMP_Data_Sigma[p][t][b] = 0.0; + OMP_Weight [p][t][b] = 0.0; + OMP_NCell [p][t][b] = 0; } real (*Patch_Data)[8][PS1][PS1][PS1] = new real [NT][8][PS1][PS1][PS1]; // field data of each cell @@ -538,6 +544,9 @@ void Aux_ComputeProfile( Profile_t *Prof[], const double Center[], const double OMP_Data [p][TID][bin] += Patch_Data[TID][LocalID][k][j][i]*Weight; OMP_Weight[p][TID][bin] += Weight; OMP_NCell [p][TID][bin] ++; + if ( GetSigma ) + OMP_Data_Sigma[p][TID][bin] += SQR( Patch_Data[TID][LocalID][k][j][i] )*Weight; + } // i,j,k } // for (int LocalID=0; LocalID<8; LocalID++) } // for (int p=0; pNBin; b++) { - Prof[p]->Data [b] = OMP_Data [p][0][b]; - Prof[p]->Weight[b] = OMP_Weight[p][0][b]; - Prof[p]->NCell [b] = OMP_NCell [p][0][b]; + Prof[p]->Data [b] = OMP_Data [p][0][b]; + Prof[p]->Weight[b] = OMP_Weight[p][0][b]; + Prof[p]->NCell [b] = OMP_NCell [p][0][b]; + if ( GetSigma ) + Prof[p]->Data_Sigma[b] = OMP_Data_Sigma[p][0][b]; + } for (int t=1; tData [b] += OMP_Data [p][t][b]; Prof[p]->Weight[b] += OMP_Weight[p][t][b]; Prof[p]->NCell [b] += OMP_NCell [p][t][b]; + if ( GetSigma ) + Prof[p]->Data_Sigma[b] += OMP_Data_Sigma[p][t][b]; } } // free per-thread arrays Aux_DeallocateArray3D( OMP_Data ); + Aux_DeallocateArray3D( OMP_Data_Sigma ); Aux_DeallocateArray3D( OMP_Weight ); Aux_DeallocateArray3D( OMP_NCell ); @@ -593,16 +608,21 @@ void Aux_ComputeProfile( Profile_t *Prof[], const double Center[], const double { if ( MPI_Rank == 0 ) { - MPI_Reduce( MPI_IN_PLACE, Prof[p]->Data, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); - MPI_Reduce( MPI_IN_PLACE, Prof[p]->Weight, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); - MPI_Reduce( MPI_IN_PLACE, Prof[p]->NCell , Prof[p]->NBin, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( MPI_IN_PLACE, Prof[p]->Data, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( MPI_IN_PLACE, Prof[p]->Weight, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( MPI_IN_PLACE, Prof[p]->NCell, Prof[p]->NBin, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD ); + if ( GetSigma ) + MPI_Reduce( MPI_IN_PLACE, Prof[p]->Data_Sigma, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); + } else { - MPI_Reduce( Prof[p]->Data, 
NULL, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); - MPI_Reduce( Prof[p]->Weight, NULL, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); - MPI_Reduce( Prof[p]->NCell, NULL, Prof[p]->NBin, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( Prof[p]->Data, NULL, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( Prof[p]->Weight, NULL, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( Prof[p]->NCell, NULL, Prof[p]->NBin, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD ); + if ( GetSigma ) + MPI_Reduce( Prof[p]->Data_Sigma, NULL, Prof[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); } } # endif @@ -615,7 +635,18 @@ void Aux_ComputeProfile( Profile_t *Prof[], const double Center[], const double for (int b=0; bNBin; b++) { // skip empty bins since both their data and weight are zero - if ( Prof[p]->NCell[b] > 0L ) Prof[p]->Data[b] /= Prof[p]->Weight[b]; + if ( Prof[p]->NCell[b] > 0L ) + { + Prof[p]->Data [b] /= Prof[p]->Weight[b]; + Prof[p]->Data_Sigma[b] = ( GetSigma ) + ? Prof[p]->Data_Sigma[b]/Prof[p]->Weight[b] - SQR( Prof[p]->Data[b] ) + : 0.0; + + if ( Prof[p]->Data_Sigma[b] < 0.0 ) + Aux_Error( ERROR_INFO, "Prof[%d]->Data_Sigma[%d] = %14.7e < 0.0 !!\n", p, b, Prof[p]->Data_Sigma[b] ); + else + Prof[p]->Data_Sigma[b] = sqrt( Prof[p]->Data_Sigma[b] ); + } } } @@ -623,9 +654,10 @@ void Aux_ComputeProfile( Profile_t *Prof[], const double Center[], const double // broadcast data to all ranks for (int p=0; pData, Prof[p]->NBin, MPI_DOUBLE, 0, MPI_COMM_WORLD ); - MPI_Bcast( Prof[p]->Weight, Prof[p]->NBin, MPI_DOUBLE, 0, MPI_COMM_WORLD ); - MPI_Bcast( Prof[p]->NCell, Prof[p]->NBin, MPI_LONG, 0, MPI_COMM_WORLD ); + MPI_Bcast( Prof[p]->Data, Prof[p]->NBin, MPI_DOUBLE, 0, MPI_COMM_WORLD ); + MPI_Bcast( Prof[p]->Data_Sigma, Prof[p]->NBin, MPI_DOUBLE, 0, MPI_COMM_WORLD ); + MPI_Bcast( Prof[p]->Weight, Prof[p]->NBin, MPI_DOUBLE, 0, MPI_COMM_WORLD ); + MPI_Bcast( Prof[p]->NCell, Prof[p]->NBin, MPI_LONG, 0, MPI_COMM_WORLD ); } @@ -650,10 +682,11 @@ void Aux_ComputeProfile( Profile_t *Prof[], const double Center[], const double for (int p=0; pRadius[b_up_ms] = Prof[p]->Radius[b_up]; - Prof[p]->Data [b_up_ms] = Prof[p]->Data [b_up]; - Prof[p]->Weight[b_up_ms] = Prof[p]->Weight[b_up]; - Prof[p]->NCell [b_up_ms] = Prof[p]->NCell [b_up]; + Prof[p]->Radius [b_up_ms] = Prof[p]->Radius [b_up]; + Prof[p]->Data [b_up_ms] = Prof[p]->Data [b_up]; + Prof[p]->Data_Sigma[b_up_ms] = Prof[p]->Data_Sigma[b_up]; + Prof[p]->Weight [b_up_ms] = Prof[p]->Weight [b_up]; + Prof[p]->NCell [b_up_ms] = Prof[p]->NCell [b_up]; } } diff --git a/src/Init/Init_TestProb.cpp b/src/Init/Init_TestProb.cpp index 819bc5cd4a..5cc08863bb 100644 --- a/src/Init/Init_TestProb.cpp +++ b/src/Init/Init_TestProb.cpp @@ -45,6 +45,7 @@ void Init_TestProb_ELBDM_PlaneWave(); void Init_TestProb_ELBDM_Perturbation(); void Init_TestProb_ELBDM_HaloMerger(); void Init_TestProb_ELBDM_DiskHeating(); +void Init_TestProb_ELBDM_UniformGranule(); @@ -111,6 +112,7 @@ void Init_TestProb() case TESTPROB_ELBDM_PERTURBATION : Init_TestProb_ELBDM_Perturbation(); break; case TESTPROB_ELBDM_HALO_MERGER : Init_TestProb_ELBDM_HaloMerger(); break; case TESTPROB_ELBDM_DISK_HEATING : Init_TestProb_ELBDM_DiskHeating(); break; + case TESTPROB_ELBDM_UNIFORM_GRANULE : Init_TestProb_ELBDM_UniformGranule(); break; default: Aux_Error( ERROR_INFO, "unsupported TESTPROB_ID (%d) !!\n", TESTPROB_ID ); } // switch( TESTPROB_ID ) diff --git a/src/SourceTerms/Deleptonization/CPU_Src_Deleptonization.cpp 
b/src/SourceTerms/Deleptonization/CPU_Src_Deleptonization.cpp index 0f6289667d..a941dc34b9 100644 --- a/src/SourceTerms/Deleptonization/CPU_Src_Deleptonization.cpp +++ b/src/SourceTerms/Deleptonization/CPU_Src_Deleptonization.cpp @@ -188,7 +188,7 @@ void Src_WorkBeforeMajorFunc_Deleptonization( const int lv, const double TimeNew for (int v=0; v prof_init[0]->Radius[bin_index] ) + { + int bin_index_right = bin_index+1; + delta_r = prof_init[0]->Radius[bin_index_right] - prof_init[0]->Radius[bin_index]; + x = (r - prof_init[0]->Radius[bin_index]) / delta_r; +// check x + if (x<(real)(0.0)) + Aux_Error( ERROR_INFO, "x (%14.7e) < 0.0 !! index = %d ; r = %14.7e ; left-hand point = %14.7e ; right-hand point = %14.7e \n", x , bin_index, r, prof_init[0]->Radius[bin_index], prof_init[0]->Radius[bin_index_right] ); + else if (x>(real)(1.0)) + Aux_Error( ERROR_INFO, "x (%14.7e) > 1.0 !! index = %d ; r = %14.7e ; left-hand point = %14.7e ; right-hand point = %14.7e \n", x , bin_index, r, prof_init[0]->Radius[bin_index], prof_init[0]->Radius[bin_index_right] ); +// interpolate + for (int i=0; iData [bin_index]*(real)(1.-x) + prof_init[i]->Data [bin_index_right]*(real)x; + std_inter[i] = prof_init[i]->Data_Sigma[bin_index]*(real)(1.-x) + prof_init[i]->Data_Sigma[bin_index_right]*(real)x; + } + } + else + { +// no left hand side bin, no interpolation + if (bin_index==0) + { + for (int i=0; iData [bin_index]; + std_inter[i] = prof_init[i]->Data_Sigma[bin_index]; + } + } + else + { + int bin_index_left = bin_index-1; + delta_r = prof_init[0]->Radius[bin_index] - prof_init[0]->Radius[bin_index_left]; + x = (r - prof_init[0]->Radius[bin_index_left]) / delta_r; +// check x + if (x<(real)(0.0)) + Aux_Error( ERROR_INFO, "x (%14.7e) < 0.0 !! index = %d ; r = %14.7e ; left-hand point = %14.7e ; right-hand point = %14.7e \n", x , bin_index, r, prof_init[0]->Radius[bin_index_left], prof_init[0]->Radius[bin_index] ); + else if (x>(real)(1.0)) + Aux_Error( ERROR_INFO, "x (%14.7e) > 1.0 !! index = %d ; r = %14.7e ; left-hand point = %14.7e ; right-hand point = %14.7e \n", x , bin_index, r, prof_init[0]->Radius[bin_index_left], prof_init[0]->Radius[bin_index] ); +// interpolate + for (int i=0; iData [bin_index_left]*(real)(1.-x) + prof_init[i]->Data [bin_index]*(real)x; + std_inter[i] = prof_init[i]->Data_Sigma[bin_index_left]*(real)(1.-x) + prof_init[i]->Data_Sigma[bin_index]*(real)x; + } + } + } +} + + +//------------------------------------------------------------------------------------------------------- +// Function : Aux_ComputeCorrelation +// Description : Compute the average radial correlation profile of target field(s) +// +// Note : 1. Results will be stored in the input "Correlation" object +// --> Correlation->Radius[]: Radial coordinate at each bin +// Correlation->Data []: Correlation profile data at each bin +// Correlation->Weight[]: Total weighting at each bin +// Correlation->NCell []: Number of cells at each bin +// Correlation->NBin : Total number of bins +// --> See the "Profile_t" structure defined in "include/Profile.h" for details +// --> These arrays will be free'd when deleting "Correlation" +// 2. Maximum radius adopted when actually computing the profile may be larger than the input "r_max" +// --> Because "r_max" in general does not coincide with the right edge of the maximum bin +// 3. Support hybrid OpenMP/MPI parallelization +// --> All ranks will share the same profile data after invoking this function +// 4. 
Use cell volume as the weighting of each cell +// --> Will support other weighting functions in the future +// 5. Currently only support computing _DENS field +// --> If multiple fields are supported in the future, the order of fields to be returned follows TVarBitIdx[] +// +// Parameter : Correlation : Profile_t object array to store the correlation function +// prof_init : Profile_t object array for storing the mean and standard deviation quantities for calculating correlation function +// Center : Target center coordinates +// r_max_input : Maximum radius for computing the profile +// --> See also "Note-2" above +// dr_min : Minimum bin size +// --> For linear bin, this is the size of all bins +// For log bin, this is the size of the 0th bin +// LogBin : true/false --> log/linear bins +// LogBinRatio : Ratio of adjacent log bins +// --> Right edge of log bin n = dr_min*LogBinRatio^n +// RemoveEmpty : true --> remove empty bins from the data +// false --> these empty bins will still be in the profile arrays with +// Data[empty_bin]=Weight[empty_bin]=NCell[empty_bin]=0 +// TVarBitIdx : Bitwise indices of target variables for computing the profiles +// --> Supported indices (defined in Macro.h): +// ELBDM : _DENS +// --> For a passive scalar with an integer field index FieldIdx returned by AddField(), +// one can convert it to a bitwise field index by BIDX(FieldIdx) +// NProf : Number of Profile_t objects in Correlation +// Min/MaxLv : Consider patches on levels from MinLv to MaxLv +// PatchType : Only consider patches of the specified type +// --> Supported types: PATCH_LEAF, PATCH_NONLEAF, PATCH_BOTH, PATCH_LEAF_PLUS_MAXNONLEAF +// --> PATCH_LEAF_PLUS_MAXNONLEAF includes leaf patches on all target levels +// (i.e., MinLv ~ MaxLv) and non-leaf patches only on MaxLv +// PrepTime : Target physical time to prepare data +// --> If PrepTime<0, turn off temporal interpolation and always use the most recent data +// +// Example : const double Center[3] = { amr->BoxCenter[0], amr->BoxCenter[1], amr->BoxCenter[2] }; +// const double MaxRadius = 0.5*amr->BoxSize[0]; +// const double MinBinSize = amr->dh[MAX_LEVEL]; +// const bool LogBin = true; +// const double LogBinRatio = 1.25; +// const bool RemoveEmptyBin = true; +// const long TVarBitIdx[] = { _DENS }; +// const int NProf = 1; +// const int MinLv = 0; +// const int MaxLv = MAX_LEVEL; +// const PatchType_t PatchType = PATCH_LEAF_PLUS_MAXNONLEAF; +// const double PrepTime = -1.0; +// +// Profile_t Correlation_Dens; +// Profile_t *Correlation[] = { &Correlation_Dens }; +// +// Aux_ComputeCorrelation( Correlation, Center, MaxRadius, MinBinSize, LogBin, LogBinRatio, RemoveEmptyBin, +// TVarBitIdx, NProf, MinLv, MaxLv, PatchType, PrepTime ); +// +// if ( MPI_Rank == 0 ) +// { +// for (int p=0; pNBin; b++) +// fprintf( File, "%20.14e %21.14e %21.14e %10ld\n", +// Correlation[p]->Radius[b], Correlation[p]->Data[b], Correlation[p]->Weight[b], Correlation[p]->NCell[b] ); +// fclose( File ); +// } +// } +// +// Return : Correlation +//------------------------------------------------------------------------------------------------------- +void Aux_ComputeCorrelation( Profile_t *Correlation[], const Profile_t *prof_init[], const double Center[], + const double r_max_input, const double dr_min, const bool LogBin, const double LogBinRatio, + const bool RemoveEmpty, const long TVarBitIdx[], const int NProf, const int MinLv, const int MaxLv, + const PatchType_t PatchType, const double PrepTime, const double dr_min_prof) +{ + +// check +# ifdef GAMER_DEBUG + 
if ( r_max_input <= 0.0 ) + Aux_Error( ERROR_INFO, "r_max_input (%14.7e) <= 0.0 !!\n", r_max_input ); + + if ( dr_min <= 0.0 ) + Aux_Error( ERROR_INFO, "dr_min (%14.7e) <= 0.0 !!\n", dr_min ); + + if ( LogBin && LogBinRatio <= 1.0 ) + Aux_Error( ERROR_INFO, "LogBinRatio (%14.7e) <= 1.0 !!\n", LogBinRatio ); + + if ( MinLv < 0 || MinLv > TOP_LEVEL ) + Aux_Error( ERROR_INFO, "incorrect MinLv (%d) !!\n", MinLv ); + + if ( MaxLv < 0 || MaxLv > TOP_LEVEL ) + Aux_Error( ERROR_INFO, "incorrect MaxLv (%d) !!\n", MaxLv ); + + if ( MinLv > MaxLv ) + Aux_Error( ERROR_INFO, "MinLv (%d) > MaxLv (%d) !!\n", MinLv, MaxLv ); + + if ( PatchType != PATCH_LEAF && PatchType != PATCH_NONLEAF && + PatchType != PATCH_BOTH && PatchType != PATCH_LEAF_PLUS_MAXNONLEAF ) + Aux_Error( ERROR_INFO, "incorrect PatchType (%d) !!\n", PatchType ); +# endif + if ( NProf != NCOMP_PASSIVE ) + Aux_Error( ERROR_INFO, "NProf(%d) != NCOMP_PASSIVE(%d) !! Currently only support NProf = NCOMP_PASSIVE for computing correlation !!\n", NProf, NCOMP_PASSIVE ); + if ( TVarBitIdx[0] != _DENS ) + Aux_Error( ERROR_INFO, "TVarBitIdx[0](%ld) != _DENS(%ld) !! Currently only support TVarBitIdx[0] = _DENS for computing correlation !!\n", TVarBitIdx[0], _DENS ); +# if ( MODEL == HYDRO ) + Aux_Error( ERROR_INFO, "Does not support HDRDO for computing correlation function yet!!\n" ); +# endif + + +// precompute the integer indices of intrinsic fluid fields for better performance + const int IdxUndef = -1; + int TFluIntIdx[NProf]; + + for (int p=0; puse_wave_flag[lv] ) + Aux_Error( ERROR_INFO, "Retrieving PHAS and STUB to compute profile in hybrid scheme is not supported !!\n" ); +# endif // # if ( MODEL == ELBDM && ELBDM_SCHEME == ELBDM_HYBRID) + +// initialize the profile objects + for (int p=0; pNBin = int( log(r_max_input/dr_min)/log(LogBinRatio) ) + 1; +// will be greater than r_max_input if Correlation[p]->NBin = int( log(r_max_input/dr_min)/log(LogBinRatio) ) + 2; + Correlation[p]->NBin = int( log(r_max_input/dr_min)/log(LogBinRatio) ) + 1; + Correlation[p]->MaxRadius = dr_min*pow( LogBinRatio, Correlation[p]->NBin-1 ); + } + + else // linear bin + { + Correlation[p]->NBin = (int)ceil( r_max_input / dr_min ); + Correlation[p]->MaxRadius = dr_min*Correlation[p]->NBin; + } + + if ( Correlation[p]->MaxRadius > prof_init[p]->MaxRadius ) Aux_Error( ERROR_INFO, "Correlation[%d]->MaxRadius ( %14.7e ) > Profile[%d]->MaxRadius ( %14.7e ) !!\n", p, Correlation[p]->MaxRadius, p, prof_init[p]->MaxRadius ); + + +// record profile parameters + for (int d=0; d<3; d++) Correlation[p]->Center[d] = Center[d]; + + Correlation[p]->LogBin = LogBin; + + if ( LogBin ) Correlation[p]->LogBinRatio = LogBinRatio; + + +// allocate all member arrays of Correlation + Correlation[p]->AllocateMemory(); + + +// record radial coordinates + if ( LogBin ) + for (int b=0; bNBin; b++) Correlation[p]->Radius[b] = dr_min*pow( LogBinRatio, b-0.5 ); + else + for (int b=0; bNBin; b++) Correlation[p]->Radius[b] = (b+0.5)*dr_min; + + } // for (int p=0; pNBin ); + Aux_AllocateArray3D( OMP_Weight, NProf, NT, Correlation[0]->NBin ); + Aux_AllocateArray3D( OMP_NCell, NProf, NT, Correlation[0]->NBin ); + + +// collect profile data in this rank + const double r_max2 = SQR( Correlation[0]->MaxRadius ); + const double HalfBox[3] = { 0.5*amr->BoxSize[0], 0.5*amr->BoxSize[1], 0.5*amr->BoxSize[2] }; + const bool Periodic[3] = { OPT__BC_FLU[0] == BC_FLU_PERIODIC, + OPT__BC_FLU[2] == BC_FLU_PERIODIC, + OPT__BC_FLU[4] == BC_FLU_PERIODIC }; + +# pragma omp parallel + { +# ifdef OPENMP + const int TID = 
omp_get_thread_num(); +# else + const int TID = 0; +# endif + +// initialize arrays + for (int p=0; pNBin; b++) + { + OMP_Data [p][TID][b] = 0.0; + OMP_Weight[p][TID][b] = 0.0; + OMP_NCell [p][TID][b] = 0; + } + +// allocate passive scalar arrays + real *Passive = new real [NCOMP_PASSIVE]; + +// loop over all target levels + for (int lv=MinLv; lv<=MaxLv; lv++) + { + const double dh = amr->dh[lv]; + const double dv = CUBE( dh ); + + +// determine temporal interpolation parameters + bool FluIntTime = false; + int FluSg = amr->FluSg[lv]; + int FluSg_IntT; + real FluWeighting, FluWeighting_IntT; + + if ( PrepTime >= 0.0 ) + { +// fluid + const int FluSg0 = amr->FluSg[lv]; + SetTempIntPara( lv, FluSg0, PrepTime, amr->FluSgTime[lv][FluSg0], amr->FluSgTime[lv][1-FluSg0], + FluIntTime, FluSg, FluSg_IntT, FluWeighting, FluWeighting_IntT ); + } + + +// use the "static" schedule for reproducibility +# pragma omp for schedule( static ) + for (int PID=0; PIDNPatchComma[lv][1]; PID++) + { +// skip untargeted patches + if ( amr->patch[0][lv][PID]->son != -1 ) + { + if ( PatchType == PATCH_LEAF ) continue; + if ( PatchType == PATCH_LEAF_PLUS_MAXNONLEAF && lv != MaxLv ) continue; + } + + else + { + if ( PatchType == PATCH_NONLEAF ) continue; + } + + const real (*FluidPtr)[PS1][PS1][PS1] = amr->patch[ FluSg ][lv][PID]->fluid; + +// pointer for temporal interpolation + const real (*FluidPtr_IntT)[PS1][PS1][PS1] = ( FluIntTime ) ? amr->patch[ FluSg_IntT ][lv][PID]->fluid : NULL; + const double x0 = amr->patch[0][lv][PID]->EdgeL[0] + 0.5*dh - Center[0]; + const double y0 = amr->patch[0][lv][PID]->EdgeL[1] + 0.5*dh - Center[1]; + const double z0 = amr->patch[0][lv][PID]->EdgeL[2] + 0.5*dh - Center[2]; + + for (int k=0; k +HalfBox[2] ) { dz -= amr->BoxSize[2]; } + else if ( dz < -HalfBox[2] ) { dz += amr->BoxSize[2]; } + } + for (int j=0; j +HalfBox[1] ) { dy -= amr->BoxSize[1]; } + else if ( dy < -HalfBox[1] ) { dy += amr->BoxSize[1]; } + } + for (int i=0; i +HalfBox[0] ) { dx -= amr->BoxSize[0]; } + else if ( dx < -HalfBox[0] ) { dx += amr->BoxSize[0]; } + } + + const double r2 = SQR(dx) + SQR(dy) + SQR(dz); + + if ( r2 < r_max2 ) + { + const double r = sqrt( r2 ); + const int bin = ( LogBin ) ? ( (r= Correlation[0]->NBin ) continue; + +// check +# ifdef GAMER_DEBUG + if ( bin < 0 ) Aux_Error( ERROR_INFO, "bin (%d) < 0 !!\n", bin ); +# endif + +// interpolate to get mean value at r + real mean_value[NProf], std_value[NProf]; +// ****find corresponding bin index in density profile, which always uses linear bin!!***** + const int bin_prof = int (r/dr_min_prof); + InterpolateMeanAndStd( mean_value, std_value, prof_init, NProf, bin_prof, r ); + +// prepare passive scalars (for better sustainability, always do it even when unnecessary) + for (int v_out=0; v_out(real)0.) 
+ OMP_Data[p][TID][bin] += delta*delta_passive/std_value[p]/std_value[p]*Weight; + else + OMP_Data[p][TID][bin] += delta*delta_passive/mean_value[p]/mean_value[p]*Weight; + + OMP_Weight[p][TID][bin] += Weight; + OMP_NCell [p][TID][bin] ++; + } + } // for (int p=0; pNPatchComma[lv][1]; PID++) + } // for (int lv=MinLv; lv<=MaxLv; lv++) + + delete [] Passive; + Passive = NULL; + + } // OpenMP parallel region + + +// sum over all OpenMP threads + for (int p=0; pNBin; b++) + { + Correlation[p]->Data [b] = OMP_Data [p][0][b]; + Correlation[p]->Weight[b] = OMP_Weight[p][0][b]; + Correlation[p]->NCell [b] = OMP_NCell [p][0][b]; + } + + for (int t=1; tNBin; b++) + { + Correlation[p]->Data [b] += OMP_Data [p][t][b]; + Correlation[p]->Weight[b] += OMP_Weight[p][t][b]; + Correlation[p]->NCell [b] += OMP_NCell [p][t][b]; + } + } + +// free per-thread arrays + Aux_DeallocateArray3D( OMP_Data ); + Aux_DeallocateArray3D( OMP_Weight ); + Aux_DeallocateArray3D( OMP_NCell ); + + +// collect data from all ranks (in-place reduction) +# ifndef SERIAL + for (int p=0; pData, Correlation[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( MPI_IN_PLACE, Correlation[p]->Weight, Correlation[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( MPI_IN_PLACE, Correlation[p]->NCell , Correlation[p]->NBin, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD ); + } + + else + { + MPI_Reduce( Correlation[p]->Data, NULL, Correlation[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( Correlation[p]->Weight, NULL, Correlation[p]->NBin, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Reduce( Correlation[p]->NCell, NULL, Correlation[p]->NBin, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD ); + } + } +# endif + + +// compute profile by the root rank + if ( MPI_Rank == 0 ) + { + for (int p=0; pNBin; b++) + { +// skip empty bins since both their data and weight are zero + if ( Correlation[p]->NCell[b] > 0L ) Correlation[p]->Data[b] /= Correlation[p]->Weight[b]; + } + } + + +// broadcast data to all ranks + for (int p=0; pData, Correlation[p]->NBin, MPI_DOUBLE, 0, MPI_COMM_WORLD ); + MPI_Bcast( Correlation[p]->Weight, Correlation[p]->NBin, MPI_DOUBLE, 0, MPI_COMM_WORLD ); + MPI_Bcast( Correlation[p]->NCell, Correlation[p]->NBin, MPI_LONG, 0, MPI_COMM_WORLD ); + } + + +// remove the empty bins +// --> all ranks do the same work so that no data broadcast is required + if ( RemoveEmpty ) + { + for (int b=0; bNBin; b++) + { + if ( Correlation[0]->NCell[b] != 0L ) continue; + +// remove consecutive empty bins at the same time for better performance + int b_up; + for (b_up=b+1; b_upNBin; b_up++) + if ( Correlation[0]->NCell[b_up] != 0L ) break; + + const int stride = b_up - b; + + for (b_up=b+stride; b_upNBin; b_up++) + { + const int b_up_ms = b_up - stride; + + for (int p=0; pRadius[b_up_ms] = Correlation[p]->Radius[b_up]; + Correlation[p]->Data [b_up_ms] = Correlation[p]->Data [b_up]; + Correlation[p]->Weight[b_up_ms] = Correlation[p]->Weight[b_up]; + Correlation[p]->NCell [b_up_ms] = Correlation[p]->NCell [b_up]; + } + } + +// reset the total number of bins + for (int p=0; pNBin -= stride; + } // for (int b=0; bNBin; b++) + +// update the maximum radius since the last bin may have been removed + for (int p=0; pNBin-1; + + Correlation[p]->MaxRadius = ( LogBin ) ? 
Correlation[p]->Radius[LastBin]*sqrt( LogBinRatio )
+                                                        : Correlation[p]->Radius[LastBin] + 0.5*dr_min;
+      }
+   } // if ( RemoveEmpty )
+
+} // FUNCTION : Aux_ComputeCorrelation
diff --git a/src/TestProblem/ELBDM/UniformGranule/Init_TestProb_ELBDM_UniformGranule.cpp b/src/TestProblem/ELBDM/UniformGranule/Init_TestProb_ELBDM_UniformGranule.cpp
new file mode 100644
index 0000000000..7f6a2ec379
--- /dev/null
+++ b/src/TestProblem/ELBDM/UniformGranule/Init_TestProb_ELBDM_UniformGranule.cpp
@@ -0,0 +1,424 @@
+#include "GAMER.h"
+
+static void AddNewField_ELBDM_UniformGranule( void );
+static void Init_User_ELBDM_UniformGranule( void );
+static void Do_CF( void );
+static void End_UniformGranule( void );
+
+void Aux_ComputeCorrelation( Profile_t *Correlation[], const Profile_t *prof_init[], const double Center[],
+                             const double r_max_input, const double dr_min, const bool LogBin, const double LogBinRatio,
+                             const bool RemoveEmpty, const long TVarBitIdx[], const int NProf, const int MinLv, const int MaxLv,
+                             const PatchType_t PatchType, const double PrepTime, const double dr_min_prof );
+
+// problem-specific global variables
+// =======================================================================================
+static FieldIdx_t Idx_Dens0 = Idx_Undefined;  // field index for storing the **initial** density
+static bool       ComputeCorrelation;         // flag for computing the correlation function
+static bool       ReComputeCorrelation;       // recompute the correlation function during restart, using the restart time as the initial time (RESTART only)
+static char       FilePath_corr[MAX_STRING];  // folder for storing the correlation function text files
+static int        MinLv_corr;                 // compute the correlation function on AMR levels MinLv_corr to MaxLv_corr
+static int        MaxLv_corr;                 // compute the correlation function on AMR levels MinLv_corr to MaxLv_corr
+static int        StepInitial_corr;           // initial step for recording the correlation function
+static int        StepInterval_corr;          // step interval for recording the correlation function
+static int        StepEnd_corr;               // end step for recording the correlation function
+static double     Center_corr[3];             // center for computing the correlation function (currently set to the center of mass)
+static double     RadiusMax_prof;             // maximum radius of the initial density profile used in the correlation statistics
+static double     dr_min_prof;                // bin size of the initial density profile (minimum bin size for logarithmic bins)
+static bool       LogBin_prof;                // logarithmic bins or not (profile)
+static double     LogBinRatio_prof;           // ratio of adjacent log bins (profile)
+static bool       RemoveEmpty_prof;           // remove bins without any sample; false --> Data[empty_bin]=Weight[empty_bin]=NCell[empty_bin]=0 (profile)
+static double     RadiusMax_corr;             // maximum radius of the correlation function
+static double     dr_min_corr;                // bin size of the correlation function (minimum bin size for logarithmic bins)
+static bool       LogBin_corr;                // logarithmic bins or not (correlation)
+static double     LogBinRatio_corr;           // ratio of adjacent log bins (correlation)
+static bool       RemoveEmpty_corr;           // remove bins without any sample; false --> Data[empty_bin]=Weight[empty_bin]=NCell[empty_bin]=0 (correlation)
+
+static Profile_t *Prof_Dens_initial = new Profile_t();  // initial density profile
+static Profile_t *Correlation_Dens  = new Profile_t();  // density correlation function
+// =======================================================================================
+
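+// -----------------------------------------------------------------------------------------------------
+// Schematic summary (notation local to this comment; see Aux_ComputeCorrelation() for the exact code):
+// Do_CF() below calls Aux_ComputeCorrelation() to accumulate, with cell-volume weighting w, the binned
+// density correlation
+//
+//               sum_cells w * [ rho(x,t) - rho_mean(r) ] * [ Dens0(x) - rho_mean(r) ]
+//    C(r)  ~   -----------------------------------------------------------------------
+//                               sigma0(r)^2 * sum_cells w
+//
+// where rho_mean(r) and sigma0(r) are linearly interpolated from the initial profile Prof_Dens_initial
+// and Dens0 is the initial density stored as a problem-specific passive field
+// --> a fallback normalization by rho_mean(r)^2 is applied in the accumulation loop of
+//     Aux_ComputeCorrelation()
+// -----------------------------------------------------------------------------------------------------
+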
+//-------------------------------------------------------------------------------------------------------
+// Function    :  Validate
+// Description :  Validate the compilation flags and runtime parameters for this test problem
+//
+// Note        :  None
+//
+// Parameter   :  None
+//
+// Return      :  None
+//-------------------------------------------------------------------------------------------------------
+void Validate()
+{
+
+   if ( MPI_Rank == 0 )    Aux_Message( stdout, "   Validating test problem %d ...\n", TESTPROB_ID );
+
+// errors
+# if ( MODEL != ELBDM )
+   Aux_Error( ERROR_INFO, "MODEL != ELBDM !!\n" );
+# endif
+
+# ifndef GRAVITY
+   Aux_Error( ERROR_INFO, "GRAVITY must be enabled !!\n" );
+# endif
+
+# ifdef COMOVING
+   Aux_Error( ERROR_INFO, "COMOVING must be disabled !!\n" );
+# endif
+
+# if ( NCOMP_PASSIVE_USER != 1 )
+   Aux_Error( ERROR_INFO, "must set NCOMP_PASSIVE_USER to 1 !!\n" );
+# endif
+
+// only accept OPT__INIT == INIT_BY_RESTART or OPT__INIT == INIT_BY_FILE
+   if ( OPT__INIT != INIT_BY_RESTART  &&  OPT__INIT != INIT_BY_FILE )
+      Aux_Error( ERROR_INFO, "only support OPT__INIT == INIT_BY_RESTART or OPT__INIT == INIT_BY_FILE !!\n" );
+
+   if ( MPI_Rank == 0 )    Aux_Message( stdout, "   Validating test problem %d ... done\n", TESTPROB_ID );
+
+// check whether the fluid boundary conditions in Input__Parameter are set properly
+   for ( int direction = 0; direction < 6; direction++ )
+   {
+      if ( OPT__BC_FLU[direction] != BC_FLU_PERIODIC )
+         Aux_Error( ERROR_INFO, "must set periodic BC for fluid --> reset OPT__BC_FLU[%d] to 1 !!\n", direction );
+   }
+   if ( OPT__BC_POT != BC_POT_PERIODIC )
+      Aux_Error( ERROR_INFO, "must set periodic BC for gravity --> reset OPT__BC_POT to 1 !!\n" );
+
+} // FUNCTION : Validate
+
+
+
+#if ( MODEL == ELBDM && defined GRAVITY )
+//-------------------------------------------------------------------------------------------------------
+// Function    :  SetParameter
+// Description :  Load and set the problem-specific runtime parameters
+//
+// Note        :  1. Filename is set to "Input__TestProb" by default
+//                2. Major tasks in this function:
+//                   (1) load the problem-specific runtime parameters
+//                   (2) set the problem-specific derived parameters
+//                   (3) reset other general-purpose parameters if necessary
+//                   (4) make a note of the problem-specific parameters
+//                3. Must NOT call any EoS routine here since it hasn't been initialized at this point
+//
+// Parameter   :  None
+//
+// Return      :  None
+//-------------------------------------------------------------------------------------------------------
+void SetParameter()
+{
+
+   if ( MPI_Rank == 0 )    Aux_Message( stdout, "   Setting runtime parameters ...\n" );
+
+
+// (1) load the problem-specific runtime parameters
+   const char FileName[] = "Input__TestProb";
+   ReadPara_t *ReadPara  = new ReadPara_t;
+
+// (1-1) add parameters in the following format:
+// --> note that VARIABLE, DEFAULT, MIN, and MAX must have the same data type
+// --> some handy constants (e.g., Useless_bool, Eps_double, NoMin_int, ...) are defined in "include/ReadPara.h"
+// ********************************************************************************************************************************
+// ReadPara->Add( "KEY_IN_THE_FILE",      &VARIABLE,             DEFAULT,      MIN,            MAX          );
+// ********************************************************************************************************************************
+   ReadPara->Add( "ComputeCorrelation",   &ComputeCorrelation,   false,        Useless_bool,   Useless_bool );
+   ReadPara->Add( "ReComputeCorrelation", &ReComputeCorrelation, false,        Useless_bool,   Useless_bool );
+   ReadPara->Add( "FilePath_corr",         FilePath_corr,        Useless_str,  Useless_str,    Useless_str  );
+   ReadPara->Add( "MinLv_corr",           &MinLv_corr,           0,            0,              MAX_LEVEL    );
+   ReadPara->Add( "MaxLv_corr",           &MaxLv_corr,           MAX_LEVEL,    0,              MAX_LEVEL    );
+   ReadPara->Add( "StepInitial_corr",     &StepInitial_corr,     0,            0,              NoMax_int    );
+   ReadPara->Add( "StepInterval_corr",    &StepInterval_corr,    1,            1,              NoMax_int    );
+   ReadPara->Add( "StepEnd_corr",         &StepEnd_corr,         NoMax_int,    0,              NoMax_int    );
+   ReadPara->Add( "RadiusMax_corr",       &RadiusMax_corr,       Eps_double,   NoMin_double,   NoMax_double );
+   ReadPara->Add( "dr_min_corr",          &dr_min_corr,          Eps_double,   NoMin_double,   NoMax_double );
+   ReadPara->Add( "LogBin_corr",          &LogBin_corr,          false,        Useless_bool,   Useless_bool );
+   ReadPara->Add( "LogBinRatio_corr",     &LogBinRatio_corr,     1.0,          NoMin_double,   NoMax_double );
+   ReadPara->Add( "RemoveEmpty_corr",     &RemoveEmpty_corr,     false,        Useless_bool,   Useless_bool );
+   ReadPara->Add( "dr_min_prof",          &dr_min_prof,          Eps_double,   NoMin_double,   NoMax_double );
+
+   ReadPara->Read( FileName );
+
+   delete ReadPara;
+
+// (1-2) set the default values
+   if ( ComputeCorrelation )
+   {
+      if ( !OPT__RECORD_USER )
+         Aux_Error( ERROR_INFO, "OPT__RECORD_USER should be turned on to enable ComputeCorrelation !!\n" );
+
+      if ( dr_min_corr      <= Eps_double )  dr_min_corr      = 1e-2*0.5*amr->BoxSize[0];
+      if ( RadiusMax_corr   <= Eps_double )  RadiusMax_corr   = 0.5*amr->BoxSize[0];
+      if ( LogBinRatio_corr <= 1.0        )  LogBinRatio_corr = 2.0;
+
+      if ( dr_min_prof      <= Eps_double )  dr_min_prof      = 0.25*dr_min_corr;
+      RadiusMax_prof   = RadiusMax_corr * 1.05;  // assigned by Test Problem
+      LogBinRatio_prof = 1.0;                    // hard-coded by Test Problem (no effect)
+      LogBin_prof      = false;                  // hard-coded by Test Problem
+      RemoveEmpty_prof = false;                  // hard-coded by Test Problem
+
+      if ( MinLv_corr < 0           )  MinLv_corr = 0;
+      if ( MaxLv_corr < MinLv_corr  )  MaxLv_corr = MAX_LEVEL;
+      if ( FilePath_corr[0] == '\0' )  sprintf( FilePath_corr, "./" );
+      else
+      {
+         FILE *file_checker = fopen( FilePath_corr, "r" );
+         if ( !file_checker )
+            Aux_Error( ERROR_INFO, "File path %s for saving correlation function text files does not exist!!
Please create!!\n", FilePath_corr ); + else + fclose(file_checker); + } + } + +// (1-3) check the runtime parameters + +// (2) set the problem-specific derived parameters + +// (3) reset other general-purpose parameters +// --> a helper macro PRINT_WARNING is defined in TestProb.h + const long End_Step_Default = 50000; + const double End_T_Default = 2.0e-3; + + if ( END_STEP < 0 ) { + END_STEP = End_Step_Default; + PRINT_RESET_PARA( END_STEP, FORMAT_LONG, "" ); + } + + if ( END_T < 0.0 ) { + END_T = End_T_Default; + PRINT_RESET_PARA( END_T, FORMAT_REAL, "" ); + } + + +// (4) make a note + if ( MPI_Rank == 0 ) + { + Aux_Message( stdout, "==============================================================================\n" ); + Aux_Message( stdout, " test problem ID = %d\n" , TESTPROB_ID ); + Aux_Message( stdout, " compute correlation = %d\n" , ComputeCorrelation ); + if (ComputeCorrelation) + { + Aux_Message( stdout, " radius maximum (correlation) = %13.7e\n", RadiusMax_corr ); + Aux_Message( stdout, " histogram bin size (correlation) = %13.7e\n", dr_min_corr ); + Aux_Message( stdout, " use logarithmic bin (correlation) = %d\n" , LogBin_corr ); + Aux_Message( stdout, " log bin ratio (correlation) = %13.7e\n", LogBinRatio_corr ); + Aux_Message( stdout, " remove empty bin (correlation) = %d\n" , RemoveEmpty_corr ); + Aux_Message( stdout, " radius maximum (profile, assigned) = %13.7e\n", RadiusMax_prof ); + Aux_Message( stdout, " histogram bin size (profile) = %13.7e\n", dr_min_prof ); + Aux_Message( stdout, " use logarithmic bin (profile, hard-coded) = %d\n" , LogBin_prof ); + Aux_Message( stdout, " log bin ratio (profile, no effect) = %13.7e\n", LogBinRatio_prof ); + Aux_Message( stdout, " remove empty bin (profile, hard-coded) = %d\n" , RemoveEmpty_prof ); + Aux_Message( stdout, " minimum level = %d\n" , MinLv_corr ); + Aux_Message( stdout, " maximum level = %d\n" , MaxLv_corr ); + Aux_Message( stdout, " folder for storing correlation text file = %s\n" , FilePath_corr ); + if ( OPT__INIT == INIT_BY_RESTART ) + Aux_Message( stdout, " re-compute correlation using restart time = %d\n" , ReComputeCorrelation ); + } + Aux_Message( stdout, "==============================================================================\n" ); + } + + if ( MPI_Rank == 0 ) Aux_Message( stdout, " Setting runtime parameters ... done\n" ); + +} // FUNCTION : SetParameter + +//------------------------------------------------------------------------------------------------------- +// Function : AddNewField_ELBDM_UniformGranule +// Description : Store the initial density as Dens0 +// +// Note : 1. Ref: https://github.com/gamer-project/gamer/wiki/Adding-New-Simulations#v-add-problem-specific-grid-fields-and-particle-attributes +// 2. Invoke AddField() for each of the problem-specific field: +// --> Field label sent to AddField() will be used as the output name of the field +// --> Field index returned by AddField() can be used to access the field data +// 3. 
Pre-declared field indices are put in Field.h +// +// Parameter : None +// +// Return : None +//------------------------------------------------------------------------------------------------------- +static void AddNewField_ELBDM_UniformGranule( void ) +{ + +# if ( NCOMP_PASSIVE_USER > 0 ) + Idx_Dens0 = AddField( "Dens0", FIXUP_FLUX_NO, FIXUP_REST_NO, NORMALIZE_NO, INTERP_FRAC_NO ); +# endif + +} // FUNCTION : AddNewField_ELBDM_UniformGranule + +//------------------------------------------------------------------------------------------------------- +// Function : Init_User_ELBDM_UniformGranule +// Description : Store the initial density +// +// Note : 1. Invoked by Init_GAMER() using the function pointer "Init_User_Ptr", +// which must be set by a test problem initializer +// +// Parameter : None +// +// Return : None +//------------------------------------------------------------------------------------------------------- +static void Init_User_ELBDM_UniformGranule( void ) +{ + +# if ( NCOMP_PASSIVE_USER > 0 ) + for (int lv=0; lvNPatchComma[lv][1]; PID++) + for (int k=0; kpatch[ amr->FluSg[lv] ][lv][PID]->fluid[Idx_Dens0][k][j][i]; + + amr->patch[ 1-amr->FluSg[lv] ][lv][PID]->fluid[Idx_Dens0][k][j][i] = Dens0; + } + +// b. for starting a new simulation, we must copy the initial density to both Sg + else + { + const real Dens0 = amr->patch[ amr->FluSg[lv] ][lv][PID]->fluid[DENS][k][j][i]; + + amr->patch[ amr->FluSg[lv] ][lv][PID]->fluid[Idx_Dens0][k][j][i] = Dens0; + amr->patch[ 1-amr->FluSg[lv] ][lv][PID]->fluid[Idx_Dens0][k][j][i] = Dens0; + } + } + + if (ComputeCorrelation) + { + const double InitialTime = Time[0]; + if ( MPI_Rank == 0 ) Aux_Message( stdout, "StepInitial_corr = %d ; StepInterval_corr = %d ; StepEnd_corr = %d\n", StepInitial_corr, StepInterval_corr, StepEnd_corr); + + if ( MPI_Rank == 0 ) Aux_Message( stdout, "InitialTime = %13.6e \n", InitialTime ); + +// determine the central position for computing correlation function + if ( MPI_Rank == 0 ) Aux_Message( stdout, "Calculate the initial center-of-mass position:\n"); + + double FinaldR; + int FinalNIter; + double CoM_ref[3]; + double MaxR = __FLT_MAX__; + double MinWD = 0.0; + double TolErrR = amr->dh[MAX_LEVEL]; + int MaxIter = 10; + + Extrema_t Max_Dens; + Max_Dens.Field = _DENS; + Max_Dens.Radius = __FLT_MAX__; // entire domain + Max_Dens.Center[0] = amr->BoxCenter[0]; + Max_Dens.Center[1] = amr->BoxCenter[1]; + Max_Dens.Center[2] = amr->BoxCenter[2]; + + Aux_FindExtrema( &Max_Dens, EXTREMA_MAX, 0, TOP_LEVEL, PATCH_LEAF ); + + for (int d=0; d<3; d++) CoM_ref[d] = Max_Dens.Coord[d]; + + Aux_FindWeightedAverageCenter( Center_corr, CoM_ref, MaxR, MinWD, _TOTAL_DENS, TolErrR, MaxIter, &FinaldR, &FinalNIter ); + + if ( MPI_Rank == 0 ) Aux_Message( stdout, "Initial center-of-mass position is ( %14.11e,%14.11e,%14.11e )\n", Center_corr[0], Center_corr[1], Center_corr[2] ); + +// commpute density profile for passive field + if ( MPI_Rank == 0 ) Aux_Message( stdout, "Calculate initial density profile:\n"); + + const long TVar[] = {BIDX(Idx_Dens0)}; + Aux_ComputeProfile( &Prof_Dens_initial, Center_corr, RadiusMax_prof, dr_min_prof, LogBin_prof, LogBinRatio_prof, RemoveEmpty_prof, TVar, 1, MinLv_corr, MaxLv_corr, PATCH_LEAF, InitialTime, true ); + + if ( MPI_Rank == 0 ) + { + char Filename[MAX_STRING]; + sprintf( Filename, "%s/initial_profile.txt", FilePath_corr ); + FILE *output_initial_prof = fopen(Filename, "w"); + fprintf( output_initial_prof, "#%19s %21s %21s %21s %11s\n", "Radius", "Dens", "Dens_Sigma" , "Weighting", 
"Cell_Number"); + for (int b=0; bNBin; b++) + fprintf( output_initial_prof, "%20.14e %21.14e %21.14e %21.14e %11ld\n", + Prof_Dens_initial->Radius[b], Prof_Dens_initial->Data[b], Prof_Dens_initial->Data_Sigma[b], Prof_Dens_initial->Weight[b], Prof_Dens_initial->NCell[b] ); + fclose(output_initial_prof); + } + } +# endif // #if ( NCOMP_PASSIVE_USER > 0 ) + +} // FUNCTION : Init_User_ELBDM_UniformGranule + +#endif // #if ( MODEL == ELBDM && defined GRAVITY ) + +//------------------------------------------------------------------------------------------------------- +// Function : Do_CF +// Description : Compute the correlation function +// +// Note : 1. Linked to the function pointer "Aux_Record_User_Ptr" +// 2. Please turn on the runtime option "OPT__RECORD_USER" +//------------------------------------------------------------------------------------------------------- +static void Do_CF( void ) +{ +// Compute correlation if ComputeCorrelation flag is true + if (ComputeCorrelation) + { + if ( (Step>=StepInitial_corr) && (((Step-StepInitial_corr)%StepInterval_corr)==0) && (Step<=StepEnd_corr) ) + { + const long TVar[] = {_DENS}; + Aux_ComputeCorrelation( &Correlation_Dens, (const Profile_t**) &Prof_Dens_initial, Center_corr, RadiusMax_corr, dr_min_corr, LogBin_corr, LogBinRatio_corr, + RemoveEmpty_corr, TVar, 1, MinLv_corr, MaxLv_corr, PATCH_LEAF, Time[0], dr_min_prof); + + if ( MPI_Rank == 0 ) + { + char Filename[MAX_STRING]; + sprintf( Filename, "%s/correlation_function_t=%.4e.txt", FilePath_corr, Time[0] ); + FILE *output_correlation = fopen(Filename, "w"); + fprintf( output_correlation, "#%19s %21s %21s %11s\n", "Radius", "Correlation_Function", "Weighting", "Cell_Number"); + for (int b=0; bNBin; b++) + fprintf( output_correlation, "%20.14e %21.14e %21.14e %11ld\n", + Correlation_Dens->Radius[b], Correlation_Dens->Data[b], Correlation_Dens->Weight[b], Correlation_Dens->NCell[b] ); + fclose(output_correlation); + } + } + } // if (ComputeCorrelation) +} // FUNCTION : Do_CF + +//------------------------------------------------------------------------------------------------------- +// Function : End_UniformGranule +// Description : Free memory before terminating the program +// +// Note : 1. Linked to the function pointer "End_User_Ptr" to replace "End_User()" +//------------------------------------------------------------------------------------------------------- +void End_UniformGranule() +{ + + delete Prof_Dens_initial; + delete Correlation_Dens; + Prof_Dens_initial = NULL; + Correlation_Dens = NULL; + +} // FUNCTION : End_UniformGranule + +//------------------------------------------------------------------------------------------------------- +// Function : Init_TestProb_ELBDM_UniformGranule +// Description : Test problem initializer +// +// Note : None +// +// Parameter : None +// +// Return : None +//------------------------------------------------------------------------------------------------------- +void Init_TestProb_ELBDM_UniformGranule() +{ + + if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ ); + + +// validate the compilation flags and runtime parameters + Validate(); + + +# if ( MODEL == ELBDM ) +// set the problem-specific runtime parameters + SetParameter(); + + Init_Field_User_Ptr = AddNewField_ELBDM_UniformGranule; + Init_User_Ptr = Init_User_ELBDM_UniformGranule; + Aux_Record_User_Ptr = Do_CF; + End_User_Ptr = End_UniformGranule; +# endif // #if ( MODEL == ELBDM ) + + if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... 
done\n", __FUNCTION__ ); + +} // FUNCTION : Init_TestProb_ELBDM_UniformGranule