diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__DumpTable b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__DumpTable new file mode 100644 index 0000000000..c60c669b1d --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__DumpTable @@ -0,0 +1,42 @@ +#Dump ID Dump Time + -6 0.0003124024000000 + -5 0.0005000000000000 + -4 0.0010000000000000 + -3 0.0050000000000000 + -2 0.0100000000000000 + -1 0.0500000000000000 + 0 0.0909090920000000 + 1 0.1022642090403268 + 2 0.1142926268126806 + 3 0.1269969647105442 + 4 0.1403806580959482 + 5 0.1544481509848806 + 6 0.1692051241492372 + 7 0.1846587623059424 + 8 0.2008180659019670 + 9 0.2176942139307767 + 10 0.2353009853594673 + 11 0.2536552481440784 + 12 0.2727775264961512 + 13 0.2926926590557780 + 14 0.3134305629162865 + 15 0.3350271209736094 + 16 0.3575252126958853 + 17 0.3809759108571344 + 18 0.4054398686204628 + 19 0.4309889220031104 + 20 0.4577079316383287 + 21 0.4856968849564935 + 22 0.5150732779850334 + 23 0.5459748043551692 + 24 0.5785624241065474 + 25 0.6130240298474579 + 26 0.6495793081783160 + 27 0.6884872598419996 + 28 0.7300595373040358 + 29 0.7746853173110612 + 30 0.8228743661061253 + 31 0.8753084975593694 + 32 0.9327759052183688 + 33 0.9953073615712860 +***************END LINE*************** diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Flag_Interference b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Flag_Interference new file mode 100644 index 0000000000..2cdbf69876 --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Flag_Interference @@ -0,0 +1,12 @@ +# Level QP Density PhaseLap OnlyAtExtrema + 0 0.03 0 1.0 0 + 1 0.03 0 1.0 0 + 2 0.03 0 1.0 0 + 3 0.03 0 1.0 0 + 4 0.03 0 1.0 0 + 5 0.03 0 1.0 0 + 6 0.03 0 1.0 0 + 7 0.03 0 1.0 0 + 8 0.03 0 1.0 0 + 9 0.03 0 1.0 0 + 10 0.03 0 1.0 0 diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Flag_Rho b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Flag_Rho new file mode 100644 index 0000000000..4d81929cdb --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Flag_Rho @@ -0,0 +1,15 @@ +# Level Density + 0 8.0 + 1 32.0 + 2 64.0 + 3 256.0 + 4 330.0 + 5 430.0 + 6 512.0 + 7 262144.0 + 8 2097152.0 + 9 16777216.0 + 10 134217728.0 + 11 1073741824.0 + 12 8589934592.0 + 13 68719476736.0 diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Flag_Spectral b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Flag_Spectral new file mode 100644 index 0000000000..cf959eb812 --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Flag_Spectral @@ -0,0 +1,13 @@ +# Level Refinement Derefinement (derefinement currently not functional) + 0 1e0 -1 + 1 1e0 -1 + 2 1e0 -1 + 3 1e0 -1 + 4 1e0 -1 + 5 1e0 -1 + 6 1e0 -1 + 7 1e0 -1 + 8 1e0 -1 + 9 1e0 -1 + 10 1e0 -1 + 11 1e0 -1 diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Parameter b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Parameter new file mode 100644 index 0000000000..e926aeb950 --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__Parameter @@ -0,0 +1,226 @@ + + +# ================================================================================================================= +# NOTE: +# 1. Comment symbol: # +# 2. [*]: defaults +# 3. Parameters set to "auto" (usually by setting to a negative value) do not have deterministic default values +# and will be set according to the adopted compilation options and/or other runtime parameters +# 4. 
To add new parameters, please edit "Init/Init_Load_Parameter.cpp" +# 5. All dimensional variables should be set consistently with the code units (set by UNIT_L/M/T/V/D) unless +# otherwise specified (e.g., SF_CREATE_STAR_MIN_GAS_DENS & SF_CREATE_STAR_MIN_STAR_MASS) +# 6. For boolean options: 0/1 -> off/on +# ================================================================================================================= + +# simulation scale +BOX_SIZE 10.0 # box size along the longest side (in Mpc/h if COMOVING is adopted) +NX0_TOT_X 64 # number of base-level cells along x +NX0_TOT_Y 64 # number of base-level cells along y +NX0_TOT_Z 64 # number of base-level cells along z +OMP_NTHREAD -1 # number of OpenMP threads (<=0=auto) [-1] ##OPENMP ONLY## +END_T -1.0 # end physical time (<0=auto -> must be set by test problems or restart) [-1.0] +END_STEP -1 # end step (<0=auto -> must be set by test problems or restart) [-1] + + +# test problems +TESTPROB_ID 1009 # test problem ID [0] + # 1009: ELBDM large-scale structure cosmological simulation + + +# code units (in cgs) +OPT__UNIT 1 # specify code units -> must set exactly 3 basic units below [0] ##USELESS FOR COMOVING## + + +# boundary conditions +OPT__BC_FLU_XM 1 # fluid boundary condition at the -x face: (1=periodic, 2=outflow, 3=reflecting, 4=user) ##2/3 for HYDRO ONLY## +OPT__BC_FLU_XP 1 # fluid boundary condition at the +x face: (1=periodic, 2=outflow, 3=reflecting, 4=user) ##2/3 for HYDRO ONLY## +OPT__BC_FLU_YM 1 # fluid boundary condition at the -y face: (1=periodic, 2=outflow, 3=reflecting, 4=user) ##2/3 for HYDRO ONLY## +OPT__BC_FLU_YP 1 # fluid boundary condition at the +y face: (1=periodic, 2=outflow, 3=reflecting, 4=user) ##2/3 for HYDRO ONLY## +OPT__BC_FLU_ZM 1 # fluid boundary condition at the -z face: (1=periodic, 2=outflow, 3=reflecting, 4=user) ##2/3 for HYDRO ONLY## +OPT__BC_FLU_ZP 1 # fluid boundary condition at the +z face: (1=periodic, 2=outflow, 3=reflecting, 4=user) ##2/3 for HYDRO ONLY## +OPT__BC_POT 1 # gravity boundary condition: (1=periodic, 2=isolated) + + +# cosmology (COMOVING only) +A_INIT 9.900990099e-3 # initial scale factor +OMEGA_M0 0.3158230904284232 # omega matter at the present time +HUBBLE0 0.6732117 # dimensionless Hubble parameter (currently only for converting ELBDM_MASS to code units) + + +# time-step +DT__MAX -1.0 # dt criterion: maximum allowed dt (<0=off) [-1.0] +DT__FLUID -1.0 # dt criterion: fluid solver CFL factor (<0=auto) [-1.0] +DT__FLUID_INIT -1.0 # dt criterion: DT__FLUID at the first step (<0=auto) [-1.0] +DT__GRAVITY -1.0 # dt criterion: gravity solver safety factor (<0=auto) [-1.0] +DT__HYBRID_CFL -1.0 # dt criterion: hybrid solver CFL factor (<0=auto) (diffusion) [-1.0] ## ELBDM_HYBRID ONLY## +DT__HYBRID_CFL_INIT -1.0 # dt criterion: DT__HYBRID_CFL in the first step (<0=auto) [-1.0] ## ELBDM_HYBRID ONLY## +DT__HYBRID_VELOCITY -1.0 # dt criterion: hybrid solver CFL factor (<0=auto) (Hamilton-Jacobi) [-1.0] ## ELBDM_HYBRID ONLY## +DT__HYBRID_VELOCITY_INIT -1.0 # dt criterion: DT__HYBRID_VELOCITY in the first step (<0=auto) [-1.0] ## ELBDM_HYBRID ONLY## +DT__PHASE 0.0 # dt criterion: phase rotation safety factor (0=off) [0.0] ##ELBDM ONLY## +DT__PARVEL 0.5 # dt criterion: particle velocity safety factor [0.5] +DT__PARVEL_MAX -1.0 # dt criterion: maximum allowed dt from particle velocity (<0=off) [-1.0] +DT__PARACC 0.5 # dt criterion: particle acceleration safety factor (0=off) [0.5] ##STORE_PAR_ACC ONLY## +DT__MAX_DELTA_A 0.01 # dt criterion: maximum variation of the cosmic scale factor 
[0.01] +DT__SYNC_PARENT_LV 0.1 # dt criterion: allow dt to adjust by (1.0+DT__SYNC_PARENT) in order to synchronize + # with the parent level (for OPT__DT_LEVEL==3 only) [0.1] +DT__SYNC_CHILDREN_LV 0.1 # dt criterion: allow dt to adjust by (1.0-DT__SYNC_CHILDREN) in order to synchronize + # with the children level (for OPT__DT_LEVEL==3 only; 0=off) [0.1] +OPT__DT_USER 0 # dt criterion: user-defined -> edit "Mis_GetTimeStep_UserCriteria.cpp" [0] +OPT__DT_LEVEL 3 # dt at different AMR levels (1=shared, 2=differ by two, 3=flexible) [3] +OPT__RECORD_DT 1 # record info of the dt determination [1] +AUTO_REDUCE_DT 1 # reduce dt automatically when the program fails (for OPT__DT_LEVEL==3 only) [1] +AUTO_REDUCE_DT_FACTOR 0.8 # reduce dt by a factor of AUTO_REDUCE_DT_FACTOR when the program fails [0.8] +AUTO_REDUCE_DT_FACTOR_MIN 0.1 # minimum allowed AUTO_REDUCE_DT_FACTOR after consecutive failures [0.1] + + +# grid refinement (examples of Input__Flag_XXX tables are put at "example/input/") +REGRID_COUNT 4 # refine every REGRID_COUNT sub-step [4] +REFINE_NLEVEL 1 # number of new AMR levels to be created at once during refinement [1] +FLAG_BUFFER_SIZE -1 # number of buffer cells for the flag operation (0~PATCH_SIZE; <0=auto -> PATCH_SIZE) [-1] +FLAG_BUFFER_SIZE_MAXM1_LV -1 # FLAG_BUFFER_SIZE at the level MAX_LEVEL-1 (<0=auto -> REGRID_COUNT) [-1] +FLAG_BUFFER_SIZE_MAXM2_LV -1 # FLAG_BUFFER_SIZE at the level MAX_LEVEL-2 (<0=auto) [-1] +MAX_LEVEL 7 # maximum refinement level (0~NLEVEL-1) [NLEVEL-1] +OPT__FLAG_RHO 1 # flag: density (Input__Flag_Rho) [0] +OPT__FLAG_RHO_GRADIENT 0 # flag: density gradient (Input__Flag_RhoGradient) [0] +OPT__FLAG_LOHNER_DENS 0 # flag: Lohner for mass density (Input__Flag_Lohner) [0] ##BOTH HYDRO AND ELBDM## +OPT__FLAG_LOHNER_FORM 1 # form of Lohner: (1=FLASH-1, 2=FLASH-2, 3=form-invariant-1, 4=form-invariant-2) [2] +OPT__FLAG_ENGY_DENSITY 0 # flag: energy density (Input_Flag_EngyDensity) [0] ##ELBDM ONLY## +OPT__FLAG_INTERFERENCE 1 # flag: interference level (Input__Flag_Interference) [0] ##ELBDM ONLY## +OPT__FLAG_SPECTRAL 1 # flag: spectral refinement (Input__Flag_Spectral) [0] ##ELBDM ONLY## +OPT__FLAG_USER 0 # flag: user-defined (Input__Flag_User) -> edit "Flag_User.cpp" [0] +OPT__FLAG_REGION 1 # flag: specify the regions **allowed** to be refined -> edit "Flag_Region.cpp" [0] +OPT__FLAG_NPAR_PATCH 0 # flag: # of particles per patch (Input__Flag_NParPatch): (0=off, 1=itself, 2=itself+siblings) [0] +OPT__FLAG_NPAR_CELL 0 # flag: # of particles per cell (Input__Flag_NParCell) [0] +OPT__FLAG_PAR_MASS_CELL 0 # flag: total particle mass per cell (Input__Flag_ParMassCell) [0] +OPT__NO_FLAG_NEAR_BOUNDARY 0 # flag: disallow refinement near the boundaries [0] +OPT__PATCH_COUNT 1 # record the # of patches at each level: (0=off, 1=every step, 2=every sub-step) [1] +OPT__PARTICLE_COUNT 1 # record the # of particles at each level: (0=off, 1=every step, 2=every sub-step) [1] +OPT__REUSE_MEMORY 2 # reuse patch memory to reduce memory fragmentation: (0=off, 1=on, 2=aggressive) [2] +OPT__MEMORY_POOL 0 # preallocate patches for OPT__REUSE_MEMORY=1/2 (Input__MemoryPool) [0] + + +# load balance (LOAD_BALANCE only) +LB_INPUT__WLI_MAX 0.1 # weighted-load-imbalance (WLI) threshold for redistributing all patches [0.1] +LB_INPUT__PAR_WEIGHT 0.0 # load-balance weighting of one particle over one cell [0.0] +OPT__RECORD_LOAD_BALANCE 1 # record the load-balance info [1] +OPT__MINIMIZE_MPI_BARRIER 0 # minimize MPI barriers to improve load balance, especially with particles [0] + # (STORE_POT_GHOST, 
PAR_IMPROVE_ACC=1, OPT__TIMING_BARRIER=0 only; recommend AUTO_REDUCE_DT=0) +OPT__LB_EXCHANGE_FATHER 1 # exchange all cells of all father patches during load balancing (must enable for hybrid scheme + MPI) [0 usually, 1 for ELBDM_HYBRID] ## ELBDM_HYBRID ONLY### + +# fluid solver in ELBDM (MODEL==ELBDM only) +ELBDM_MASS 2.0e-24 # particle mass in ev/c^2 (input unit is fixed even when OPT__UNIT or COMOVING is on) +ELBDM_PLANCK_CONST 1.0 # reduced Planck constant (will be overwritten if OPT__UNIT or COMOVING is on) +ELBDM_TAYLOR3_COEFF 0.166666667 # 3rd Taylor expansion coefficient [1.0/6.0] ##USELESS if ELBDM_TAYLOR3_AUTO is on## +ELBDM_TAYLOR3_AUTO 0 # Optimize ELBDM_TAYLOR3_COEFF automatically to minimize the damping at kmax [0] +ELBDM_REMOVE_MOTION_CM 0 # remove the motion of center-of-mass (must enable OPT__CK_CONSERVATION): + # (0=off, 1=init, 2=every step) [0] +ELBDM_BASE_SPECTRAL 0 # adopt the spectral method to evolve base-level wave function (must enable SUPPORT_FFTW) [0] +ELBDM_MATCH_PHASE 1 # match child phases with father phases during data restriction [1] ##ELBDM_HYBRID ONLY## +ELBDM_FIRST_WAVE_LEVEL 3 # level at which to switch to the wave solver (must >=1) [-1] ##ELBDM_HYBRID ONLY## + +# fluid solvers in all models +FLU_GPU_NPGROUP -1 # number of patch groups sent into the CPU/GPU fluid solver (<=0=auto) [-1] +GPU_NSTREAM -1 # number of CUDA streams for the asynchronous memory copy in GPU (<=0=auto) [-1] +OPT__FIXUP_FLUX 1 # correct coarse grids by the fine-grid boundary fluxes [1] ##HYDRO and ELBDM ONLY## +OPT__FIXUP_RESTRICT 1 # correct coarse grids by averaging the fine-grid data [1] +OPT__CORR_AFTER_ALL_SYNC -1 # apply various corrections after all levels are synchronized (see "Flu_CorrAfterAllSync"): + # (-1=auto, 0=off, 1=every step, 2=before dump) [-1] +OPT__NORMALIZE_PASSIVE 0 # ensure "sum(passive_scalar_density) == gas_density" [1] +OPT__OVERLAP_MPI 0 # overlap MPI communication with CPU/GPU computations [0] ##NOT SUPPORTED YET## +OPT__RESET_FLUID 0 # reset fluid variables after each update -> edit "Flu_ResetByUser.cpp" [0] +MIN_DENS 0.0 # minimum mass density (must >= 0.0) [0.0] ##HYDRO, MHD, and ELBDM ONLY## + + +# gravity solvers in all models +NEWTON_G 1.0 # gravitational constant (will be overwritten if OPT__UNIT or COMOVING is on) +SOR_OMEGA -1.0 # over-relaxation parameter in SOR: (<0=auto) [-1.0] +SOR_MAX_ITER -1 # maximum number of iterations in SOR: (<0=auto) [-1] +SOR_MIN_ITER -1 # minimum number of iterations in SOR: (<0=auto) [-1] +POT_GPU_NPGROUP -1 # number of patch groups sent into the CPU/GPU Poisson solver (<=0=auto) [-1] +OPT__SELF_GRAVITY 1 # add self-gravity [1] +OPT__EXT_ACC 0 # add external acceleration (0=off, 1=function, 2=table) [0] ##HYDRO ONLY## + # --> 2 (table) is not supported yet +OPT__EXT_POT 0 # add external potential (0=off, 1=function, 2=table) [0] + # --> for 2 (table), edit the corresponding parameters below too + + +# initialization +OPT__INIT 3 # initialization option: (1=FUNCTION, 2=RESTART, 3=FILE->"UM_IC") +RESTART_LOAD_NRANK 1 # number of parallel I/O (i.e., number of MPI ranks) for restart [1] +OPT__RESTART_RESET 0 # reset some simulation status parameters (e.g., current step and time) during restart [0] +OPT__UM_IC_LEVEL 2 # AMR level corresponding to UM_IC (must >= 0) [0] +OPT__UM_IC_NLEVEL 1 # number of AMR levels UM_IC [1] --> edit "Input__UM_IC_RefineRegion" if >1 +OPT__UM_IC_NVAR 2 # number of variables in UM_IC: (1~NCOMP_TOTAL; <=0=auto) [HYDRO=5+passive/ELBDM=2] +OPT__UM_IC_FORMAT 1 # data format of UM_IC: (1=vzyx, 
2=zyxv; row-major and v=field) [1] +OPT__UM_IC_DOWNGRADE 1 # downgrade UM_IC from level OPT__UM_IC_LEVEL to 0 [1] +OPT__UM_IC_REFINE 1 # refine UM_IC from level OPT__UM_IC_LEVEL to MAX_LEVEL [1] +OPT__UM_IC_LOAD_NRANK 1 # number of parallel I/O (i.e., number of MPI ranks) for loading UM_IC [1] +OPT__INIT_RESTRICT 1 # restrict all data during the initialization [1] +OPT__INIT_GRID_WITH_OMP 1 # enable OpenMP when assigning the initial condition of each grid patch [1] +OPT__GPUID_SELECT -1 # GPU ID selection mode: (-3=Laohu, -2=CUDA, -1=MPI rank, >=0=input) [-1] +INIT_SUBSAMPLING_NCELL 0 # perform sub-sampling during initialization: (0=off, >0=# of sub-sampling cells) [0] + + +# interpolation schemes: (-1=auto, 1=MinMod-3D, 2=MinMod-1D, 3=vanLeer, 4=CQuad, 5=Quad, 6=CQuar, 7=Quar, 8=Spectral (##ELBDM & SUPPORT_SPECTRAL_INT ONLY##)) +OPT__INT_TIME 1 # perform "temporal" interpolation for OPT__DT_LEVEL == 2/3 [1] +OPT__INT_PHASE 0 # interpolation on phase (does not support MinMod-1D) [1] ##ELBDM ONLY## +OPT__FLU_INT_SCHEME -1 # ghost-zone fluid variables for the fluid solver [-1] +OPT__REF_FLU_INT_SCHEME -1 # newly allocated fluid variables during grid refinement [-1] +OPT__POT_INT_SCHEME 4 # ghost-zone potential for the Poisson solver (only supports 4 & 5) [4] +OPT__RHO_INT_SCHEME 4 # ghost-zone mass density for the Poisson solver [4] +OPT__GRA_INT_SCHEME 4 # ghost-zone potential for the gravity solver (for UNSPLIT_GRAVITY as well) [4] +OPT__REF_POT_INT_SCHEME 4 # newly allocated potential during grid refinement [4] +INT_MONO_COEFF 2.0 # coefficient for ensuring the interpolation monotonicity (1.0~4.0) [2.0] +SPEC_INT_TABLE_PATH ./ # path to tables for spectral interpolation ##ELBDM & SUPPORT_SPECTRAL_INT ONLY## +SPEC_INT_XY_INSTEAD_DEPHA 1 # interpolate x = density^0.5*cos( phase/SPEC_INT_WAVELENGTH_MAGNIFIER ), + # y = density^0.5*sin( phase/SPEC_INT_WAVELENGTH_MAGNIFIER ) instead of density and phase + # for the spectral interpolation, which has the advantage of being well-defined across vortices [1] +SPEC_INT_WAVELENGTH_MAGNIFIER 1.0e2 # stretching factor of wavelength for SPEC_INT_XY_INSTEAD_DEPHA + # --> for SPEC_INT_WAVELENGTH_MAGNIFIER=1, x=real part and y=imaginary part [1.0e2] + + +# data dump +OPT__OUTPUT_TOTAL 1 # output the simulation snapshot: (0=off, 1=HDF5, 2=C-binary) [1] +OPT__OUTPUT_PART 0 # output a single line or slice: (0=off, 1=xy, 2=yz, 3=xz, 4=x, 5=y, 6=z, 7=diag) [0] +OPT__OUTPUT_USER 0 # output the user-specified data -> edit "Output_User.cpp" [0] +OPT__OUTPUT_PAR_MODE 0 # output the particle data: (0=off, 1=text-file, 2=C-binary) [0] ##PARTICLE ONLY## +OPT__OUTPUT_BASEPS 0 # output the base-level power spectrum [0] +OPT__OUTPUT_BASE 0 # only output the base-level data [0] ##OPT__OUTPUT_PART ONLY## +OPT__OUTPUT_POT 1 # output gravitational potential [1] ##OPT__OUTPUT_TOTAL ONLY## +OPT__OUTPUT_PAR_DENS 1 # output the particle or total mass density on grids: + # (0=off, 1=particle mass density, 2=total mass density) [1] ##OPT__OUTPUT_TOTAL ONLY## +OPT__OUTPUT_MODE 3 # (1=const step, 2=const dt, 3=dump table) -> edit "Input__DumpTable" for 3 +OUTPUT_STEP 1 # output data every OUTPUT_STEP step ##OPT__OUTPUT_MODE==1 ONLY## +OUTPUT_DT 1.0 # output data every OUTPUT_DT time interval ##OPT__OUTPUT_MODE==2 ONLY## +OUTPUT_PART_X -1.0 # x coordinate for OPT__OUTPUT_PART [-1.0] +OUTPUT_PART_Y -1.0 # y coordinate for OPT__OUTPUT_PART [-1.0] +OUTPUT_PART_Z -1.0 # z coordinate for OPT__OUTPUT_PART [-1.0] +INIT_DUMPID -1 # set the first dump ID (<0=auto) [-1] + + +# miscellaneous 
+OPT__VERBOSE                  0         # output the simulation progress in detail [0]
+OPT__TIMING_BARRIER          -1         # synchronize before timing -> more accurate, but may slow down the run (<0=auto) [-1]
+OPT__TIMING_BALANCE           0         # record the max/min elapsed time in various code sections for checking load balance [0]
+OPT__TIMING_MPI               0         # record the MPI bandwidth achieved in various code sections [0] ##LOAD_BALANCE ONLY##
+OPT__RECORD_NOTE              1         # take notes for the general simulation info [1]
+OPT__RECORD_UNPHY             0         # record the number of cells with unphysical results being corrected [1]
+OPT__RECORD_MEMORY            1         # record the memory consumption [1]
+OPT__RECORD_PERFORMANCE       1         # record the code performance [1]
+OPT__MANUAL_CONTROL           1         # support manually dumping data or stopping the run during runtime
+                                        # (by generating the file DUMP_GAMER_DUMP or STOP_GAMER_STOP) [1]
+OPT__RECORD_USER              0         # record the user-specified info -> edit "Aux_Record_User.cpp" [0]
+OPT__OPTIMIZE_AGGRESSIVE      0         # apply aggressive optimizations (experimental) [0]
+
+
+# checks
+OPT__CK_REFINE                0         # check the grid refinement [0]
+OPT__CK_PROPER_NESTING        0         # check the proper-nesting condition [0]
+OPT__CK_CONSERVATION          1         # check the conservation law [0]
+OPT__CK_NORMALIZE_PASSIVE     0         # check the normalization of passive scalars [0] ##OPT__NORMALIZE_PASSIVE ONLY##
+OPT__CK_RESTRICT              0         # check the data restriction [0]
+OPT__CK_FINITE                0         # check if all variables are finite [0]
+OPT__CK_PATCH_ALLOCATE        0         # check if all patches are properly allocated [0]
+OPT__CK_FLUX_ALLOCATE         0         # check if all flux arrays are properly allocated ##HYDRO and ELBDM ONLY## [0]
+OPT__CK_MEMFREE               1.0       # check the free memory in GB (0=off, >0=threshold) [1.0]
+OPT__CK_PARTICLE              0         # check the particle allocation [0]
diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__TestProb b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__TestProb
new file mode 100644
index 0000000000..fd5fa81c30
--- /dev/null
+++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__TestProb
@@ -0,0 +1,3 @@
+# problem-specific runtime parameters
+LSS_InitMode                  2         # initialization mode: (1=density-only with constant phase, 2=real and imaginary parts or density and phase) [1]
+ZoomIn_MaxLvOutside           3         # if (OPT__FLAG_REGION), maximum refinement level outside of the zoom-in box. Required: ELBDM_FIRST_WAVE_LEVEL <= ZoomIn_MaxLvOutside
\ No newline at end of file
diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__ZoominBox b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__ZoominBox
new file mode 100644
index 0000000000..7190750bb0
--- /dev/null
+++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/Input__ZoominBox
@@ -0,0 +1,8 @@
+#ScaleFactor  LX(UNIT_L)  LY(UNIT_L)  LZ(UNIT_L)  CenterX(UNIT_L)  CenterY(UNIT_L)  CenterZ(UNIT_L)
+ 0.77         1.18        1.13        1.26        0.29491          9.52254          8.27318
+ 0.58         1.80        1.66        1.94        0.29491          9.52254          8.27318
+ 0.43         2.30        2.00        2.46        0.29491          9.52254          8.27318
+ 0.31         2.70        2.31        2.87        0.29491          9.52254          8.27318
+ 0.22         3.20        2.53        3.22        0.29491          9.52254          8.27318
+ 0.14         3.60        2.70        3.50        0.29491          9.52254          8.27318
+ 0.05         4.00        2.90        3.89        0.29491          9.52254          8.27318
\ No newline at end of file
diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/README b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/README
new file mode 100644
index 0000000000..4a7508d815
--- /dev/null
+++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/README
@@ -0,0 +1,50 @@
+Compilation flags:
+========================================
+Enable : MODEL=ELBDM, ELBDM_SCHEME=ELBDM_HYBRID, GRAVITY, COMOVING
+Disable: PARTICLE
+
+
+Default setup:
+========================================
+1. ELBDM_MASS              2.0e-24 (eV/c^2)
+   A_INIT                  9.900990099e-3       # initial scale factor
+   OMEGA_M0                0.3158230904284232   # omega matter at the present time
+   HUBBLE0                 0.6732117            # dimensionless Hubble parameter (currently only for converting ELBDM_MASS to code units)
+   BOX_SIZE                10. (Mpc/h)
+
+2. MAX_LEVEL               7
+   OPT__FLAG_SPECTRAL      1
+   OPT__FLAG_INTERFERENCE  1
+   --> Input__Flag_Rho          = example/input/Input__Flag_Rho
+       Input__Flag_Spectral     = example/input/Input__Flag_Spectral
+       Input__Flag_Interference = example/input/Input__Flag_Interference
+
+3. OPT__FLAG_REGION        1                    # enable the zoom-in simulation
+
+
+Note:
+========================================
+1. Cosmological zoom-in large-scale structure simulations using the hybrid scheme
+
+2. Quickstart:
+   1. Download the IC file with "sh download_IC_lighthalo.sh" for testing.
+   2. Compile GAMER with the "Makefile" generated by "generate_make.sh" and run the simulation to redshift 0 with the default settings.
+   3. The total data size is 74 GB. If fewer snapshots are preferred to save storage space, remove some rows from Input__DumpTable.
+
+3. Details on the zoom-in simulation
+   1. For details on running a hybrid simulation, please see "example/test_problem/ELBDM/LSS_Hybrid/README"
+   2. Input__ZoominBox controls the volume (LX, LY, LZ) and center (CenterX, CenterY, CenterZ) of the time-dependent zoom-in box.
+      Note that the scale factors are read in descending order.
+      UNIT_L in cosmological simulations corresponds to cMpc/h.
+   3. Input__TestProb controls the maximum refinement level outside of the zoom-in box (ZoomIn_MaxLvOutside).
+   4. ELBDM_FIRST_WAVE_LEVEL <= ZoomIn_MaxLvOutside is required to avoid problems with the fluid-wave interface at the zoom-in boundary (Chan+24, under review).
+   5. OPT__FLAG_REGION must be enabled.
+
+4. Some yt visualization scripts are provided in "plot_script"
+
+5. Setup for the simulation results in Chan+24 (submitted to MNRAS)
+   1. "download_IC_heavyhalo.sh" downloads the IC file
+   2. 
NX0_TOT_X 256 + NX0_TOT_Y 256 + NX0_TOT_Z 256 + ELBDM_MASS 2.0e-23 \ No newline at end of file diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/clean.sh b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/clean.sh new file mode 100644 index 0000000000..65b1d694e6 --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/clean.sh @@ -0,0 +1,5 @@ +rm -f Record__Note Record__Timing Record__TimeStep Record__PatchCount Record__Dump Record__MemInfo Record__L1Err \ + Record__Conservation Data* stderr stdout log XYslice* YZslice* XZslice* Xline* Yline* Zline* \ + Diag* BaseXYslice* BaseYZslice* BaseXZslice* BaseXline* BaseYline* BaseZline* BaseDiag* \ + PowerSpec_* Particle_* nohup.out Record__Performance Record__TimingMPI_* \ + Record__ParticleCount Record__User Patch_* Record__NCorrUnphy FailedPatchGroup* *.pyc Record__LoadBalance Record__Hybrid diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/download_IC_heavyhalo.sh b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/download_IC_heavyhalo.sh new file mode 100755 index 0000000000..be2e5c10aa --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/download_IC_heavyhalo.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +API_URL="https://girder.hub.yt/api/v1" +FILE_ID="66ff5e05999605c485c8d67a" +LOCAL_FILE="Zoomin_IC" + +# download +girder-cli --api-url ${API_URL} download --parent-type item ${FILE_ID} ${LOCAL_FILE} + +# unzip +tar zxvf ${LOCAL_FILE}/Zoomin_IC_heavyhalo.tar.gz +rm -r ${LOCAL_FILE} +ln -fs UM_IC_hybrid_N1024 UM_IC diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/download_IC_lighthalo.sh b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/download_IC_lighthalo.sh new file mode 100755 index 0000000000..d1ed98fb29 --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/download_IC_lighthalo.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +API_URL="https://girder.hub.yt/api/v1" +FILE_ID="66ff5e15999605c485c8d67d" +LOCAL_FILE="Zoomin_IC" + +# download +girder-cli --api-url ${API_URL} download --parent-type item ${FILE_ID} ${LOCAL_FILE} + +# unzip +tar zxvf ${LOCAL_FILE}/Zoomin_IC_lighthalo.tar.gz +rm -r ${LOCAL_FILE} +ln -fs UM_IC_hybrid_N256_third UM_IC diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/generate_make.sh b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/generate_make.sh new file mode 100644 index 0000000000..6189e74ade --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/generate_make.sh @@ -0,0 +1,7 @@ +# This script should run in the same directory as configure.py + +PYTHON=python3 + +${PYTHON} configure.py --machine=eureka_intel --mpi=true --hdf5=true --fftw=FFTW3 --gpu=true \ + --model=ELBDM --elbdm_scheme=ELBDM_HYBRID --wave_scheme=WAVE_GRAMFE --gramfe_scheme=GRAMFE_MATMUL \ + --gravity=true --comoving=true --gsl=true --spectral_interpolation=true diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/Compute_profiles.py b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/Compute_profiles.py new file mode 100644 index 0000000000..2da063f779 --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/Compute_profiles.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3.9 +################################################################################ + +# This code outputs the density, cumulative mass, circular velocity and velocity dispersion profiles +# of a target halo. 
The code uses an initial estimate (center_first_guess) and searches within
+# a specified radius (vicinity) for the coordinates of the maximum density,
+# which is taken as the center of the target halo.
+
+# Carefully adjust "vicinity" for performance.
+
+# Example usage after running the simulation with the low-resolution IC:
+# python3 Compute_profiles.py -s 36 -e 36
+
+# Outputs:
+# Halo_Parameter
+# prof_dens/Data_xxxxxx_0_profile_data
+# prof_mass/Data_xxxxxx_0_mass_accumulate
+# powerspec_within_halo/Data_xxxxxx_0_powerspectrum_within_halo
+# prof_circular_vel/Data_xxxxxx_0_circular_velocity
+# prof_veldisp/Data_xxxxxx_0_veldisp_haloRestFrame
+
+################################################################################
+import argparse
+import numpy as np
+import yt
+import sys
+from Profile_Functions import *
+
+
+# load the command-line parameters
+parser = argparse.ArgumentParser( description='Plot profiles and output Halo_Parameter' )
+
+parser.add_argument( '-s', action='store', required=True, type=int, dest='idx_start',
+                     help='first data index' )
+parser.add_argument( '-e', action='store', required=True, type=int, dest='idx_end',
+                     help='last data index' )
+parser.add_argument( '-d', action='store', required=False, type=int, dest='didx',
+                     help='delta data index [%(default)d]', default=1 )
+
+# take note
+print( '\nCommand-line arguments:' )
+print( '-------------------------------------------------------------------' )
+print( ' '.join(map(str, sys.argv)) )
+print( '-------------------------------------------------------------------\n' )
+
+args      = parser.parse_args()
+idx_start = args.idx_start
+idx_end   = args.idx_end
+didx      = args.didx
+
+halo_id = 0   # pick an ID for your target halo
+
+for file_id in range( idx_start, idx_end+1, didx ):
+
+    ds = yt.load( "../Data_%06d"%file_id )
+    center_first_guess = np.array( [0.295, 9.522, 8.27] )   # in cMpc/h; first guess for the target halo of the low-resolution IC at z=0
+    vicinity = 0.3                                           # radius in cMpc/h
+    compute_profile( ds, center_first_guess, vicinity, halo_id, '.' )
+
+print( "Done !"
) diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/ELBDM_DerivedField.py b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/ELBDM_DerivedField.py new file mode 100644 index 0000000000..a6acff894f --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/ELBDM_DerivedField.py @@ -0,0 +1,127 @@ +import yt +import numpy as np + +# define the derived fields +########################### +# psi = R + iI = f*e^(iS) + +# amplitude +# = sqrt(rho) = sqrt(R^2 + I^2) +def _f( field, data ): + return data["Dens"]**0.5 + +# real part +# = sqrt(rho)*cos(S) = R +def _Real( field, data ): + return data["f"]*np.cos( data["Phase"] ) + +# imaginary part +# = sqrt(rho)*sin(S) = I +def _Imag( field, data ): + return data["f"]*np.sin( data["Phase"] ) + +# phase +# = arctan(I/R) +def _S( field, data ): + return np.arctan2( data["Imag"], data["Real"] ) + +def _phasemod2( field, data ): + return np.arctan2( np.sin(data["Phase"]), np.cos(data["Phase"]) ) + +# momentum density x-component +# = rho*v_x = (R*dI/dx - I*dR/dx)*hbar/m +def _j_x( field, data ): + ELBDM_ETA = data.ds.parameters['ELBDM_Mass']*data.ds.units.code_mass/data.ds.units.reduced_planck_constant + return (data["Real"]*data["Imag_gradient_x"] - data["Imag"]*data["Real_gradient_x"])/ELBDM_ETA + +# momentum density y-component +# = rho*v_y = (R*dI/dy - I*dR/dy)*hbar/m +def _j_y( field, data ): + ELBDM_ETA = data.ds.parameters['ELBDM_Mass']*data.ds.units.code_mass/data.ds.units.reduced_planck_constant + return (data["Real"]*data["Imag_gradient_y"] - data["Imag"]*data["Real_gradient_y"])/ELBDM_ETA + +# momentum density z-component +# = rho*v_z = (R*dI/dz - I*dR/dz)*hbar/m +def _j_z( field, data ): + ELBDM_ETA = data.ds.parameters['ELBDM_Mass']*data.ds.units.code_mass/data.ds.units.reduced_planck_constant + return (data["Real"]*data["Imag_gradient_z"] - data["Imag"]*data["Real_gradient_z"])/ELBDM_ETA + +# momentum density magnitude +# = rho*|v| = |R*grad(I) - I*grad(R)|*hbar/m +def _j( field, data ): + return (data["j_x"]**2 + data["j_y"]**2 + data["j_z"]**2)**0.5 + +# bulk velocity x-component +# = (dS/dx)*hbar/m = (R*dI/dx - I*dR/dx)/rho*hbar/m +def _v_x( field, data ): + return data["j_x"]/data["Dens"] + +# bulk velocity y-component +# = (dS/dy)*hbar/m = (R*dI/dy - I*dR/dy)/rho*hbar/m +def _v_y( field, data ): + return data["j_y"]/data["Dens"] + +# bulk velocity z-component +# = (dS/dz)*hbar/m = (R*dI/dz - I*dR/dz)/rho*hbar/m +def _v_z( field, data ): + return data["j_z"]/data["Dens"] + +# bulk velocity magnitude +# = |grad(S)|*hbar/m = |(R*grad(I) - I*grad(R))|/rho*hbar/m +def _v( field, data ): + return (data["v_x"]**2 + data["v_y"]**2 + data["v_z"]**2)**0.5 + +########################### + + +def Add_derived_fields( ds ): + + ds.add_field( ("gamer","f"), + function=_f, units="code_mass**0.5/code_length**1.5", + display_name=r"$f$", sampling_type="cell" ) + ds.add_field( ("gamer","Real"), + function=_Real, units="code_mass**0.5/code_length**1.5", + display_name=r"$Real$", sampling_type="cell" ) + ds.add_field( ("gamer","Imag"), + function=_Imag, units="code_mass**0.5/code_length**1.5", + display_name=r"$Imag$", sampling_type="cell" ) + ds.add_field( ("gamer","S"), + function=_S, units="dimensionless", + display_name=r"$S$", sampling_type="cell" ) + ds.add_field( ("gamer", "PhaseMod2"), + function=_phasemod2, + sampling_type="local", units="" ) + + # gradient fields + Grad_R = ds.add_gradient_fields( ("gamer","Real") ) + Grad_I = ds.add_gradient_fields( ("gamer","Imag") ) + Grad_f = 
ds.add_gradient_fields( ("gamer","f") ) + Grad_S = ds.add_gradient_fields( ("gamer","S") ) + + # momentum density + ds.add_field( ("gamer","j_x"), + function=_j_x, units="code_mass/(code_length**2*code_time)", + display_name=r"$j_x$", sampling_type="cell" ) + ds.add_field( ("gamer","j_y"), + function=_j_y, units="code_mass/(code_length**2*code_time)", + display_name=r"$j_y$", sampling_type="cell" ) + ds.add_field( ("gamer","j_z"), + function=_j_z, units="code_mass/(code_length**2*code_time)", + display_name=r"$j_z$", sampling_type="cell" ) + ds.add_field( ("gamer","j"), + function=_j, units="code_mass/(code_length**2*code_time)", + display_name=r"$|j|$", sampling_type="cell" ) + + # velocity + ds.add_field( ("gamer","v_x"), + function=_v_x, units="code_length/code_time", + display_name=r"$v_x$", sampling_type="cell" ) + ds.add_field( ("gamer","v_y"), + function=_v_y, units="code_length/code_time", + display_name=r"$v_y$", sampling_type="cell" ) + ds.add_field( ("gamer","v_z"), + function=_v_z, units="code_length/code_time", + display_name=r"$v_z$", sampling_type="cell" ) + ds.add_field( ("gamer","v"), + function=_v, units="code_length/code_time", + display_name=r"$|v|$", sampling_type="cell" ) diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/PowerSpec.py b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/PowerSpec.py new file mode 100644 index 0000000000..7c83ee09b6 --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/PowerSpec.py @@ -0,0 +1,137 @@ +####################################### + +# Computation of Power spectrum within a target halo. +# Run Compute_profile.py to call these functions. + +####################################### +import os +import yt +import numpy as np +import csv + +rfac = 10 # the enclosed box has length of 2 * rfac * rc +rmfac = 2 # density within rmfac * rc is zeroed +lv = 7 + +def corezero( x, y, z, rho, r_min, center ): + + r = np.sqrt( (x - center[0])**2 + + (y - center[1])**2 + + (z - center[2])**2) + mask = np.where( (r < r_min) )[0] + rho[mask] = 0.0 + return rho + +def get_frb_cube( ds, c, radius, lv, dxs ): + + L = radius*2 + frb_res = np.int64( L/dxs[lv] ) + cedge = c - radius + cube = ds.covering_grid( lv, left_edge=cedge, dims=[frb_res, frb_res, frb_res] ) + + return cube + +def density_profile( x, y, z, rho, center ): + + r = np.sqrt( (x - center[0])**2 + + (y - center[1])**2 + + (z - center[2])**2) + + N = np.int64( 8e1 ) + rbin_edge = np.logspace( -3, np.log10(r.max()), N ) + rbin = (rbin_edge[1:] + rbin_edge[:-1])/2 + densprof = np.zeros( N - 1 ) + counts = np.zeros( N - 1 ) + + inds = np.digitize( r, rbin_edge ) + inds -= 1 + + for ind in np.unique( inds ): + if ( ind > 0 and ind < inds.max() ): + densprof[ind] += np.sum( (rho)[inds==ind] ) + counts [ind] += np.sum( inds==ind ) + + mask = (counts==0) + return rbin[~mask], densprof[~mask]/counts[~mask], counts[~mask] + +def correlation_function_Pk( rho, rhoave ): + + N = np.int64( np.ceil( rho.shape[0]**(1./3) ) ) + rho = rho.reshape( (N, N, N) ) + rho = (rho - rhoave)/rhoave + Pk3D = np.abs( np.fft.fftn( rho ) )**2 + + k = np.fft.fftfreq( N ) + kx,ky,kz = np.meshgrid( k, k, k ) + kx = kx.flatten() + ky = ky.flatten() + kz = kz.flatten() + Pk3D = Pk3D.flatten() + distance = np.sqrt( kx**2 + ky**2 + kz**2 ) + + N = np.int64( 8e1 ) + rbin_edge = np.logspace( -2, np.log10( distance.max() ),N ) + rbin = (rbin_edge[1:]+rbin_edge[:-1])/2 + corrfunc = np.zeros( N-1 ) + counts = np.zeros( N-1 ) + + inds = np.digitize( distance, rbin_edge ) + inds -= 1 
+ + for ind in np.unique( inds ): + if ( ind > 0 and ind < inds.max() ): + corrfunc[ind] += np.sum( (Pk3D)[inds==ind] ) + counts [ind] += np.sum( inds==ind ) + + mask = (counts==0) + return rbin[~mask], corrfunc[~mask]/counts[~mask], counts[~mask] + +def normalize_rho( x, y, z, rho, dr, dprof, center ): + + rhonorm = rho.copy() + r = np.sqrt( (x - center[0])**2 + + (y - center[1])**2 + + (z - center[2])**2 ) + + inds = np.digitize( r, dr )-1 + + for i in range( inds.shape[0] ): + rhonorm[i] /= dprof[inds[i]] + + return rhonorm + +def Compute_PowerSpectrum( ds, center, rc, halo_id, path_data ): + + a = 1/(1+ds.current_redshift) + h = ds.hubble_constant + rc *= h / 1e3 # from comoving kpc to comoving Mpc /h + dx_max = (ds.domain_width[0]/ds.domain_dimensions[0] ).d # in cMpc/h + dxs = ds.arr( [dx_max/(2**i) for i in np.arange(ds.max_level+1)], 'code_length' ) # in cMpc/h + + rmax = rfac * rc + rmin = rmfac * rc + cube = get_frb_cube( ds, center, rmax, lv, dxs ) + + x = cube["gamer","x"].to( "Mpc/h" ).d.flatten() + y = cube["gamer","y"].to( "Mpc/h" ).d.flatten() + z = cube["gamer","z"].to( "Mpc/h" ).d.flatten() + rho = cube["gas", "density"].to("Msun/kpc**3").d.flatten() + + dr,dprof,_ = density_profile( x, y, z, rho, center ) + rho = normalize_rho( x, y, z, rho, dr, dprof, center ) + rho = corezero( x, y, z, rho, rmin, center ) + rhoave = np.mean( rho ) + + k,pk,c = correlation_function_Pk( rho, rhoave ) + pknorm = pk * k**3 + pknorm /= pknorm.max() + k = k / dxs[-1] / 1e3 / a * h * np.pi # from 1/ pixel to 1/kpc == wave vector (cycle/dx) but not angular wave vector (2pi/dx) + + if not os.path.exists( path_data + '/powerspec_within_halo' ): + os.makedirs( path_data + '/powerspec_within_halo' ) + + with open( '%s/powerspec_within_halo/%s_%d_powerspectrum_within_halo'%(path_data,ds,halo_id) , 'w' ) as file: + writer = csv.writer( file, delimiter='\t' ) + writer.writerow( [f"{'#wavenumber(2pi/kpc)':<22}", f"{'powerspec':<15}"] ) + for i in range( len(k) ): + writer.writerow( [f"{k[i].d:<22.8f}", f"{pknorm[i]:<15.8f}"] ) diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/Profile_Functions.py b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/Profile_Functions.py new file mode 100755 index 0000000000..47b97fdaeb --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/Profile_Functions.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python3.9 +################################################################################ + +# Run Compute_profile.py to call these functions. 
+# Contains functions called by Compute_profiles.py + +################################################################################ + +import argparse +import csv +import os +import sys + +import numpy as np +import yt +from scipy.optimize import curve_fit, ridder +from VelocityDisp import * +from PowerSpec import * + +def soliton( x, xc ,time_a, particle_mass ): + return 1.9/time_a*((particle_mass/1e-23)**-2)*((xc)**-4)/(1+9.1*1e-2*(x/xc)**2)**8*1e9 + +def find_virial_mass( r, mass_para, zeta, background_density ): + mass = 10**np.interp(np.log10(r), np.log10( mass_para[0]), np.log10( mass_para[1] ) ) + return mass-zeta*background_density*(4/3*np.pi*r**3) + +def compute_profile( ds, center, _halo_radius, halo_id, path_data ): + + if not os.path.exists(path_data): + print( "ERROR: %s does not exist !!"%path_data ) + exit(0) + + print( "start computing profile...\n" ) + omega_M0 = ds.omega_matter + newton_G = ds.units.newtons_constant.to('(kpc*km**2)/(s**2*Msun)').d #(kpc*km^2)/(s^2*Msun) + background_density_0 = (1*ds.units.code_density ).to("Msun/kpc**3").d + particle_mass = (ds.parameters[ 'ELBDM_Mass' ]*ds.units.code_mass).to('eV/c**2').d + zeta_0 = (18*np.pi**2 + 82*( omega_M0 - 1 ) - 39*( omega_M0 - 1 )**2 )/omega_M0 + + # plot variable + nbin = 256 + max_radius = 4e2 + coordinates = ds.arr( [center[0], center[1], center[2]], 'code_length' ) + + # save file parameter + halo_parameter_filename = "Halo_Parameter" + + # periodicity + ds.force_periodicity() + + # find center + # find the maximum value in a sphere extending halo_radius from center_guess + halo_radius = _halo_radius # halo radius in cMpc/h --> change this value properly + if ds.cosmological_simulation: + center_guess = coordinates + sphere_guess = ds.sphere( center_guess, (halo_radius, 'code_length') ) + center_find = sphere_guess.quantities.max_location( 'density' ) + center_coordinate = ds.arr( [center_find[1].d, center_find[2].d, center_find[3].d], 'code_length' ) + else: + center_coordinate = coordinates + print( "Center is ", center_coordinate.in_units('code_length') ) + + # extract halo + sp = ds.sphere( center_coordinate, (0.30, 'code_length') ) # extract sphere with max 0.25 Mpc + max_level = ds.max_level + min_radius = ds.domain_width.in_units("kpc")[0].d/2**max_level/ds.domain_dimensions[0] + + prof_mass_accumulate = yt.create_profile( sp, 'radius', fields = 'cell_mass', + weight_field = None, n_bins = nbin , + units = {'radius': 'kpc', 'cell_mass': 'Msun'}, + extrema = {'radius': (min_radius,max_radius)}, accumulation = True ) + + prof_dens = yt.create_profile( sp, 'radius', fields = 'density', + weight_field = 'cell_volume', n_bins = nbin , + units = {'radius': 'kpc','density': 'Msun/kpc**3'}, + extrema = {'radius': (min_radius,max_radius)} ) + + prof_volume = yt.create_profile( sp, 'radius', fields = 'cell_volume', + weight_field = None, n_bins = nbin , + units = {'radius': 'kpc', 'cell_volume': 'kpc**3'}, + extrema = {'radius': (min_radius,max_radius)} ) + + radius_o = prof_dens.x.value + density_o = prof_dens['density'].value # density at radius + mass_accumulate_o = prof_mass_accumulate['cell_mass'].value # all mass within radius + volume_o = prof_volume['cell_volume'].value # volume within radius + + # remove zero + dens_idx = density_o != 0 + + radius = radius_o[dens_idx] + density = density_o[dens_idx] + mass_accumulate = mass_accumulate_o[dens_idx] + circular_velocity = np.sqrt( newton_G * mass_accumulate/radius ) + + # output profiles + if not os.path.exists( path_data + '/prof_dens' ): + 
        os.makedirs( path_data + '/prof_dens' )
+
+    if not os.path.exists( path_data + '/prof_mass' ):
+        os.makedirs( path_data + '/prof_mass' )
+
+    if not os.path.exists( path_data + '/prof_circular_vel' ):
+        os.makedirs( path_data + '/prof_circular_vel' )
+
+    with open( '%s/prof_dens/%s_%d_profile_data'%(path_data,ds,halo_id) , 'w' ) as file:
+        writer = csv.writer( file, delimiter='\t' )
+        writer.writerow( [f"{'#radius(ckpc)':<15}", f"{'density(Msun/ckpc**3)':<15}"] )
+        for i in range( len(radius) ):
+            writer.writerow( [f"{radius[i]:<15.8f}", f"{density[i]:<15.8f}"] )
+#           writer.writerow( [radius[i], density[i]] )
+
+    with open( '%s/prof_mass/%s_%d_mass_accumulate'%(path_data,ds,halo_id) , 'w' ) as file:
+        writer = csv.writer( file, delimiter='\t' )
+        writer.writerow( [f"{'#radius(ckpc)':<15}", f"{'mass(Msun)':<15}"] )
+        for i in range( len(radius) ):
+            writer.writerow( [f"{radius[i]:<15.8f}", f"{mass_accumulate[i]:<15.8f}"] )
+
+    with open( '%s/prof_circular_vel/%s_%d_circular_velocity'%(path_data,ds,halo_id) , 'w' ) as file:
+        writer = csv.writer( file, delimiter='\t' )
+        writer.writerow( [f"{'#radius(ckpc)':<15}", f"{'Vcir(km/s)':<15}"] )
+        for i in range( len(radius) ):
+            writer.writerow( [f"{radius[i]:<15.8f}", f"{circular_velocity[i]:<15.8f}"] )
+
+    ############# Output the halo's properties (core mass, halo mass, radius) ####################
+
+    # calculate the virial mass to get the halo radius
+    if ds.cosmological_simulation:
+        current_time_z = ds.current_redshift
+    else:
+        current_time_z = 0
+    current_time_a = 1.0/(1+current_time_z)
+
+    # definition of zeta (halo radius)
+    omega_M = (omega_M0*(1 + current_time_z)**3)/(omega_M0*(1 + current_time_z)**3 + (1 - omega_M0))
+    zeta    = (18*np.pi**2 + 82*(omega_M - 1) - 39*(omega_M - 1)**2)/omega_M
+
+    # use the accumulated mass profile directly
+    halo_radius = ridder( lambda x:find_virial_mass(x,(radius, mass_accumulate),zeta, background_density_0),min_radius, max_radius )
+    halo_mass   = 10**np.interp( np.log10( halo_radius ), np.log10( radius ), np.log10( mass_accumulate ) )
+
+    #core radius 2 : xc = max/2
+    try:
+        core_radius_2 = ridder( lambda x: 10**np.interp( np.log10( x ), np.log10( radius ), np.log10( density )) - max(density)/2, radius[0], max( radius ) )
+    except ValueError as e:
+        print("Error while getting core_radius_2: likely unable to locate the center of the halo.
Please adjust center_first_guess or vicinity in Compute_profiles.py") + exit(0) + return + core_mass_2 = 10**np.interp( np.log10( core_radius_2 ), np.log10( radius ), np.log10( mass_accumulate ) ) + + #core radius 1 : curve fit + avg = (density > 0.1*max(density)) + popt, pcov = curve_fit( lambda x, r_c:soliton(x, r_c, current_time_a, particle_mass), radius[avg], density[avg], bounds=(5e-1, 50) ) + core_radius_1 = popt[0] + core_mass_1 = 10**np.interp( np.log10( core_radius_1 ),np.log10( radius ), np.log10( mass_accumulate ) ) + + #core radius 3 : x = 0 solve equation + a = (2**(1.0/8) - 1)**(1.0/2) + core_radius_3 = (max(density)/10**9/1.9*float(current_time_a)*(particle_mass/10**-23)**2)**-0.25 + core_mass_3 = ((4.2*10**9/((particle_mass/10**-23)**2*(float(core_radius_3*current_time_a)*10**3)))*(1/(a**2 + 1)**7)*(3465*a**13 + 23100*a**11 + 65373*a**9 + 101376*a**7 + 92323*a**5 + 48580*a**3 - 3465*a + 3465*(a**2 + 1)**7*np.arctan(a))) + + sto_list = [] + sto_list.append( int(str(ds).split("_")[-1]) ) + sto_list.append( halo_id ) + sto_list.append( particle_mass ) + sto_list.append( center_coordinate.in_units('code_length')[0].d ) + sto_list.append( center_coordinate.in_units('code_length')[1].d ) + sto_list.append( center_coordinate.in_units('code_length')[2].d ) + sto_list.append( current_time_z ) + sto_list.append( halo_radius*current_time_a ) + sto_list.append( halo_mass ) + sto_list.append( max(density)*current_time_a**-3 ) + sto_list.append( core_radius_1*current_time_a ) + sto_list.append( core_radius_2*current_time_a ) + sto_list.append( core_radius_3*current_time_a ) + sto_list.append( core_mass_1 ) + sto_list.append( core_mass_2 ) + sto_list.append( core_mass_3 ) + + file_exists = os.path.exists( "%s/%s"%(path_data, halo_parameter_filename) ) + with open( "%s/%s"%(path_data,halo_parameter_filename), 'a+' ) as file: + writer = csv.writer( file, delimiter='\t' ) + + # write header + if not file_exists: + writer.writerow( [f"{'#snap':<6}", + f"{'halo_id':<8}", + f"{'m[eV]':<8}", + f"{'x[cMpc/h]':<11}", + f"{'y[cMpc/h]':<11}", + f"{'z[cMpc/h]':<11}", + f"{'redshift':<11}", + f"{'r_vir[kpc]':<13}", + f"{'Mvir[Msub]':<13}", + f"{'rho_max[Msun/kpc**3]':<20}", + f"{'r_c1[kpc]':<13}", + f"{'r_c2[kpc]':<13}", + f"{'r_c3[kpc]':<13}", + f"{'M_c1[Msun]':<13}", + f"{'M_c2[Msun]':<13}", + f"{'M_c3[Msun]':<13}" + ] ) + # write data + writer.writerow( [ + f"{sto_list[0]:<6}", + f"{sto_list[1]:<8}", + f"{sto_list[2]:<8.1e}", + f"{sto_list[3]:<11.8f}", + f"{sto_list[4]:<11.8f}", + f"{sto_list[5]:<11.8f}", + f"{sto_list[6]:<11.8f}", + f"{sto_list[7]:<13.8f}", + f"{sto_list[8]:<13.8e}", + f"{sto_list[9]:<20.8e}", + f"{sto_list[10]:<13.8f}", + f"{sto_list[11]:<13.8f}", + f"{sto_list[12]:<13.8f}", + f"{sto_list[13]:<13.8e}", + f"{sto_list[14]:<13.8e}", + f"{sto_list[15]:<13.8e}" + ] ) + + # Compute Velocity Dispersion & Power spectrum seperately + Compute_VelocityDispersion( ds, center_coordinate.in_units('code_length').d, halo_id, path_data ) + Compute_PowerSpectrum( ds, center_coordinate.in_units('code_length').d, core_radius_1, halo_id, path_data ) diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/VelocityDisp.py b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/VelocityDisp.py new file mode 100644 index 0000000000..48bdb28bd2 --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/VelocityDisp.py @@ -0,0 +1,117 @@ +####################################### + +# Computation of Velocity Dispersion. +# Run Compute_profile.py to call these functions. 
+ +# carefully adjust rmax_for_vd to avoid exceeeding memory limit +####################################### +import os +import yt +import numpy as np +import csv +from ELBDM_DerivedField import * + +rmax_for_vd = 0.2 # in comoving Mpc/h. Maximum enclosing radius for computation of velocity dispersion + +def get_frb_cube( ds, c, radius, lv, dxs ): + + L = radius*2 + frb_res = np.int64( L/dxs[lv] ) + cedge = c - radius + cube = ds.covering_grid( lv, left_edge=cedge, dims=[frb_res, frb_res, frb_res],num_ghost_zones=1 ) + + return cube + +def density_profile( x, y, z, rho, center ): + + r = np.sqrt( (x - center[0])**2 + + (y - center[1])**2 + + (z - center[2])**2 ) + + N = np.int64( 128 ) + rbin_edge = np.logspace( -4, np.log10(r.max()), N ) + rbin = (rbin_edge[1:]+rbin_edge[:-1])/2 + densprof = np.zeros( N-1 ) + counts = np.zeros( N-1 ) + + inds = np.digitize(r, rbin_edge) + inds -= 1 + + for ind in np.unique(inds): + if (ind > 0 and ind < inds.max()): + densprof[ind] += np.sum((rho)[inds==ind]) + counts [ind] += np.sum(inds==ind) + + mask = (counts==0) + + return rbin[~mask], densprof[~mask]/counts[~mask], counts[~mask] + +def Hz( z, H0, OmegaL, Omegam ): + return H0 * np.sqrt( Omegam* (1+z)**3 + OmegaL ) + +def Compute_VelocityDispersion( ds, center, halo_id, path_data ): + + omegaL = ds.omega_lambda + omegam = ds.omega_matter + a = 1/(1 + ds.current_redshift) + z = ds.current_redshift + h = ds.hubble_constant + H = Hz( z, h*100, omegaL, omegam ) + dx_max = (ds.domain_width[0]/ds.domain_dimensions[0]).d # in cMpc/h + dxs = ds.arr( [dx_max/(2**i) for i in np.arange(ds.max_level+1)], 'code_length' ) # in cMpc/h + lv = 7 + + Add_derived_fields( ds ) + + cube = get_frb_cube( ds, center, rmax_for_vd, lv, dxs ) + + x = cube["gamer","x"].to( "Mpc/h" ).d.flatten() + y = cube["gamer","y"].to( "Mpc/h" ).d.flatten() + z = cube["gamer","z"].to( "Mpc/h" ).d.flatten() + rho = cube["gas", "density"].to( "Msun/kpc**3" ).d.flatten() + vx = cube["gamer", "v_x"].to( "km/s" ).d.flatten() / a + H * a * x # physical v = comoving v * a + Hax_c + vy = cube["gamer", "v_y"].to( "km/s" ).d.flatten() / a + H * a * y # remind: v = \grad S = dS/dy = dS/dy_c * a^(-1) + vz = cube["gamer", "v_z"].to( "km/s" ).d.flatten() / a + H * a * z + + vx_center = np.sum( rho*vx )/np.sum( rho ) + vy_center = np.sum( rho*vy )/np.sum( rho ) + vz_center = np.sum( rho*vz )/np.sum( rho ) + + vx -= vx_center; # rest frame at center of subhalo + vy -= vy_center; + vz -= vz_center; + + rhovx2 = rho * vx*vx + rhovy2 = rho * vy*vy + rhovz2 = rho * vz*vz + rhovx = rho * vx + rhovy = rho * vy + rhovz = rho * vz + + dr,dprof,_ = density_profile( x, y, z, rho, center ) + dr,rhovx2prof,_ = density_profile( x, y, z, rhovx2, center ) + dr,rhovy2prof,_ = density_profile( x, y, z, rhovy2, center ) + dr,rhovz2prof,_ = density_profile( x, y, z, rhovz2, center ) + dr,rhovxprof,_ = density_profile( x, y, z, rhovx, center ) + dr,rhovyprof,_ = density_profile( x, y, z, rhovy, center ) + dr,rhovzprof,_ = density_profile( x, y, z, rhovz, center ) + + rhovxprof2 = rhovxprof**2 + rhovyprof2 = rhovyprof**2 + rhovzprof2 = rhovzprof**2 + + sigmax2 = rhovx2prof/dprof - rhovxprof2/(dprof*dprof) + sigmay2 = rhovy2prof/dprof - rhovyprof2/(dprof*dprof) + sigmaz2 = rhovz2prof/dprof - rhovzprof2/(dprof*dprof) + + sigma = np.sqrt( (sigmax2 + sigmay2 + sigmaz2) / 3.0 ) + + dr *= 1e3/h # in ckpc + if not os.path.exists( path_data + '/prof_veldisp' ): + os.makedirs( path_data + '/prof_veldisp' ) + + with open( 
'%s/prof_veldisp/%s_%d_veldisp_haloRestFrame'%(path_data,ds,halo_id) , 'w' ) as file: + writer = csv.writer( file, delimiter='\t' ) + writer.writerow( [f"{'#radius(ckpc)':<15}", f"{'VelDisp(km/s)':<15}"] ) + for i in range( len(dr) ): + writer.writerow( [f"{dr[i]:<15.8f}", f"{sigma[i]:<15.8f}"] ) diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/plot_projection_grid.py b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/plot_projection_grid.py new file mode 100644 index 0000000000..d1b76b147d --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/plot_projection_grid.py @@ -0,0 +1,67 @@ +################################################################################ + +# This code output the density projection of the simulation with a pre-determined center. + +################################################################################ + +import argparse +import sys +import yt +import numpy as np + +# global parameter settings +field = ('gas', 'density') +axes = ["x", "y", "z"] # directions +lv = 10 # maximum level for sampling AMR grid +dpi = 300 +colormap_dens = 'algae' +center_mode = 'c' +zooms = [1,7] + +# load the command-line parameters +parser = argparse.ArgumentParser( description='Projection of mass density' ) + +parser.add_argument( '-i', action='store', required=False, type=str, dest='prefix', + help='path prefix [%(default)s]', default='../' ) +parser.add_argument( '-s', action='store', required=True, type=int, dest='idx_start', + help='first data index' ) +parser.add_argument( '-e', action='store', required=True, type=int, dest='idx_end', + help='last data index' ) +parser.add_argument( '-d', action='store', required=False, type=int, dest='didx', + help='delta data index [%(default)d]', default=1 ) + +# take note +print( '\nCommand-line arguments:' ) +print( '-------------------------------------------------------------------' ) +print( ' '.join(map(str, sys.argv)) ) +print( '-------------------------------------------------------------------\n' ) + +args=parser.parse_args() +idx_start = args.idx_start +idx_end = args.idx_end +didx = args.didx +prefix = args.prefix + +yt.enable_parallelism() +ts = yt.DatasetSeries( [prefix+'/Data_%06d'%idx for idx in range( idx_start, idx_end+1, didx )] ) + +center = [0.25817871, 9.3939209, 8.20983887] # center of the target halo at z=0 for low resolution IC + +for ds in ts.piter(): + num = '%s'%ds + num = int( num[5:11] ) + for ax in axes: + for zoom in zooms: + pz_dens = yt.ProjectionPlot( ds, ax, field, center=center ) + + pz_dens.set_zlim( field, 1.0e-5, 2.0e-2 ) + pz_dens.set_cmap( field, colormap_dens ) + pz_dens.annotate_timestamp( time_unit='Gyr', redshift=True, corner='upper_right' ) + pz_dens.set_axes_unit( 'Mpc/h' ) + pz_dens.zoom( zoom ) + pz_dens.save( 'Data_%06d_Proj_%s_%s_x%d.png'%(num, ax, field[1],zoom), mpl_kwargs={"dpi":dpi} ) + + pz_dens.annotate_grids() + pz_dens.save( 'Data_%06d_Proj_%s_%s_x%d_grid.png'%(num, ax, field[1],zoom), mpl_kwargs={"dpi":dpi} ) + pz_dens.zoom( 1./zoom ) + diff --git a/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/plot_slice_grid.py b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/plot_slice_grid.py new file mode 100644 index 0000000000..be06f9e3dc --- /dev/null +++ b/example/test_problem/ELBDM/LSS_Hybrid_Zoomin/plot_script/plot_slice_grid.py @@ -0,0 +1,65 @@ +################################################################################ + +# This code output the density slice of the simulation with a pre-determined 
center. + +################################################################################ + +import argparse +import sys +import yt +import numpy as np + +# global parameter settings +field = ('gas', 'density') +axes = ["x", "y", "z"] # directions +lv = 10 # maximum level for sampling AMR grid +dpi = 300 +colormap_dens = 'algae' +center_mode = 'c' +zooms = [1,7] + +# load the command-line parameters +parser = argparse.ArgumentParser(description='Slice of mass density') + +parser.add_argument( '-i', action='store', required=False, type=str, dest='prefix', + help='path prefix [%(default)s]', default='../' ) +parser.add_argument( '-s', action='store', required=True, type=int, dest='idx_start', + help='first data index' ) +parser.add_argument( '-e', action='store', required=True, type=int, dest='idx_end', + help='last data index' ) +parser.add_argument( '-d', action='store', required=False, type=int, dest='didx', + help='delta data index [%(default)d]', default=1 ) + +# take note +print( '\nCommand-line arguments:' ) +print( '-------------------------------------------------------------------' ) +print( ' '.join(map(str, sys.argv)) ) +print( '-------------------------------------------------------------------\n' ) + +args = parser.parse_args() +idx_start = args.idx_start +idx_end = args.idx_end +didx = args.didx +prefix = args.prefix + +center = [0.25817871, 9.3939209, 8.20983887] # center of the target halo at z=0 for low resolution IC + +yt.enable_parallelism() +ts = yt.DatasetSeries( [prefix+'/Data_%06d'%idx for idx in range( idx_start, idx_end+1, didx )] ) + +for ds in ts.piter(): + num = '%s'%ds + num = int( num[5:11] ) + for ax in axes: + for zoom in zooms: + pz_dens = yt.SlicePlot( ds, ax, field, center=center ) + + pz_dens.set_cmap( field, colormap_dens ) + pz_dens.annotate_timestamp( time_unit='Gyr', redshift=True, corner='upper_right' ) + pz_dens.set_axes_unit( 'Mpc/h' ) + pz_dens.zoom( zoom ) + pz_dens.save( 'Data_%06d_Slice_%s_%s_x%d.png'%(num, ax, field[1],zoom), mpl_kwargs={"dpi":dpi} ) + + pz_dens.annotate_grids() + pz_dens.save( 'Data_%06d_Slice_%s_%s_x%d_grid.png'%(num, ax, field[1],zoom), mpl_kwargs={"dpi":dpi} ) + pz_dens.zoom( 1./zoom ) diff --git a/src/TestProblem/ELBDM/LSS/Init_TestProb_ELBDM_LSS.cpp b/src/TestProblem/ELBDM/LSS/Init_TestProb_ELBDM_LSS.cpp index 2d00db3636..091b10ceb3 100644 --- a/src/TestProblem/ELBDM/LSS/Init_TestProb_ELBDM_LSS.cpp +++ b/src/TestProblem/ELBDM/LSS/Init_TestProb_ELBDM_LSS.cpp @@ -1,10 +1,18 @@ #include "GAMER.h" +#define ZOOM_A 0 // id of scale factor in ZoomIn_Table +#define ZOOM_LX 1 // id of LX in ZoomIn_Table +#define ZOOM_CX 4 // id of CenterX in ZoomIn_Table // problem-specific global variables // ======================================================================================= -static int LSS_InitMode; // initialization mode: 1=density-only, 2=real and imaginary parts or phase and density +static int LSS_InitMode; // initialization mode: 1=density-only, 2=real and imaginary parts or phase and density +static int ZoomIn_MaxLvOutside; // maximum refinement level outside of the zoom-in box +static int ZoomIn_NRow; // number of rows of the zoom-in table +static int ZoomIn_NCol = 7; // number of columns of the zoom-in table +static real **ZoomIn_Table; // arrays of the table of scale factor, length, and center in xyz-axis + // ======================================================================================= @@ -50,7 +58,6 @@ void Validate() // warnings - if ( MPI_Rank == 0 ) Aux_Message( stdout, " Validating test problem %d 
... done\n", TESTPROB_ID );

 } // FUNCTION : Validate
@@ -80,8 +87,9 @@ void SetParameter()
 // (1) load the problem-specific runtime parameters
-   const char FileName[] = "Input__TestProb";
-   ReadPara_t *ReadPara  = new ReadPara_t;
+   const char FileName[]       = "Input__TestProb";
+   const char FileNameZoomIn[] = "Input__ZoominBox";
+   ReadPara_t *ReadPara        = new ReadPara_t;

 // (1-1) add parameters in the following format:
 // --> note that VARIABLE, DEFAULT, MIN, and MAX must have the same data type
@@ -89,21 +97,80 @@ void SetParameter()
 // ********************************************************************************************************************************
 // ReadPara->Add( "KEY_IN_THE_FILE",   &VARIABLE,   DEFAULT,   MIN,   MAX );
 // ********************************************************************************************************************************
-   ReadPara->Add( "LSS_InitMode",   &LSS_InitMode,   1,   1,   2 );
+   ReadPara->Add( "LSS_InitMode",        &LSS_InitMode,        1,           1,    2         );
+   ReadPara->Add( "ZoomIn_MaxLvOutside", &ZoomIn_MaxLvOutside, MAX_LEVEL,   0,    MAX_LEVEL );

    ReadPara->Read( FileName );

    delete ReadPara;

-// (1-2) set the default values
-
-// (1-3) check the runtime parameters
-
-
-// (2) set the problem-specific derived parameters
-
-
-// (3) reset other general-purpose parameters
+   if ( OPT__FLAG_REGION )
+   {
+#     if ( ELBDM_SCHEME == ELBDM_HYBRID )
+      if ( ELBDM_FIRST_WAVE_LEVEL > ZoomIn_MaxLvOutside )
+         Aux_Error( ERROR_INFO, "must set ELBDM_FIRST_WAVE_LEVEL <= ZoomIn_MaxLvOutside for the zoom-in simulation !!\n" );
+#     endif // #if ( ELBDM_SCHEME == ELBDM_HYBRID )
+
+      if ( !Aux_CheckFileExist( FileNameZoomIn ) )
+         Aux_Error( ERROR_INFO, "%s does not exist !!\n", FileNameZoomIn );
+
+      char   *input_line = NULL;
+      size_t  len        = 0;
+      int     n;
+      FILE   *File_zoom;
+
+// get the number of data rows in Input__ZoominBox (the first line is treated as the header)
+      File_zoom   = fopen( FileNameZoomIn, "r" );
+      ZoomIn_NRow = -1;
+      while ( getline( &input_line, &len, File_zoom ) != -1 )   ++ZoomIn_NRow;
+      fclose( File_zoom );
+
+// allocate ZoomIn_Table
+      ZoomIn_Table = new real* [ZoomIn_NCol];
+      for (int i=0; i<ZoomIn_NCol; i++)   ZoomIn_Table[i] = new real [ZoomIn_NRow];
+
+// load the table, skipping the header line
+      File_zoom = fopen( FileNameZoomIn, "r" );
+      n = getline( &input_line, &len, File_zoom );
+
+      for (int s=0; s<ZoomIn_NRow; s++)
+      {
+         n = getline( &input_line, &len, File_zoom );
+
+         if ( n == -1 )
+            Aux_Error( ERROR_INFO, "failed to load row %d in %s !!\n", s+1, FileNameZoomIn );
+
+         sscanf( input_line, "%f%f%f%f%f%f%f",
+                 &ZoomIn_Table[ZOOM_A][s],
+                 &ZoomIn_Table[ZOOM_LX][s], &ZoomIn_Table[ZOOM_LX + 1][s], &ZoomIn_Table[ZOOM_LX + 2][s],
+                 &ZoomIn_Table[ZOOM_CX][s], &ZoomIn_Table[ZOOM_CX + 1][s], &ZoomIn_Table[ZOOM_CX + 2][s] );

+         if ( s < 1 )   continue;
+
+         if ( ZoomIn_Table[ZOOM_A][s-1] < ZoomIn_Table[ZOOM_A][s] )
+            Aux_Error( ERROR_INFO, "Current a=%f is greater than the previous a=%f. Scale factors are not listed in descending order in %s !!\n",
+                       ZoomIn_Table[ZOOM_A][s], ZoomIn_Table[ZOOM_A][s-1], FileNameZoomIn );
+
+         for (int XYZ=0; XYZ<3; XYZ++)
+         {
+            if ( ZoomIn_Table[ZOOM_CX + XYZ][s-1] - ZoomIn_Table[ZOOM_LX + XYZ][s-1]/2 < ZoomIn_Table[ZOOM_CX + XYZ][s] - ZoomIn_Table[ZOOM_LX + XYZ][s]/2  ||
+                 ZoomIn_Table[ZOOM_CX + XYZ][s-1] + ZoomIn_Table[ZOOM_LX + XYZ][s-1]/2 > ZoomIn_Table[ZOOM_CX + XYZ][s] + ZoomIn_Table[ZOOM_LX + XYZ][s]/2 )
+               Aux_Error( ERROR_INFO, "Zoom-in box at a = %.2f with LX=%.2f LY=%.2f LZ=%.2f CenterX=%.2f CenterY=%.2f CenterZ=%.2f " \
+                          "is outside of the zoom-in box at the earlier a = %.2f with LX=%.2f LY=%.2f LZ=%.2f CenterX=%.2f CenterY=%.2f CenterZ=%.2f !!\n",
+                          ZoomIn_Table[ZOOM_A][s-1],
+                          ZoomIn_Table[ZOOM_LX][s-1], ZoomIn_Table[ZOOM_LX + 1][s-1], ZoomIn_Table[ZOOM_LX + 2][s-1],
+                          ZoomIn_Table[ZOOM_CX][s-1], ZoomIn_Table[ZOOM_CX + 1][s-1], ZoomIn_Table[ZOOM_CX + 2][s-1],
+                          ZoomIn_Table[ZOOM_A][s],
+                          ZoomIn_Table[ZOOM_LX][s], ZoomIn_Table[ZOOM_LX + 1][s], ZoomIn_Table[ZOOM_LX + 2][s],
+                          ZoomIn_Table[ZOOM_CX][s], ZoomIn_Table[ZOOM_CX + 1][s], ZoomIn_Table[ZOOM_CX + 2][s] );
+         } // for (int XYZ=0; XYZ<3; XYZ++)
+      } // for (int s=0; s<ZoomIn_NRow; s++)
+
+      fclose( File_zoom );
+      free( input_line );
+   } // if ( OPT__FLAG_REGION )
+
+
+// (2) reset other general-purpose parameters
 //     --> a helper macro PRINT_RESET_PARA is defined in Macro.h
    const long   End_Step_Default = __INT_MAX__;
    const double End_T_Default    = 1.0;
@@ -119,12 +186,24 @@ void SetParameter()
    }

-// (4) make a note
+// (3) make a note
    if ( MPI_Rank == 0 )
    {
      Aux_Message( stdout, "=============================================================================\n" );
      Aux_Message( stdout, "  test problem ID     = %d\n", TESTPROB_ID );
      Aux_Message( stdout, "  initialization mode = %d\n", LSS_InitMode );
+     Aux_Message( stdout, "  ZoomIn_MaxLvOutside = %d\n", ZoomIn_MaxLvOutside );
+     if ( OPT__FLAG_REGION )
+     {
+        Aux_Message( stdout, "  Table in %s\n", FileNameZoomIn );
+        Aux_Message( stdout, "  #ScaleFactor  LX(UNIT_L)  LY(UNIT_L)  LZ(UNIT_L)  CenterX(UNIT_L)  CenterY(UNIT_L)  CenterZ(UNIT_L)\n" );
+
+        for (int i=0; i<ZoomIn_NRow; i++)
+           Aux_Message( stdout, "   %12.7f  %10.4f  %10.4f  %10.4f  %15.4f  %15.4f  %15.4f\n",
+                        ZoomIn_Table[ZOOM_A][i],
+                        ZoomIn_Table[ZOOM_LX][i], ZoomIn_Table[ZOOM_LX + 1][i], ZoomIn_Table[ZOOM_LX + 2][i],
+                        ZoomIn_Table[ZOOM_CX][i], ZoomIn_Table[ZOOM_CX + 1][i], ZoomIn_Table[ZOOM_CX + 2][i] );
+     } // if ( OPT__FLAG_REGION )
      Aux_Message( stdout, "=============================================================================\n" );
   } // if ( MPI_Rank == 0 )


   if ( MPI_Rank == 0 )    Aux_Message( stdout, "   Setting runtime parameters ... done\n" );

} // FUNCTION : SetParameter
@@ ... @@
+//-------------------------------------------------------------------------------------------------------
+// Function    :  Flag_Region_LSS
+// Description :  Check whether the cell (i,j,k) of the target patch lies within the current zoom-in box;
+//                cells outside the box are refined only up to ZoomIn_MaxLvOutside
+//
+// Note        :  1. Linked to the function pointer "Flag_Region_Ptr"
+//
+// Parameter   :  i,j,k : Target cell indices within the target patch
+//                lv    : Refinement level of the target patch
+//                PID   : ID of the target patch
+//
+// Return      :  true/false : the cell is inside/outside the zoom-in box
+//-------------------------------------------------------------------------------------------------------
+static bool Flag_Region_LSS( const int i, const int j, const int k, const int lv, const int PID )
+{
+
+   const double dh     = amr->dh[lv];                                        // cell size
+   const double Pos[3] = { amr->patch[0][lv][PID]->EdgeL[0] + (i+0.5)*dh,    // x,y,z position
+                           amr->patch[0][lv][PID]->EdgeL[1] + (j+0.5)*dh,
+                           amr->patch[0][lv][PID]->EdgeL[2] + (k+0.5)*dh  };
+
+   bool Within = true;
+
+// get the row of ZoomIn_Table that defines the zoom-in box size and center at the current simulation time
+   int zoom_idx = ZoomIn_NRow - 1;
+   for (int s=0; s<ZoomIn_NRow; s++)
+      if ( ZoomIn_Table[ZOOM_A][s] < Time[lv] )   {  zoom_idx = s;  break;  }
+
+   for (int XYZ=0; XYZ<3; XYZ++)
+   {
+      const double Pos_i = ( Pos[XYZ] - ZoomIn_Table[ZOOM_CX + XYZ][zoom_idx] >  0.5*amr->BoxSize[XYZ] ) ? Pos[XYZ] - amr->BoxSize[XYZ] :
+                           ( Pos[XYZ] - ZoomIn_Table[ZOOM_CX + XYZ][zoom_idx] < -0.5*amr->BoxSize[XYZ] ) ? Pos[XYZ] + amr->BoxSize[XYZ] : Pos[XYZ];
+      const double dR    = Pos_i - ZoomIn_Table[ZOOM_CX + XYZ][zoom_idx];
+
+      Within &= ( fabs(dR) < ZoomIn_Table[ZOOM_LX + XYZ][zoom_idx]/2 );
+   } // for (int XYZ=0; XYZ<3; XYZ++)
+
+   return Within;
+
+} // FUNCTION : Flag_Region_LSS
+
+
+
 //-------------------------------------------------------------------------------------------------------
 // Function    :  Init_ByFile_ELBDM_LSS
 // Description :  Function to actually set the fluid field from the input uniform-mesh array
@@ -235,6 +367,25 @@ void Init_ByFile_ELBDM_LSS( real fluid_out[], const real fluid_in[], const int n
+
+
+//-------------------------------------------------------------------------------------------------------
+// Function    :  End_Region_LSS
+// Description :  Free memory before terminating the program
+//
+// Note        :  1. Linked to the function pointer "End_User_Ptr" to replace "End_User()"
+//
+// Parameter   :  None
+//-------------------------------------------------------------------------------------------------------
+void End_Region_LSS()
+{
+
+   for (int i=0; i<ZoomIn_NCol; i++)   delete [] ZoomIn_Table[i];
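For reference, the SetParameter() code above expects Input__ZoominBox to contain one header line followed by rows of seven columns: ScaleFactor, LX, LY, LZ, CenterX, CenterY, CenterZ (lengths and centers in code units, UNIT_L). Scale factors must be listed in descending order, and each box must lie entirely inside the box of the row below it (the earlier epoch). The sketch below is purely illustrative and is not part of this patch: the column layout is inferred from the sscanf() call and the note printed by SetParameter(), and the center values loosely follow the target-halo center used in the plotting script above.

#ScaleFactor        LX       LY       LZ     CenterX   CenterY   CenterZ
 1.0000000          1.0      1.0      1.0     0.258     9.394     8.210
 0.5000000          2.0      2.0      2.0     0.258     9.394     8.210
 0.2500000          4.0      4.0      4.0     0.258     9.394     8.210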