diff --git a/CHANGELOG.md b/CHANGELOG.md
index 75bf43b..c179d66 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Changed
 
+- Rewrote the NEATM and FRM internal models and removed support for saving files
+  related to thermal and optical models.
 - Throughout the rust code, `Time` is being enforced as inputs for functions instead
   of accepting `f64` in a large number of places.
 
diff --git a/Cargo.toml b/Cargo.toml
index 56bdb73..3da127a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -76,12 +76,22 @@ rust.trivial_casts = "deny"
 rust.elided_lifetimes_in_paths = "deny"
 rust.unexpected_cfgs = "deny"
 
-clippy.too_many_arguments = "allow"
-clippy.type_complexity = "allow"
 clippy.perf = { level = "deny", priority = 1 }
-# clippy.pedantic = { level = "warn", priority = 1 }
+clippy.pedantic = { level = "deny", priority = 1 }
+
+# Pedantic overrides
+clippy.float_cmp = { level = "allow", priority = 2 }
+clippy.inline_always = { level = "allow", priority = 2 }
+clippy.unreadable-literal = { level = "allow", priority = 2 }
+clippy.similar_names = { level = "allow", priority = 2 }
+clippy.too_many_lines = { level = "allow", priority = 2 }
+clippy.cast_precision_loss = { level = "allow", priority = 2 }
+clippy.cast_possible_truncation = { level = "allow", priority = 2 }
+# General list
+clippy.too_many_arguments = "allow"
+clippy.type_complexity = "allow"
 clippy.allow_attributes_without_reason = "warn"
 clippy.collection_is_never_read = "warn"
 clippy.dbg_macro = "warn"
@@ -102,4 +112,10 @@ clippy.use_self = "warn"
 clippy.cargo_common_metadata = "warn"
 clippy.negative_feature_names = "warn"
 clippy.redundant_feature_names = "warn"
-clippy.wildcard_dependencies = "warn"
\ No newline at end of file
+clippy.wildcard_enum_match_arm = "warn"
+clippy.wildcard_dependencies = "warn"
+clippy.fallible_impl_from = "warn"
+clippy.unneeded_field_pattern = "warn"
+clippy.fn_params_excessive_bools = "warn"
+
+clippy.must_use_candidate = "deny"
\ No newline at end of file
diff --git a/src/kete/plot.py b/src/kete/plot.py
index 4c7c8b1..caa9b0b 100644
--- a/src/kete/plot.py
+++ b/src/kete/plot.py
@@ -43,8 +43,7 @@ def plot_fits_image(fit, percentiles=(0.1, 99.95), power_stretch=1.0, cmap="gray
         `ZScaleInterval`.
     power_stretch :
         The scaling of the intensity of the plot is a power law, this defines the power
-        of that power law. By default plots are sqrt scaled. If this is set to 1, then
-        this becomes a linear scaling.
+        of that power law. By default, plots are linearly scaled (`power_stretch=1.0`).
     cmap :
         Color map to use for the plot.
""" diff --git a/src/kete/rust/fitting.rs b/src/kete/rust/fitting.rs index a8f96d2..30059bf 100644 --- a/src/kete/rust/fitting.rs +++ b/src/kete/rust/fitting.rs @@ -14,5 +14,5 @@ pub fn ks_test_py(sample_a: Vec, sample_b: Vec) -> f64 { #[pyo3(name = "fit_chi2")] pub fn fit_chi2_py(data: Vec, sigmas: Vec) -> f64 { assert_eq!(data.len(), sigmas.len()); - fitting::fit_reduced_chi2(&data, &sigmas) + fitting::fit_reduced_chi2(&data, &sigmas).unwrap() } diff --git a/src/kete/rust/flux/common.rs b/src/kete/rust/flux/common.rs index 0778adc..5aa8fbc 100644 --- a/src/kete/rust/flux/common.rs +++ b/src/kete/rust/flux/common.rs @@ -1,3 +1,5 @@ +use core::f64; + use crate::{frame::PyFrames, vector::VectorLike}; use itertools::Itertools; use kete_core::constants::{ @@ -232,7 +234,7 @@ pub fn neatm_thermal_py( diameter: f64, wavelength: f64, emissivity: f64, -) -> f64 { +) -> PyResult { let sun2obj = sun2obj.into_vector(PyFrames::Ecliptic).into(); let sun2obs = sun2obs.into_vector(PyFrames::Ecliptic).into(); @@ -246,17 +248,13 @@ pub fn neatm_thermal_py( ) .unwrap(); let params = NeatmParams { - obs_bands: ObserverBands::Generic { - bands: vec![wavelength; 1], - zero_mags: None, - solar_correction: vec![1.0], - }, - band_albedos: vec![0.0; 1], + obs_bands: vec![BandInfo::new(wavelength, 1.0, f64::NAN, None)], + band_albedos: vec![0.0], hg_params, emissivity, beaming, }; - params.apparent_thermal_flux(&sun2obj, &sun2obs).unwrap()[0] + Ok(params.apparent_thermal_flux(&sun2obj, &sun2obs).unwrap()[0]) } /// Calculate the flux from an object using the FRM thermal model in Jansky. @@ -300,7 +298,7 @@ pub fn frm_thermal_py( diameter: f64, wavelength: f64, emissivity: f64, -) -> f64 { +) -> PyResult { let sun2obj = sun2obj.into_vector(PyFrames::Ecliptic).into(); let sun2obs = sun2obs.into_vector(PyFrames::Ecliptic).into(); let hg_params = HGParams::try_new( @@ -310,20 +308,15 @@ pub fn frm_thermal_py( Some(C_V), Some(v_albedo), Some(diameter), - ) - .unwrap(); + )?; let params = FrmParams { - obs_bands: ObserverBands::Generic { - bands: vec![wavelength; 1], - zero_mags: None, - solar_correction: vec![1.0], - }, - band_albedos: vec![0.0; 1], + obs_bands: vec![BandInfo::new(wavelength, 1.0, f64::NAN, None)], + band_albedos: vec![0.0], hg_params, emissivity, }; - params.apparent_thermal_flux(&sun2obj, &sun2obs).unwrap()[0] + Ok(params.apparent_thermal_flux(&sun2obj, &sun2obs).unwrap()[0]) } /// Given the M1/K1 and M2/K2 values, compute the apparent Comet visible magnitudes. @@ -421,7 +414,7 @@ pub fn w4_color_correction_py(temp: f64) -> f64 { /// The normal vectors of the fib lattice #[pyfunction] #[pyo3(name = "fib_lattice_vecs")] -pub fn fib_lattice_vecs_py(n_facets: usize) -> Vec<[f64; 3]> { +pub fn fib_lattice_vecs_py(n_facets: u32) -> Vec<[f64; 3]> { let facets = ConvexShape::new_fibonacci_lattice(n_facets).facets; facets .iter() diff --git a/src/kete/rust/flux/models.rs b/src/kete/rust/flux/models.rs index cc3fa24..10fdca5 100644 --- a/src/kete/rust/flux/models.rs +++ b/src/kete/rust/flux/models.rs @@ -1,12 +1,8 @@ //! Model inputs and outputs for NEATM/FRM/Reflected. use rayon::prelude::*; -use serde::{Deserialize, Serialize}; -use kete_core::{ - flux::{HGParams, ObserverBands}, - io::FileIO, -}; +use kete_core::flux::{BandInfo, HGParams}; use pyo3::prelude::*; use crate::{frame::PyFrames, vector::VectorLike}; @@ -28,7 +24,7 @@ use crate::{frame::PyFrames, vector::VectorLike}; /// magnitudes : /// Magnitudes in the different bands if zero mags were available. 
#[pyclass(frozen, module = "kete.flux", name = "ModelResults")] -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug)] pub struct PyModelResults(pub kete_core::flux::ModelResults); impl From for PyModelResults { @@ -50,6 +46,7 @@ impl PyModelResults { v_band_flux: f64, magnitudes: Option>, ) -> Self { + let magnitudes = magnitudes.unwrap_or(vec![f64::NAN; fluxes.len()]); kete_core::flux::ModelResults { fluxes, magnitudes, @@ -69,7 +66,7 @@ impl PyModelResults { /// Magnitudes in the different bands if zero mags were available. #[getter] - pub fn magnitudes(&self) -> Option> { + pub fn magnitudes(&self) -> Vec { self.0.magnitudes.clone() } @@ -97,35 +94,6 @@ impl PyModelResults { self.0.v_band_flux.to_degrees() } - /// Save results to a binary file. - #[pyo3(name = "save")] - pub fn py_save(&self, filename: String) -> PyResult { - Ok(self.0.save(filename)?) - } - - /// Load results from a binary file. - #[staticmethod] - #[pyo3(name = "load")] - pub fn py_load(filename: String) -> PyResult { - Ok(Self(kete_core::flux::ModelResults::load(filename)?)) - } - - /// Save a list to a binary file. - #[staticmethod] - #[pyo3(name = "save_list")] - pub fn py_save_list(vec: Vec, filename: String) -> PyResult<()> { - let vec: Vec<_> = vec.into_iter().map(|x| x.0).collect(); - Ok(kete_core::flux::ModelResults::save_vec(&vec, filename)?) - } - - /// Load a list from a binary file. - #[staticmethod] - #[pyo3(name = "load_list")] - pub fn py_load_list(filename: String) -> PyResult> { - let res = kete_core::flux::ModelResults::load_vec(filename)?; - Ok(res.into_iter().map(Self).collect()) - } - fn __repr__(&self) -> String { format!( "ModelResults(fluxes={:?}, thermal_fluxes={:?}, hg_fluxes={:?}, v_band_magnitude={:?},\ @@ -193,11 +161,11 @@ impl From for PyNeatmParams { impl PyNeatmParams { #[new] #[allow(clippy::too_many_arguments, missing_docs)] - #[pyo3(signature = (desig, band_wavelength, band_albedos, h_mag=None, diam=None, + #[pyo3(signature = (desig, band_wavelengths, band_albedos, h_mag=None, diam=None, vis_albedo=None, beaming=1.0, g_param=0.15, c_hg=None, emissivity=0.9, zero_mags=None))] pub fn new( desig: String, - band_wavelength: Vec, + band_wavelengths: Vec, band_albedos: Vec, h_mag: Option, diam: Option, @@ -208,13 +176,17 @@ impl PyNeatmParams { emissivity: f64, zero_mags: Option>, ) -> PyResult { + let n_bands = band_wavelengths.len(); + + let zero_mags = zero_mags.unwrap_or(vec![f64::NAN; n_bands]); let hg_params = HGParams::try_new(desig, g_param, h_mag, c_hg, vis_albedo, diam)?; - let obs_bands = ObserverBands::Generic { - solar_correction: vec![1.0; band_wavelength.len()], - bands: band_wavelength, - zero_mags, - }; + let obs_bands = band_wavelengths + .iter() + .zip(zero_mags) + .map(|(wavelength, z_mag)| BandInfo::new(*wavelength, 1.0, z_mag, None)) + .collect(); + Ok(kete_core::flux::NeatmParams { obs_bands, beaming, @@ -373,7 +345,7 @@ impl PyNeatmParams { /// List of effective wavelengths in nm. #[getter] pub fn band_wavelength(&self) -> Vec { - self.0.obs_bands.band_wavelength().to_vec() + self.0.obs_bands.iter().map(|x| x.wavelength).collect() } /// List of albedoes of the object, one for each band. @@ -420,21 +392,8 @@ impl PyNeatmParams { /// List of the zero mags for each band if provided. #[getter] - pub fn zero_mags(&self) -> Option> { - self.0.obs_bands.zero_mags().map(|x| x.to_vec()) - } - - /// Save results to a binary file. - #[pyo3(name = "save")] - pub fn py_save(&self, filename: String) -> PyResult { - Ok(self.0.save(filename)?) 
- } - - /// Load results from a binary file. - #[staticmethod] - #[pyo3(name = "load")] - pub fn py_load(filename: String) -> PyResult { - Ok(Self(kete_core::flux::NeatmParams::load(filename)?)) + pub fn zero_mags(&self) -> Vec { + self.0.obs_bands.iter().map(|x| x.zero_mag).collect() } fn __repr__(&self) -> String { @@ -454,22 +413,6 @@ impl PyNeatmParams { self.zero_mags(), ) } - - /// Save a list to a binary file. - #[staticmethod] - #[pyo3(name = "save_list")] - pub fn py_save_list(vec: Vec, filename: String) -> PyResult<()> { - let vec: Vec<_> = vec.into_iter().map(|x| x.0).collect(); - Ok(kete_core::flux::NeatmParams::save_vec(&vec, filename)?) - } - - /// Load a list from a binary file. - #[staticmethod] - #[pyo3(name = "load_list")] - pub fn py_load_list(filename: String) -> PyResult> { - let res = kete_core::flux::NeatmParams::load_vec(filename)?; - Ok(res.into_iter().map(Self).collect()) - } } /// FRM Model parameters. @@ -523,11 +466,11 @@ impl From for PyFrmParams { impl PyFrmParams { #[new] #[allow(clippy::too_many_arguments, missing_docs)] - #[pyo3(signature = (desig, band_wavelength, band_albedos, h_mag=None, diam=None, + #[pyo3(signature = (desig, band_wavelengths, band_albedos, h_mag=None, diam=None, vis_albedo=None, g_param=0.15, c_hg=None, emissivity=0.9, zero_mags=None))] pub fn new( desig: String, - band_wavelength: Vec, + band_wavelengths: Vec, band_albedos: Vec, h_mag: Option, diam: Option, @@ -537,12 +480,16 @@ impl PyFrmParams { emissivity: f64, zero_mags: Option>, ) -> PyResult { + let n_bands = band_wavelengths.len(); + + let zero_mags = zero_mags.unwrap_or(vec![f64::NAN; n_bands]); let hg_params = HGParams::try_new(desig, g_param, h_mag, c_hg, vis_albedo, diam)?; - let obs_bands = ObserverBands::Generic { - solar_correction: vec![1.0; band_wavelength.len()], - bands: band_wavelength, - zero_mags, - }; + + let obs_bands = band_wavelengths + .iter() + .zip(zero_mags) + .map(|(wavelength, z_mag)| BandInfo::new(*wavelength, 1.0, z_mag, None)) + .collect(); Ok(kete_core::flux::FrmParams { obs_bands, @@ -689,7 +636,7 @@ impl PyFrmParams { /// List of effective wavelengths in nm. #[getter] pub fn band_wavelength(&self) -> Vec { - self.0.obs_bands.band_wavelength().to_vec() + self.0.obs_bands.iter().map(|x| x.wavelength).collect() } /// List of albedoes of the object, one for each band. @@ -730,21 +677,8 @@ impl PyFrmParams { /// List of the zero mags for each band if provided. #[getter] - pub fn zero_mags(&self) -> Option> { - self.0.obs_bands.zero_mags().map(|x| x.to_vec()) - } - - /// Save results to a binary file. - #[pyo3(name = "save")] - pub fn py_save(&self, filename: String) -> PyResult { - Ok(self.0.save(filename)?) - } - - /// Load results from a binary file. - #[staticmethod] - #[pyo3(name = "load")] - pub fn py_load(filename: String) -> PyResult { - Ok(Self(kete_core::flux::FrmParams::load(filename)?)) + pub fn zero_mags(&self) -> Vec { + self.0.obs_bands.iter().map(|x| x.zero_mag).collect() } fn __repr__(&self) -> String { @@ -762,20 +696,4 @@ impl PyFrmParams { self.zero_mags(), ) } - - /// Save a list to a binary file. - #[staticmethod] - #[pyo3(name = "save_list")] - pub fn py_save_list(vec: Vec, filename: String) -> PyResult<()> { - let vec: Vec<_> = vec.into_iter().map(|x| x.0).collect(); - Ok(kete_core::flux::FrmParams::save_vec(&vec, filename)?) - } - - /// Load a list from a binary file. 
- #[staticmethod] - #[pyo3(name = "load_list")] - pub fn py_load_list(filename: String) -> PyResult> { - let res = kete_core::flux::FrmParams::load_vec(filename)?; - Ok(res.into_iter().map(Self).collect()) - } } diff --git a/src/kete/rust/spice/pck.rs b/src/kete/rust/spice/pck.rs index 5d796db..c406a75 100644 --- a/src/kete/rust/spice/pck.rs +++ b/src/kete/rust/spice/pck.rs @@ -53,14 +53,15 @@ pub fn pck_earth_frame_py( None => Desig::Empty, } }; - let pcks = LOADED_PCK.try_read().unwrap(); + let pcks = &LOADED_PCK.try_read().unwrap(); + let spks = &LOADED_SPK.try_read().unwrap(); + let frame = pcks.try_get_orientation(3000, jd)?; let (pos, vel) = frame.to_equatorial(pos.into(), [0.0, 0.0, 0.0].into())?; let mut state: State = State::new(desig, jd, pos.into(), vel.into(), 399); - let spks = &LOADED_SPK.try_read().unwrap(); spks.try_change_center(&mut state, new_center)?; Ok(PyState { diff --git a/src/kete/rust/state_transition.rs b/src/kete/rust/state_transition.rs index 24beefc..7389501 100644 --- a/src/kete/rust/state_transition.rs +++ b/src/kete/rust/state_transition.rs @@ -22,7 +22,8 @@ pub fn compute_stm_py( 10, ); - let (final_state, stm) = compute_state_transition(&mut state, jd_end.into(), central_mass); + let (final_state, stm) = + compute_state_transition(&mut state, jd_end.into(), central_mass).unwrap(); (final_state, stm.into()) } diff --git a/src/kete/rust/time.rs b/src/kete/rust/time.rs index 46b4ac2..3ed695f 100644 --- a/src/kete/rust/time.rs +++ b/src/kete/rust/time.rs @@ -159,7 +159,7 @@ impl PyTime { /// Time in the current time. #[staticmethod] pub fn now() -> Self { - PyTime(Time::::now().unwrap().tdb()) + PyTime(Time::::now().tdb()) } /// Return (year, month, day), where day is a float. @@ -167,7 +167,7 @@ impl PyTime { /// >>> kete.Time.from_ymd(2010, 1, 1).ymd /// (2010, 1, 1.0) #[getter] - pub fn ymd(&self) -> (i64, u32, f64) { + pub fn ymd(&self) -> (i32, u32, f64) { let (y, m, d, f) = self.0.utc().year_month_day(); (y, m, d as f64 + f) } @@ -228,8 +228,8 @@ impl PyTime { /// 2016.0 /// #[getter] - pub fn year_float(&self) -> f64 { - self.0.utc().year_as_float() + pub fn year_float(&self) -> PyResult { + Ok(self.0.utc().year_as_float()?) } fn __add__(&self, other: PyTime) -> Self { diff --git a/src/kete/state_transition.py b/src/kete/state_transition.py index da82bae..166c5ba 100644 --- a/src/kete/state_transition.py +++ b/src/kete/state_transition.py @@ -33,7 +33,9 @@ def compute_stm(state: State, jd_end: float) -> NDArray: np.ndarray Returns the 6x6 state transition matrix. 
""" - return np.array(_core.compute_stm(np.array(state).ravel(), state.jd, jd_end)[1]) + s = list(state.pos) + s = s + list(state.vel) + return np.array(_core.compute_stm(s, state.jd, jd_end, 1.0)[1]) def propagate_covariance(state: State, covariance: NDArray, jd_end: float) -> NDArray: diff --git a/src/kete_core/benches/propagation.rs b/src/kete_core/benches/propagation.rs index 159aabc..97d965d 100644 --- a/src/kete_core/benches/propagation.rs +++ b/src/kete_core/benches/propagation.rs @@ -7,7 +7,7 @@ use std::time::Duration; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use kete_core::prelude::*; -use kete_core::*; +use kete_core::{constants, propagation}; use pprof::criterion::{Output, PProfProfiler}; use rayon::iter::{IntoParallelIterator, ParallelIterator}; @@ -63,7 +63,7 @@ fn prop_n_body_vec_radau(mut state: State, dt: f64) { let _ = propagation::propagate_n_body_vec(states, jd, None, non_gravs).unwrap(); } -fn prop_n_body_radau_par(state: State, dt: f64) { +fn prop_n_body_radau_par(state: &State, dt: f64) { let states: Vec> = (0..100).map(|_| state.clone()).collect(); let _tmp: Vec> = states .into_par_iter() @@ -74,38 +74,13 @@ fn prop_n_body_radau_par(state: State, dt: f64) { .collect(); } -fn prop_2_body_radau(state: State, dt: f64) { - let jd = state.epoch + dt; - let _ = propagation::propagation_central(&state.into_frame(), jd).unwrap(); -} - -fn prop_2_body_kepler(state: State, dt: f64) { - let _ = propagate_two_body(&state, state.epoch + dt).unwrap(); -} - -/// Benchmark functions for the propagation algorithms -pub fn two_body_numeric(c: &mut Criterion) { - let mut twobody_num_group = c.benchmark_group("2-Body-Numeric"); - - for state in [ - CIRCULAR.clone(), - ELLIPTICAL.clone(), - PARABOLIC.clone(), - HYPERBOLIC.clone(), - ] { - let name = match &state.desig { - Desig::Name(n) => n, - _ => panic!(), - }; - let _ = - twobody_num_group.bench_with_input(BenchmarkId::new("Single", name), &state, |b, s| { - b.iter(|| prop_2_body_radau(black_box(s.clone()), black_box(1000.0))); - }); - } +fn prop_2_body_kepler(state: &State, dt: f64) { + let _ = propagate_two_body(state, state.epoch + dt).unwrap(); } /// Benchmark functions for the propagation algorithms -pub fn n_body_prop(c: &mut Criterion) { +#[allow(clippy::missing_panics_doc, reason = "Benchmarking only")] +fn n_body_prop(c: &mut Criterion) { let mut nbody_group = c.benchmark_group("N-Body"); for state in [ @@ -114,21 +89,21 @@ pub fn n_body_prop(c: &mut Criterion) { PARABOLIC.clone(), HYPERBOLIC.clone(), ] { - let name = match &state.desig { - Desig::Name(n) => n, - _ => panic!(), + let Desig::Name(name) = &state.desig else { + panic!() }; let _ = nbody_group.bench_with_input(BenchmarkId::new("Single", name), &state, |b, s| { b.iter(|| prop_n_body_radau(black_box(s.clone()), black_box(1000.0))); }); let _ = nbody_group.bench_with_input(BenchmarkId::new("Parallel", name), &state, |b, s| { - b.iter(|| prop_n_body_radau_par(black_box(s.clone()), black_box(1000.0))); + b.iter(|| prop_n_body_radau_par(black_box(s), black_box(1000.0))); }); } } /// Benchmark functions for the propagation algorithms +#[allow(clippy::missing_panics_doc, reason = "Benchmarking only")] pub fn n_body_prop_vec(c: &mut Criterion) { let mut nbody_group = c.benchmark_group("N-Body-Vec"); @@ -138,9 +113,8 @@ pub fn n_body_prop_vec(c: &mut Criterion) { PARABOLIC.clone(), HYPERBOLIC.clone(), ] { - let name = match &state.desig { - Desig::Name(n) => n, - _ => panic!(), + let Desig::Name(name) = &state.desig else { + panic!() }; 
let _ = nbody_group.bench_with_input(BenchmarkId::new("Single", name), &state, |b, s| { b.iter(|| prop_n_body_vec_radau(black_box(s.clone()), black_box(1000.0))); @@ -149,6 +123,7 @@ pub fn n_body_prop_vec(c: &mut Criterion) { } /// Benchmark functions for the propagation algorithms +#[allow(clippy::missing_panics_doc, reason = "Benchmarking only")] pub fn two_body_analytic(c: &mut Criterion) { let mut twobody_group = c.benchmark_group("2-Body-Analytic"); @@ -158,18 +133,17 @@ pub fn two_body_analytic(c: &mut Criterion) { PARABOLIC.clone(), HYPERBOLIC.clone(), ] { - let name = match &state.desig { - Desig::Name(n) => n, - _ => panic!(), + let Desig::Name(name) = &state.desig else { + panic!() }; let _ = twobody_group.bench_with_input(BenchmarkId::new("Single", name), &state, |b, s| { - b.iter(|| prop_2_body_kepler(s.clone(), black_box(1000.0))); + b.iter(|| prop_2_body_kepler(s, black_box(1000.0))); }); } } criterion_group!(name=benches; config = Criterion::default().sample_size(30).measurement_time(Duration::from_secs(15)).with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); - targets=n_body_prop_vec, two_body_analytic, n_body_prop, two_body_numeric); + targets=n_body_prop_vec, two_body_analytic, n_body_prop); criterion_main!(benches); diff --git a/src/kete_core/benches/spice.rs b/src/kete_core/benches/spice.rs index 5f0b261..e2a041c 100644 --- a/src/kete_core/benches/spice.rs +++ b/src/kete_core/benches/spice.rs @@ -33,6 +33,7 @@ fn spice_get_state(jd: f64) { } } +#[allow(clippy::missing_panics_doc, reason = "Benchmarking only")] pub fn spice_benchmark(c: &mut Criterion) { let spice = &LOADED_SPK.try_read().unwrap(); let state = spice diff --git a/src/kete_core/benches/thermal.rs b/src/kete_core/benches/thermal.rs index 5315c41..b949617 100644 --- a/src/kete_core/benches/thermal.rs +++ b/src/kete_core/benches/thermal.rs @@ -3,7 +3,7 @@ #![allow(clippy::missing_assert_message, reason = "Unnecessary for benchmarks")] use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; -use kete_core::flux::{FrmParams, HGParams, NeatmParams}; +use kete_core::flux::{BandInfo, FrmParams, HGParams, NeatmParams}; use pprof::criterion::{Output, PProfProfiler}; use std::hint::black_box; @@ -23,6 +23,7 @@ fn frm_bench(params: &FrmParams) { ); } +#[allow(clippy::missing_panics_doc, reason = "Benchmarking only")] pub fn neatm_benchmark(c: &mut Criterion) { let mut neatm_group = c.benchmark_group("NEATM"); @@ -36,18 +37,21 @@ pub fn neatm_benchmark(c: &mut Criterion) { ) .unwrap(); let wise_params = NeatmParams { - obs_bands: kete_core::flux::ObserverBands::Wise, + obs_bands: BandInfo::new_wise().to_vec(), band_albedos: vec![0.2; 4], beaming: 1.0, hg_params: hg_params.clone(), emissivity: 0.9, }; + + let obs_bands = [1000.0; 4] + .iter() + .zip([f64::NAN; 4]) + .map(|(wavelength, z_mag)| BandInfo::new(*wavelength, 1.0, z_mag, None)) + .collect(); + let generic_params = NeatmParams { - obs_bands: kete_core::flux::ObserverBands::Generic { - bands: vec![1000.0; 4], - zero_mags: None, - solar_correction: vec![1.0; 4], - }, + obs_bands, band_albedos: vec![0.2; 4], beaming: 1.0, hg_params, @@ -62,6 +66,7 @@ pub fn neatm_benchmark(c: &mut Criterion) { }); } +#[allow(clippy::missing_panics_doc, reason = "Benchmarking only")] pub fn frm_benchmark(c: &mut Criterion) { let mut frm_group = c.benchmark_group("FRM"); @@ -75,17 +80,20 @@ pub fn frm_benchmark(c: &mut Criterion) { ) .unwrap(); let wise_params = FrmParams { - obs_bands: kete_core::flux::ObserverBands::Wise, + obs_bands: 
BandInfo::new_wise().to_vec(), band_albedos: vec![0.2; 4], hg_params: hg_params.clone(), emissivity: 0.9, }; + + let obs_bands = [1000.0; 4] + .iter() + .zip([f64::NAN; 4]) + .map(|(wavelength, z_mag)| BandInfo::new(*wavelength, 1.0, z_mag, None)) + .collect(); + let generic_params = FrmParams { - obs_bands: kete_core::flux::ObserverBands::Generic { - bands: vec![1000.0; 4], - zero_mags: None, - solar_correction: vec![1.0; 4], - }, + obs_bands, band_albedos: vec![0.2; 4], hg_params, emissivity: 0.9, diff --git a/src/kete_core/src/cache.rs b/src/kete_core/src/cache.rs index 706a688..b15f354 100644 --- a/src/kete_core/src/cache.rs +++ b/src/kete_core/src/cache.rs @@ -50,18 +50,13 @@ use crate::errors::{Error, KeteResult}; /// /// If the ``KETE_CACHE_DIR`` environment variable is not set, it /// creates a directory in the home directory of the user. +/// +/// # Errors +/// Errors can occur if the home directory is not found or if the +/// ``KETE_CACHE_DIR`` does not exist. pub fn cache_dir() -> KeteResult { - env::var("KETE_CACHE_DIR") - .map(|env_path| { - let path = PathBuf::from(env_path); - if !path.exists() { - return Err(Error::IOError(format!( - "KETE_CACHE_DIR does not exist: {path:?}" - ))); - } - Ok(path) - }) - .unwrap_or_else(|_| { + env::var("KETE_CACHE_DIR").map_or_else( + |_| { let user_dirs = UserDirs::new().ok_or(Error::IOError("Failed to find home directory.".into()))?; let path = user_dirs.home_dir(); @@ -70,7 +65,18 @@ pub fn cache_dir() -> KeteResult { std::fs::create_dir_all(&path)?; } Ok(path) - }) + }, + |env_path| { + let path = PathBuf::from(env_path); + if !path.exists() { + return Err(Error::IOError(format!( + "KETE_CACHE_DIR does not exist: {}", + path.display() + ))); + } + Ok(path) + }, + ) } #[cfg_attr(feature = "pyo3", pyo3::pyfunction(signature = (sub_path = "")))] @@ -80,6 +86,9 @@ pub fn cache_dir() -> KeteResult { /// are not required for basic function. /// /// This will create the folder if it does not exist. +/// +/// # Errors +/// Fails if cache directory cannot be found or created. pub fn cache_path(sub_path: &str) -> KeteResult { let mut path = cache_dir()?; path.push(sub_path); diff --git a/src/kete_core/src/constants/gravity.rs b/src/kete_core/src/constants/gravity.rs index 5721f40..e270e70 100644 --- a/src/kete_core/src/constants/gravity.rs +++ b/src/kete_core/src/constants/gravity.rs @@ -120,7 +120,7 @@ impl FromStr for GravParams { } /// Gravity parameter Singleton -static MASSES_KNOWN: std::sync::LazyLock>> = +pub static MASSES_KNOWN: std::sync::LazyLock>> = std::sync::LazyLock::new(|| { let mut singleton = Vec::new(); let text = std::str::from_utf8(include_bytes!("../../data/masses.tsv")) @@ -135,7 +135,7 @@ static MASSES_KNOWN: std::sync::LazyLock>> = }); /// Gravity parameter Singleton -static MASSES_SELECTED: std::sync::LazyLock>> = +pub static MASSES_SELECTED: std::sync::LazyLock>> = std::sync::LazyLock::new(|| { let mut singleton = Vec::new(); // pre-add the planets and the 5 most massive asteroids from the masses_known list @@ -157,7 +157,8 @@ static MASSES_SELECTED: std::sync::LazyLock>> = /// /// Masses must be provided as a fraction of the Sun's mass, and radius in AU. /// -/// If an object is already registered with the same NAIF ID, it will not be added again. +/// If an object is already registered with the same NAIF ID, it will not be added +/// again. 
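As an aside, a minimal sketch of how the registration helpers defined in this file (`register_custom_mass` and `registered_masses`, re-exported from `kete_core::constants` later in the diff) might be used together; the NAIF ID and mass below are hypothetical illustration values, not taken from the patch.

```rust
use kete_core::constants::{register_custom_mass, registered_masses};

fn main() {
    // Hypothetical body: the mass is given as a fraction of the Sun's mass and
    // the radius in AU. Re-registering an already known NAIF ID is ignored.
    register_custom_mass(20000001, 4.7e-10, 0.0);

    // List the currently selected massive objects used during orbit propagation:
    // (name, NAIF ID, mass, radius).
    for (name, naif_id, mass, radius) in registered_masses() {
        println!("{name} ({naif_id}): mass = {mass}, radius = {radius}");
    }
}
```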
#[cfg_attr(feature = "pyo3", pyfunction, pyo3(signature=(naif_id, mass, radius=0.0)))] pub fn register_custom_mass(naif_id: i32, mass: f64, radius: f32) { let params = GravParams::new(naif_id, mass * GMS, radius); @@ -167,7 +168,11 @@ pub fn register_custom_mass(naif_id: i32, mass: f64, radius: f32) { /// Register a new massive object to be used in the extended list of objects. /// /// This looks up the mass and radius of the object by its NAIF ID of the known -/// masses, panics if the object is not found. +/// masses. +/// +/// # Panics +/// Panic if a write lock cannot be put on [`MASSES_SELECTED`] or the object's mass is +/// not known. #[cfg_attr(feature = "pyo3", pyfunction)] pub fn register_mass(naif_id: i32) { let known_masses = GravParams::known_masses(); @@ -178,7 +183,8 @@ pub fn register_mass(naif_id: i32) { panic!("Failed to find mass for NAIF ID {naif_id}"); } -/// List the massive objects in the extended list of objects to be used during orbit propagation. +/// List the massive objects in the extended list of objects to be used during orbit +/// propagation. /// /// This is meant to be human readable, and will return: /// (the name of the object, @@ -186,6 +192,7 @@ pub fn register_mass(naif_id: i32) { /// the mass, /// the radius) #[cfg_attr(feature = "pyo3", pyfunction)] +#[must_use] pub fn registered_masses() -> Vec<(String, i32, f64, f32)> { let params = GravParams::selected_masses(); params @@ -209,6 +216,7 @@ pub fn registered_masses() -> Vec<(String, i32, f64, f32)> { /// the mass, /// the radius) #[cfg_attr(feature = "pyo3", pyfunction)] +#[must_use] pub fn known_masses() -> Vec<(String, i32, f64, f32)> { let params = GravParams::known_masses(); params @@ -226,6 +234,7 @@ pub fn known_masses() -> Vec<(String, i32, f64, f32)> { impl GravParams { /// Create a new [`GravParams`] object. + #[must_use] pub fn new(naif_id: i32, mass: f64, radius: f32) -> Self { Self { naif_id, @@ -281,6 +290,9 @@ impl GravParams { } /// Add this [`GravParams`] to the singleton. + /// + /// # Panics + /// Panic if a write lock cannot be put on [`MASSES_SELECTED`]. pub fn register(self) { let mut params = MASSES_SELECTED.write().unwrap(); // Check if the GravParams already exists @@ -291,16 +303,23 @@ impl GravParams { } /// Get a read-only reference to the singleton. + /// + /// # Panics + /// Panic if a read lock cannot be put on [`MASSES_KNOWN`]. pub fn known_masses() -> crossbeam::sync::ShardedLockReadGuard<'static, Vec> { MASSES_KNOWN.read().unwrap() } /// Currently selected masses for use in orbit propagation. + /// + /// # Panics + /// Panic if a read lock cannot be put on [`MASSES_SELECTED`]. pub fn selected_masses() -> crossbeam::sync::ShardedLockReadGuard<'static, Vec> { MASSES_SELECTED.read().unwrap() } /// List of all known massive planets and the Moon. + #[must_use] pub fn planets() -> Vec { let known = Self::known_masses(); let mut planets = Vec::new(); @@ -313,6 +332,7 @@ impl GravParams { } /// List of Massive planets, but merge the moon and earth together. 
+ #[must_use] pub fn simplified_planets() -> Vec { let known = Self::known_masses(); let mut planets = Vec::new(); @@ -335,7 +355,7 @@ fn j2_correction(rel_pos: &Vector3, radius: f32, j2: &f64, mass: &f64) -> V // this is formatted a little funny in an attempt to reduce numerical noise // 3/2 * j2 * mass * radius^2 / distance^5 - let coef = 1.5 * j2 * mass * (radius as f64 / r).powi(2) * r.powi(-3); + let coef = 1.5 * j2 * mass * (f64::from(radius) / r).powi(2) * r.powi(-3); Vector3::::new( rel_pos.x * coef * (z_squared - 1.0), rel_pos.y * coef * (z_squared - 1.0), diff --git a/src/kete_core/src/constants/mod.rs b/src/kete_core/src/constants/mod.rs index c8b83a6..5aac0ac 100644 --- a/src/kete_core/src/constants/mod.rs +++ b/src/kete_core/src/constants/mod.rs @@ -37,8 +37,8 @@ mod universal; mod wise; pub use gravity::{ - EARTH_J2, EARTH_J3, EARTH_J4, GMS, GMS_SQRT, GravParams, JUPITER_J2, SUN_J2, known_masses, - register_custom_mass, register_mass, registered_masses, + EARTH_J2, EARTH_J3, EARTH_J4, GMS, GMS_SQRT, GravParams, JUPITER_J2, MASSES_KNOWN, + MASSES_SELECTED, SUN_J2, known_masses, register_custom_mass, register_mass, registered_masses, }; pub use neos::{NEOS_BANDS, NEOS_HEIGHT, NEOS_SUN_CORRECTION, NEOS_WIDTH, NEOS_ZERO_MAG}; pub use universal::{ diff --git a/src/kete_core/src/constants/wise.rs b/src/kete_core/src/constants/wise.rs index 5c7684a..38907df 100644 --- a/src/kete_core/src/constants/wise.rs +++ b/src/kete_core/src/constants/wise.rs @@ -56,6 +56,7 @@ pub const WISE_WIDTH: f64 = 0.01367174580728; /// /// * `temps` - Vec of temperatures in kelvin. #[inline] +#[must_use] pub fn w1_color_correction(temp: f64) -> f64 { let temp = &temp.clamp(100.0, 400.0); (-3.78226591e-01 + 3.50431748e-03 * temp + 1.45866307e-05 * temp.powi(2) @@ -70,6 +71,7 @@ pub fn w1_color_correction(temp: f64) -> f64 { /// /// * `temps` - Vec of temperatures in kelvin. #[inline] +#[must_use] pub fn w2_color_correction(temp: f64) -> f64 { let temp = &temp.clamp(100.0, 400.0); (-8.60229377e-01 + 1.54988562e-02 * temp - 5.10705456e-05 * temp.powi(2) @@ -84,6 +86,7 @@ pub fn w2_color_correction(temp: f64) -> f64 { /// /// * `temps` - Vec of temperatures in kelvin. #[inline] +#[must_use] pub fn w3_color_correction(temp: f64) -> f64 { let temp = &temp.clamp(100.0, 400.0); (-1.29814355 + 2.43268763e-02 * temp - 9.05178737e-05 * temp.powi(2) @@ -98,6 +101,7 @@ pub fn w3_color_correction(temp: f64) -> f64 { /// /// * `temps` - Vec of temperatures in kelvin. #[inline] +#[must_use] pub fn w4_color_correction(temp: f64) -> f64 { let temp = &temp.clamp(100.0, 400.0).ln(); 2.17247804e+01 + -1.46084733e+01 * temp + 3.85364000e+00 * temp.powi(2) @@ -107,8 +111,8 @@ pub fn w4_color_correction(temp: f64) -> f64 { /// The combined color corrections for WISE. pub const WISE_CC: [ColorCorrFn; 4] = [ - &w1_color_correction, - &w2_color_correction, - &w3_color_correction, - &w4_color_correction, + w1_color_correction, + w2_color_correction, + w3_color_correction, + w4_color_correction, ]; diff --git a/src/kete_core/src/desigs.rs b/src/kete_core/src/desigs.rs index 058cf46..819f933 100644 --- a/src/kete_core/src/desigs.rs +++ b/src/kete_core/src/desigs.rs @@ -71,6 +71,7 @@ static ORDER_CHARS: &str = "ABCDEFGHJKLMNOPQRSTUVWXYZ"; /// This enum represents all of the different types of designations /// which kete can represent. #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Hash, Eq)] +#[must_use] pub enum Desig { /// No id assigned. 
Empty, @@ -84,15 +85,15 @@ pub enum Desig { /// Comet Permanent Designation /// First element is the orbit type `CPAXD`. /// Second is the integer designation. - /// Third is if the comet is fragmented or not, if `Some` then the character - /// is the fragment letter, if `None` then it is not a fragment. + /// Third is if the comet is fragmented or not, if [`Some`] then the character + /// is the fragment letter, if [`None`] then it is not a fragment. CometPerm(char, u32, Option), /// Comet Provisional Designation /// First element is the orbit type `CPAXD` if available. /// Second is the string designation. - /// Third is if the comet is fragmented or not, if `Some` then the character - /// is the fragment letter, if `None` then it is not a fragment. + /// Third is if the comet is fragmented or not, if [`Some`] then the character + /// is the fragment letter, if [`None`] then it is not a fragment. CometProv(Option, String, Option), /// Planetary Satellite @@ -113,12 +114,25 @@ pub enum Desig { impl Desig { /// Return a full string representation of the designation, including the type. + #[must_use] pub fn full_string(&self) -> String { format!("{self:?}") } - /// Try to convert a naif ID into a name. - #[must_use] + /// Try to convert a [`Desig::Naif`] into a [`Desig::Name`] by looking it up. + /// + /// If unsuccessful this returns the original type. + /// + /// ``` + /// use kete_core::desigs::Desig; + /// + /// let desig = Desig::Naif(399).try_naif_id_to_name(); + /// assert_eq!(desig, Desig::Name("earth".into())); + /// + /// let desig = Desig::Empty.try_naif_id_to_name(); + /// assert_eq!(desig, Desig::Empty); + /// ``` + /// pub fn try_naif_id_to_name(self) -> Self { if let Self::Naif(id) = &self { if let Some(name) = try_name_from_id(*id) { @@ -131,9 +145,33 @@ impl Desig { } } - /// Convert the Desig as close to a NAIF id as is possible. + /// Convert the [`Desig::Name`] into a [`Desig::Naif`] if possible. + /// /// This will lookup a NAIF id from the name if it exists. - #[must_use] + /// + /// ``` + /// use kete_core::desigs::Desig; + /// + /// // Integers are parsed if possible + /// let desig = Desig::Name("399".into()).try_name_to_naif_id(); + /// assert_eq!(desig, Desig::Naif(399)); + /// + /// // If there is an exact match, then it is returned + /// let desig = Desig::Name("Earth".into()).try_name_to_naif_id(); + /// assert_eq!(desig, Desig::Naif(399)); + /// + /// // Partial but unique matches are returned. + /// let desig = Desig::Name("Earth b".into()).try_name_to_naif_id(); + /// assert_eq!(desig, Desig::Naif(3)); + /// + /// // Partial but not-unique will leave the input unchanged. + /// let desig = Desig::Name("Ear".into()).try_name_to_naif_id(); + /// assert_eq!(desig, Desig::Name("Ear".into())); + /// + /// // Non-Names are returned unchanged. + /// let desig = Desig::Empty.try_naif_id_to_name(); + /// assert_eq!(desig, Desig::Empty); + /// ``` pub fn try_name_to_naif_id(self) -> Self { if let Self::Name(name) = &self { if let Ok(id) = name.parse::() { @@ -141,21 +179,64 @@ impl Desig { } let naif_ids = naif_ids_from_name(name); - if naif_ids.len() == 1 { - return Self::Naif(naif_ids[0].id); + match naif_ids.as_slice() { + // if there is a single entry, return it + [i] => return Self::Naif(i.id), + _ => { + for id in &naif_ids { + // if there is an EXACT match, return it. + if id.name.to_lowercase() == name.to_lowercase() { + return Self::Naif(id.id); + } + } + } } + // if there are multiple NAIF ids, or none, do not change the designation. 
} self } - /// Convert the ``Name`` into an ``ObservatoryCode`` if possible. - #[must_use] + /// Convert the [`Desig::Name`] into an [`Desig::ObservatoryCode`] if possible. + /// + /// ``` + /// use kete_core::desigs::Desig; + /// + /// // If it is an existing observatory code, just upgrade it. + /// let desig = Desig::Name("000".into()).try_name_to_obs_code(); + /// assert_eq!(desig, Desig::ObservatoryCode("000".into())); + /// + /// // If there is an exact match, then it is returned + /// let desig = Desig::Name("Maunakea".into()).try_name_to_obs_code(); + /// assert_eq!(desig, Desig::ObservatoryCode("568".into())); + /// + /// // Partial but unique matches are returned. + /// let desig = Desig::Name("Subaru Telescope, Mauna".into()).try_name_to_obs_code(); + /// assert_eq!(desig, Desig::ObservatoryCode("T09".into())); + /// + /// // Partial but not-unique will leave the input unchanged. + /// let desig = Desig::Name(", Maunakea".into()).try_name_to_obs_code(); + /// assert_eq!(desig, Desig::Name(", Maunakea".into())); + /// + /// // Non-Names are returned unchanged. + /// let desig = Desig::Empty.try_name_to_obs_code(); + /// assert_eq!(desig, Desig::Empty); + /// ``` + /// pub fn try_name_to_obs_code(self) -> Self { if let Self::Name(name) = &self { let obs_codes = try_obs_code_from_name(name); - if obs_codes.len() == 1 { - return Self::ObservatoryCode(obs_codes[0].name.clone()); + + match obs_codes.as_slice() { + [i] => return i.code.clone(), + _ => { + for id in &obs_codes { + // if there is an EXACT match, return it. + if id.name.to_lowercase() == name.to_lowercase() { + return id.code.clone(); + } + } + } } // if there are multiple NAIF ids, or none, do not change the designation. } @@ -194,21 +275,21 @@ impl Desig { /// let desig = Desig::parse_mpc_designation("A801 AA"); /// assert_eq!(desig, Ok(Desig::Prov("A801 AA".to_string()))); /// - /// let desig = Desig::parse_mpc_designation("2026 CZ619"); - /// assert_eq!(desig, Ok(Desig::Prov("2026 CZ619".to_string()))); + /// let desig = Desig::parse_mpc_designation("2026 CZ619"); + /// assert_eq!(desig, Ok(Desig::Prov("2026 CZ619".to_string()))); /// - /// // Extended Provisional (2025-2035) - /// let desig = Desig::parse_mpc_designation("2026 CA620"); - /// assert_eq!(desig, Ok(Desig::Prov("2026 CA620".to_string()))); + /// // Extended Provisional (2025-2035) + /// let desig = Desig::parse_mpc_designation("2026 CA620"); + /// assert_eq!(desig, Ok(Desig::Prov("2026 CA620".to_string()))); /// - /// let desig = Desig::parse_mpc_designation("2028 EA339749"); - /// assert_eq!(desig, Ok(Desig::Prov("2028 EA339749".to_string()))); + /// let desig = Desig::parse_mpc_designation("2028 EA339749"); + /// assert_eq!(desig, Ok(Desig::Prov("2028 EA339749".to_string()))); /// - /// let desig = Desig::parse_mpc_designation("2026 CL591673"); - /// assert_eq!(desig, Ok(Desig::Prov("2026 CL591673".to_string()))); + /// let desig = Desig::parse_mpc_designation("2026 CL591673"); + /// assert_eq!(desig, Ok(Desig::Prov("2026 CL591673".to_string()))); /// - /// let desig = Desig::parse_mpc_designation("2029 FL591673"); - /// assert_eq!(desig, Ok(Desig::Prov("2029 FL591673".to_string()))); + /// let desig = Desig::parse_mpc_designation("2029 FL591673"); + /// assert_eq!(desig, Ok(Desig::Prov("2029 FL591673".to_string()))); /// /// // Comet provisional designations /// let desig = Desig::parse_mpc_designation("1996 N2"); @@ -234,6 +315,12 @@ impl Desig { /// assert_eq!(desig, Ok(Desig::PlanetSat(799, 4))); /// /// ``` + /// + /// # Errors + /// + /// This may 
fail for the following reasons: + /// - Empty designation provided. + /// - Parsing of the designation fails. pub fn parse_mpc_designation(designation: &str) -> KeteResult { if designation.is_empty() { return Err(Error::ValueError("Designation cannot be empty".to_string())); @@ -256,7 +343,7 @@ impl Desig { // there are no spaces, so it is not a provisional designation // it is probably a perm comet designation // check if the last character is in 'IP' - let orbit_type = designation.chars().last().unwrap(); + let orbit_type = designation.chars().last().unwrap_or(' '); if "IP".contains(orbit_type) { // it is a comet permanent designation let num = designation[..designation.len() - 1] @@ -325,16 +412,8 @@ impl Desig { "Invalid MPC Designation, looks like a comet provisional: {designation}" ))); } - let fragment = if fragment.is_empty() { - None - } else { - Some(fragment.chars().next().unwrap()) - }; - let orbit_type = if orbit_type.is_empty() { - None - } else { - Some(orbit_type.chars().next().unwrap()) - }; + let fragment = fragment.chars().next(); + let orbit_type = orbit_type.chars().next(); Ok(Self::CometProv(orbit_type, des.to_string(), fragment)) } } @@ -432,7 +511,22 @@ impl Desig { /// let packed = Desig::PlanetSat(799, 4).try_pack(); /// assert_eq!(packed, Ok("U004S".to_string())); /// ``` + /// + /// # Errors + /// + /// This may fail for the following reasons: + /// - Empty designation provided. + /// - Parsing of the designation fails. + #[allow(clippy::missing_panics_doc, reason = "Should not panic.")] pub fn try_pack(&self) -> KeteResult { + static YEAR_LOOKUP: &[(&str, &str); 5] = &[ + ("18", "I"), + ("19", "J"), + ("20", "K"), + ("A9", "J"), + ("A8", "I"), + ]; + match self { Self::Empty => Err(Error::ValueError( "Cannot pack an empty designation".to_string(), @@ -501,7 +595,8 @@ impl Desig { match num { num if (num >= 620) => { // order counts A = 1, B = 2 ... skipping I - let single_count = ORDER_CHARS.find(order).unwrap() as u32; + let single_count = + u32::try_from(ORDER_CHARS.find(order).unwrap()).unwrap(); let total_num = 25 * num + single_count; let num_packed = num_to_mpc_hex(total_num - 15_500); @@ -515,13 +610,7 @@ impl Desig { // unlike the `order`, this DOES include I... let idx = MPC_HEX.chars().nth(num as usize / 10).unwrap(); let idy = num % 10; - static YEAR_LOOKUP: &[(&str, &str); 5] = &[ - ("18", "I"), - ("19", "J"), - ("20", "K"), - ("A9", "J"), - ("A8", "I"), - ]; + let century = YEAR_LOOKUP.iter().find(|&&x| x.0 == &year[0..2]).map_or( Err(Error::ValueError(format!( @@ -579,7 +668,7 @@ impl Desig { let packed = Self::Prov(unpacked.to_string()).try_pack()?; Ok(format!( "{}{}", - orbit_type.map_or("".to_string(), |o| o.to_string()), + orbit_type.map_or(String::new(), |o| o.to_string()), packed )) } @@ -588,6 +677,9 @@ impl Desig { } /// Unpacked a MPC packed designation into a [`Desig`] enum. + /// + /// # Errors + /// Returns error if parsing fails. pub fn parse_mpc_packed_designation(packed: &str) -> KeteResult { if packed.len() == 5 { unpack_perm_designation(packed) @@ -641,6 +733,7 @@ impl Display for Desig { /// let hex_str = num_to_mpc_hex(63); /// assert_eq!(hex_str, "11"); /// ``` +#[must_use] pub fn num_to_mpc_hex(mut num: u32) -> String { let mut result = String::new(); @@ -649,6 +742,10 @@ pub fn num_to_mpc_hex(mut num: u32) -> String { } while num > 0 { let digit = (num % 62) as usize; + #[allow( + clippy::missing_panics_doc, + reason = "MPX_HEX is 62 long, so this unwrap cannot fail." 
+ )] result.push(MPC_HEX.chars().nth(digit).unwrap()); num /= 62; } @@ -665,11 +762,19 @@ pub fn num_to_mpc_hex(mut num: u32) -> String { /// let largest = mpc_hex_to_num("zzzzz"); /// assert_eq!(largest, Ok(916132831)); /// ``` +/// +/// # Errors +/// Fails when input string contains invalid characters. pub fn mpc_hex_to_num(hex: &str) -> KeteResult { let mut result = 0_u32; + + #[allow( + clippy::missing_panics_doc, + reason = "MPX_HEX is 62 long, so this unwrap cannot fail." + )] for c in hex.chars() { if let Some(pos) = MPC_HEX.find(c) { - result = result * 62 + pos as u32; + result = result * 62 + u32::try_from(pos).unwrap(); } else { return Err(Error::IOError(format!( "Invalid character in MPC hexadecimal string: {c}" @@ -715,6 +820,11 @@ pub fn mpc_hex_to_num(hex: &str) -> KeteResult { /// let desig = unpack_perm_designation("N011S"); /// assert_eq!(desig, Ok(Desig::PlanetSat(899, 11))); /// ``` +/// +/// # Errors +/// Fails when input string contains invalid characters. +/// +#[allow(clippy::missing_panics_doc, reason = "Should not panic.")] pub fn unpack_perm_designation(designation: &str) -> KeteResult { if designation.len() != 5 { return Err(Error::ValueError(format!( @@ -771,7 +881,7 @@ pub fn unpack_perm_designation(designation: &str) -> KeteResult { // split string by first character and remaining let (first_char, remaining) = designation.split_at(1); if let Some(pos) = MPC_HEX.find(first_char) { - let first_num = pos as u32 * 10_000; + let first_num = u32::try_from(pos).unwrap() * 10_000; let rest_num = remaining.parse::().map_err(|e| { Error::ValueError(format!("Failed to parse rest of designation: {e}")) })?; @@ -851,6 +961,11 @@ pub fn unpack_perm_designation(designation: &str) -> KeteResult { /// let desig = unpack_prov_designation("T3S4101").unwrap(); /// assert_eq!(desig, Desig::Prov("4101 T-3".to_string())); /// ``` +/// +/// # Errors +/// Fails when input string contains invalid characters. +/// +#[allow(clippy::missing_panics_doc, reason = "Should not panic.")] pub fn unpack_prov_designation(designation: &str) -> KeteResult { if designation.len() > 8 { return Err(Error::ValueError(format!( @@ -909,8 +1024,10 @@ pub fn unpack_prov_designation(designation: &str) -> KeteResult { }; if is_comet { let century = MPC_HEX.find(&designation[..1]).ok_or_else(err)? * 100; - let year = designation[1..3].parse::().map_err(|_| err())? + century as u32; - let comet_num = MPC_HEX.find(&designation[4..5]).ok_or_else(err)? as u32 * 10 + let year = + designation[1..3].parse::().map_err(|_| err())? + u32::try_from(century).unwrap(); + let comet_num = u32::try_from(MPC_HEX.find(&designation[4..5]).ok_or_else(err)?).unwrap() + * 10 + designation[5..6].parse::().map_err(|_| err())?; let half_month = &designation[3..4]; let frag = designation.chars().nth(6).unwrap(); @@ -952,14 +1069,14 @@ fn unpack_ast_prov_desig(designation: &str) -> KeteResult { "Invalid MPC Provisional Designation: {designation}" )) }; - let century = MPC_HEX.find(&designation[..1]).ok_or_else(err)? as u32 * 100; + let century = u32::try_from(MPC_HEX.find(&designation[..1]).ok_or_else(err)?).unwrap() * 100; let year = designation[1..3].parse::().map_err(|_| err())? + century; let year = if year < 1925 { format!("A{}", year % 1000) } else { year.to_string() }; - let obs_num = MPC_HEX.find(&designation[4..5]).ok_or_else(err)? 
as u32 * 10 + let obs_num = u32::try_from(MPC_HEX.find(&designation[4..5]).ok_or_else(err)?).unwrap() * 10 + designation[5..6].parse::().map_err(|_| err())?; let obs_str = if obs_num == 0 { String::new() @@ -983,7 +1100,7 @@ fn unpack_ast_extended_prov_des(designation: &str) -> KeteResult { if !is_extended || designation.len() != 7 { return Err(err()); } - let year = 2000 + MPC_HEX.find(&designation[1..2]).ok_or_else(err)? as u32; + let year = 2000 + u32::try_from(MPC_HEX.find(&designation[1..2]).ok_or_else(err)?).unwrap(); let half_month = designation.chars().nth(2).unwrap(); let total_count = mpc_hex_to_num(&designation[3..])? + 15_500; let count = total_count.div_euclid(25); @@ -1035,6 +1152,10 @@ static ROMAN_PAIRS: [(u32, &str); 13] = [ /// let roman = int_to_roman(0); /// assert_eq!(roman, Err(Error::ValueError("Number must be between 1 and 3999".into()))); /// ``` +/// +/// # Errors +/// Fails when input is either 0 or greater than 3999. +/// pub fn int_to_roman(mut num: u32) -> KeteResult { if num > 3999 || num == 0 { return Err(Error::ValueError( @@ -1070,6 +1191,9 @@ pub fn int_to_roman(mut num: u32) -> KeteResult { /// let num = roman_to_int("XXXXX"); /// assert!(num.is_err()); /// ``` +/// # Errors +/// Fails when input contains invalid characters. +/// pub fn roman_to_int(roman: &str) -> KeteResult { let mut result = 0; let mut last_value = 4000; diff --git a/src/kete_core/src/elements.rs b/src/kete_core/src/elements.rs index abdb1da..d4b971f 100644 --- a/src/kete_core/src/elements.rs +++ b/src/kete_core/src/elements.rs @@ -77,6 +77,7 @@ pub struct CometElements { impl CometElements { /// Create cometary elements from a state. + #[must_use] pub fn from_state(state: &State) -> Self { Self::from_pos_vel( state.desig.clone(), @@ -201,6 +202,10 @@ impl CometElements { /// Convert cometary elements to an [`State`] if possible. /// /// Center ID is set to 10. + /// + /// # Errors + /// Conversion can fail for numerous reasons, examples include non-finite values, or if + /// the eccentric anomaly computation fails. pub fn try_to_state(&self) -> KeteResult> { let [pos, vel] = self.to_pos_vel()?; Ok(State::new( @@ -293,6 +298,9 @@ impl CometElements { } /// Compute the eccentric anomaly for the cometary elements. + /// + /// # Errors + /// May fail if extremum values are provided. pub fn eccentric_anomaly(&self) -> KeteResult { compute_eccentric_anomaly(self.eccentricity, self.mean_anomaly(), self.peri_dist).map(|x| { match self.eccentricity { @@ -304,6 +312,7 @@ impl CometElements { /// Compute the semi major axis in AU. /// NAN is returned if the orbit is parabolic. + #[must_use] pub fn semi_major(&self) -> f64 { match self.eccentricity { ecc if ((ecc - 1.0).abs() <= PARABOLIC_ECC_LIMIT) => f64::NAN, @@ -313,6 +322,7 @@ impl CometElements { /// Compute the orbital period in days. /// Infinity is returned if the orbit is parabolic or hyperbolic. + #[must_use] pub fn orbital_period(&self) -> f64 { let semi_major = self.semi_major(); match semi_major { @@ -322,6 +332,7 @@ impl CometElements { } /// Compute the Aphelion distance in AU. + #[must_use] pub fn aphelion(&self) -> f64 { match self.eccentricity { ecc if ((ecc - 1.0).abs() <= PARABOLIC_ECC_LIMIT) => f64::NAN, @@ -330,6 +341,7 @@ impl CometElements { } /// Compute the mean motion in radians per day. + #[must_use] pub fn mean_motion(&self) -> f64 { match self.eccentricity { ecc if ((ecc - 1.0).abs() <= PARABOLIC_ECC_LIMIT) => { @@ -340,6 +352,7 @@ impl CometElements { } /// Compute the mean anomaly in radians. 
+ #[must_use] pub fn mean_anomaly(&self) -> f64 { let mm = self.mean_motion(); let mean_anomaly = (self.epoch - self.peri_time).elapsed * mm; @@ -352,6 +365,11 @@ impl CometElements { /// Compute the True Anomaly /// The angular distance between perihelion and the current position as seen /// from the origin. + /// + /// # Errors + /// + /// Fails for numerous reasons, including if negative eccentricity is provided or if it + /// is a non finite value. pub fn true_anomaly(&self) -> KeteResult { compute_true_anomaly(self.eccentricity, self.mean_anomaly(), self.peri_dist) } diff --git a/src/kete_core/src/errors.rs b/src/kete_core/src/errors.rs index d5d7b27..944e657 100644 --- a/src/kete_core/src/errors.rs +++ b/src/kete_core/src/errors.rs @@ -34,13 +34,14 @@ /// Define all errors which may be raise by this crate, as well as optionally provide /// conversion to pyo3 error types which allow for the errors to be raised in Python. use chrono::ParseError; -use std::{error, fmt, io}; +use std::{error, fmt, io, sync::TryLockError}; /// kete specific result. pub type KeteResult = Result; /// Possible Errors which may be raised by this crate. #[derive(Debug, Clone, PartialEq)] +#[non_exhaustive] pub enum Error { /// Numerical method did not converge within the algorithms limits. Convergence(String), @@ -49,7 +50,7 @@ pub enum Error { ValueError(String), /// Attempting to query outside of data limits. - ExceedsLimits(String), + Bounds(String), /// Attempting to load or convert to/from an Frame of reference which is not known. UnknownFrame(i32), @@ -59,6 +60,9 @@ pub enum Error { /// Propagator detected an impact. Impact(i32, Time), + + /// Failed to acquire lock on memory. + LockFailed, } impl error::Error for Error {} @@ -66,10 +70,7 @@ impl error::Error for Error {} impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Self::Convergence(s) - | Self::ValueError(s) - | Self::ExceedsLimits(s) - | Self::IOError(s) => { + Self::Convergence(s) | Self::ValueError(s) | Self::Bounds(s) | Self::IOError(s) => { write!(f, "{s}") } Self::UnknownFrame(_) => { @@ -79,6 +80,9 @@ impl fmt::Display for Error { let t = t.jd; write!(f, "Propagation detected an impact with {s} at time {t}") } + Self::LockFailed => { + write!(f, "Failed to acquire lock on memory.") + } } } } @@ -92,15 +96,18 @@ use crate::time::{TDB, Time}; impl From for PyErr { fn from(err: Error) -> Self { match err { - Error::IOError(s) - | Error::ExceedsLimits(s) - | Error::ValueError(s) - | Error::Convergence(s) => Self::new::(s), + Error::IOError(s) | Error::Bounds(s) | Error::ValueError(s) | Error::Convergence(s) => { + Self::new::(s) + } Error::UnknownFrame(_) => { Self::new::("This reference frame is not supported.") } + Error::LockFailed => { + Self::new::("Failed to acquire lock on memory.") + } + Error::Impact(s, t) => Self::new::({ let t = t.jd; format!("Propagation detected an impact with {s} at time {t}") @@ -131,3 +138,15 @@ impl From for Error { Self::IOError(value.to_string()) } } + +impl From> for Error { + fn from(_: TryLockError) -> Self { + Self::LockFailed + } +} + +impl From for Error { + fn from(_: argmin::core::Error) -> Self { + Self::Convergence("Convergence failed".into()) + } +} diff --git a/src/kete_core/src/fitting/halley.rs b/src/kete_core/src/fitting/halley.rs index ae1eced..03108ff 100644 --- a/src/kete_core/src/fitting/halley.rs +++ b/src/kete_core/src/fitting/halley.rs @@ -49,6 +49,12 @@ use crate::{errors::Error, prelude::KeteResult}; /// assert!((root - 1.0).abs() < 
1e-12); /// ``` /// +/// # Errors +/// +/// [`Error::Convergence`] may be returned in the following cases: +/// - Any function evaluation return a non-finite value. +/// - Derivative is zero but not converged. +/// - Failed to converge within 100 iterations. #[inline(always)] pub fn halley( func: Func, @@ -71,7 +77,7 @@ where let mut f_eval: f64; let mut d_eval: f64; - let mut dd_eval: f64; + let mut d_d_eval: f64; let mut step: f64; for _ in 0..100 { f_eval = func(x); @@ -87,15 +93,15 @@ where ))?; } - dd_eval = sec_der(x); + d_d_eval = sec_der(x); - if !dd_eval.is_finite() || !d_eval.is_finite() || !f_eval.is_finite() { + if !d_d_eval.is_finite() || !d_eval.is_finite() || !f_eval.is_finite() { Err(Error::Convergence( "Halley root finding failed to converge due to non-finite evaluations".into(), ))?; } step = f_eval / d_eval; - step = step / (1.0 - step * dd_eval / (2.0 * d_eval)); + step = step / (1.0 - step * d_d_eval / (2.0 * d_eval)); x -= step; } diff --git a/src/kete_core/src/fitting/newton.rs b/src/kete_core/src/fitting/newton.rs index bcf38df..3e645dc 100644 --- a/src/kete_core/src/fitting/newton.rs +++ b/src/kete_core/src/fitting/newton.rs @@ -43,6 +43,12 @@ use crate::{errors::Error, prelude::KeteResult}; /// assert!((root - 1.0).abs() < 1e-12); /// ``` /// +/// # Errors +/// +/// [`Error::Convergence`] may be returned in the following cases: +/// - Any function evaluation return a non-finite value. +/// - Derivative is zero but not converged. +/// - Failed to converge within 100 iterations. #[inline(always)] pub fn newton_raphson(func: Func, der: Der, start: f64, atol: f64) -> KeteResult where diff --git a/src/kete_core/src/fitting/reduced_chi2.rs b/src/kete_core/src/fitting/reduced_chi2.rs index 79865dc..3e49bd7 100644 --- a/src/kete_core/src/fitting/reduced_chi2.rs +++ b/src/kete_core/src/fitting/reduced_chi2.rs @@ -27,11 +27,16 @@ // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use core::f64; + +use crate::errors::KeteResult; + use super::newton_raphson; /// Compute the reduced chi squared value from known values and standard deviations. /// This computes the reduced chi squared against a single desired value. #[inline(always)] +#[must_use] pub fn reduced_chi2(data: &[f64], sigmas: &[f64], val: f64) -> f64 { debug_assert_eq!( data.len(), @@ -46,6 +51,7 @@ pub fn reduced_chi2(data: &[f64], sigmas: &[f64], val: f64) -> f64 { /// Compute the derivative of reduced chi squared value with respect to the set value. #[inline(always)] +#[must_use] pub fn reduced_chi2_der(data: &[f64], sigmas: &[f64], val: f64) -> f64 { debug_assert_eq!( data.len(), @@ -60,14 +66,20 @@ pub fn reduced_chi2_der(data: &[f64], sigmas: &[f64], val: f64) -> f64 { /// Compute the second derivative of reduced chi squared value with respect to the set value. #[inline(always)] +#[must_use] fn reduced_chi2_der_der(sigmas: &[f64]) -> f64 { sigmas.iter().map(|sigma| 2.0 / sigma.powi(2)).sum::() } /// Given a collection of data and standard deviations, fit the best reduced chi squared value /// for the provided data. -pub fn fit_reduced_chi2(data: &[f64], sigmas: &[f64]) -> f64 { - let cost = |val: f64| -> f64 { reduced_chi2_der(data, sigmas, val) / sigmas.len() as f64 }; - let der = |_: f64| -> f64 { reduced_chi2_der_der(sigmas) / sigmas.len() as f64 }; - newton_raphson(cost, der, data[0], 1e-8).unwrap() +/// +/// # Errors +/// [`crate::prelude::Error::Convergence`] may be returned if data contains NaNs. 
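To illustrate the fallible signature introduced just below, here is a hedged usage sketch; it assumes the `fitting::fit_reduced_chi2` path used by the Python bindings earlier in this diff, and the data values are made up.

```rust
use kete_core::fitting::fit_reduced_chi2;

fn main() {
    // Hypothetical measurements and their one-sigma uncertainties.
    let data = [10.1, 9.8, 10.3];
    let sigmas = [0.2, 0.3, 0.25];

    // The fit now returns a KeteResult instead of unwrapping internally, so
    // convergence failures (e.g. NaNs in the data) surface as errors.
    match fit_reduced_chi2(&data, &sigmas) {
        Ok(best) => println!("best-fit value: {best}"),
        Err(err) => eprintln!("fit failed: {err}"),
    }
}
```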
+pub fn fit_reduced_chi2(data: &[f64], sigmas: &[f64]) -> KeteResult { + let n_sigmas = sigmas.len() as f64; + + let cost = |val: f64| -> f64 { reduced_chi2_der(data, sigmas, val) / n_sigmas }; + let der = |_: f64| -> f64 { reduced_chi2_der_der(sigmas) / n_sigmas }; + newton_raphson(cost, der, *data.first().unwrap_or(&f64::NAN), 1e-8) } diff --git a/src/kete_core/src/flux/comets.rs b/src/kete_core/src/flux/comets.rs index 68edf20..9a044b1 100644 --- a/src/kete_core/src/flux/comets.rs +++ b/src/kete_core/src/flux/comets.rs @@ -68,6 +68,7 @@ pub struct CometMKParams { impl CometMKParams { /// Create a new [`CometMKParams`] object. + #[must_use] pub fn new( desig: String, mk_1: Option<[f64; 2]>, @@ -84,6 +85,7 @@ impl CometMKParams { /// Compute the apparent total flux including both coma and nucleus of the comet. /// This includes an additional 0.035 Mag/Deg phase correction. + #[must_use] pub fn apparent_total_mag( &self, sun2obs: &Vector3, @@ -99,6 +101,7 @@ impl CometMKParams { /// Compute the apparent nuclear flux of the comet, not including the coma. /// This includes an additional 0.035 Mag/Deg phase correction. + #[must_use] pub fn apparent_nuclear_mag( &self, sun2obs: &Vector3, diff --git a/src/kete_core/src/flux/common.rs b/src/kete_core/src/flux/common.rs index 03de5f3..292978f 100644 --- a/src/kete_core/src/flux/common.rs +++ b/src/kete_core/src/flux/common.rs @@ -27,9 +27,7 @@ // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use crate::io::FileIO; use nalgebra::{UnitVector3, Vector3}; -use serde::{Deserialize, Serialize}; use std::f64::consts::PI; use crate::constants::{ @@ -40,82 +38,106 @@ use crate::constants::{ /// A function which computes the color correction on a single facet for NEATM and FRM. /// These functions must accept a temperature in kelvin, and return a value (typically /// near 1.0), which scales the final flux seen by the observer for that facet. -pub type ColorCorrFn = &'static (dyn Fn(f64) -> f64 + Sync); - -/// Observer specific Flux properties. -/// Such as band information, zero mags, and color corrections. -#[derive(Debug, Serialize, Deserialize, Clone)] -pub enum ObserverBands { - /// Wise specific observer properties - Wise, - - /// NEOS specific observer properties - Neos, - - /// Generic observer properties - /// This cannot store color correction functions. - Generic { - /// Effective central band wavelengths in nm - bands: Vec, - - /// Zero magnitudes for the listed bands, optional. - zero_mags: Option>, - - /// Flux correction for each of the bands for reflected light from the sun. - /// 1.0 means no change. - solar_correction: Vec, - }, -} +pub type ColorCorrFn = fn(f64) -> f64; -impl ObserverBands { +/// Generic band information +#[derive(Debug, Clone)] +pub struct BandInfo { /// Effective central band wavelengths in nm - pub fn band_wavelength(&self) -> &[f64] { - match self { - Self::Neos => &NEOS_BANDS, - Self::Wise => &WISE_BANDS_300K, - Self::Generic { bands, .. } => bands, - } - } + pub wavelength: f64, + + /// Flux correction for each of the bands for reflected light from the sun. + /// 1.0 means no change. + pub solar_correction: f64, + + /// Zero magnitude for the band, optional. + pub zero_mag: f64, - /// Zero magnitudes for the wavelength bands, optional. - pub fn zero_mags(&self) -> Option<&[f64]> { - match self { - Self::Neos => Some(&NEOS_ZERO_MAG), - Self::Wise => Some(&WISE_ZERO_MAG_300K), - Self::Generic { zero_mags, .. 
} => zero_mags.as_deref(), + /// Color correction functions + pub color_correction: Option, +} + +impl BandInfo { + /// # Arguments + /// ``wavelength``: Effective central band wavelengths in nm + /// ``zero_mags``: Zero magnitudes for the listed bands, optional. + /// ``solar_correction``: Flux correction for each of the bands for reflected light + /// from the sun, 1.0 means no change. + #[must_use] + pub fn new( + wavelength: f64, + solar_correction: f64, + zero_mag: f64, + color_correction: Option, + ) -> Self { + Self { + wavelength, + solar_correction, + zero_mag, + color_correction, } } - /// Flux correction for each of the bands for reflected light from the sun. - pub fn solar_correction(&self) -> &[f64] { - match self { - Self::Neos => &NEOS_SUN_CORRECTION, - Self::Wise => &WISE_SUN_CORRECTION, - Self::Generic { - solar_correction, .. - } => solar_correction, - } + #[must_use] + /// New [`BandInfo`] containing NEOS band information. + pub fn new_neos() -> [Self; 2] { + [ + Self::new( + NEOS_BANDS[0], + NEOS_SUN_CORRECTION[0], + NEOS_ZERO_MAG[0], + None, + ), + Self::new( + NEOS_BANDS[1], + NEOS_SUN_CORRECTION[1], + NEOS_ZERO_MAG[1], + None, + ), + ] } - /// Color correction for the thermal light for each band. - pub fn color_correction(&self) -> Option<&[ColorCorrFn]> { - match self { - Self::Neos => None, - Self::Wise => Some(&WISE_CC), - Self::Generic { .. } => None, - } + #[must_use] + /// New [`BandInfo`] containing WISE band information. + pub fn new_wise() -> [Self; 4] { + [ + Self::new( + WISE_BANDS_300K[0], + WISE_SUN_CORRECTION[0], + WISE_ZERO_MAG_300K[0], + Some(WISE_CC[0]), + ), + Self::new( + WISE_BANDS_300K[1], + WISE_SUN_CORRECTION[1], + WISE_ZERO_MAG_300K[1], + Some(WISE_CC[1]), + ), + Self::new( + WISE_BANDS_300K[2], + WISE_SUN_CORRECTION[2], + WISE_ZERO_MAG_300K[2], + Some(WISE_CC[2]), + ), + Self::new( + WISE_BANDS_300K[3], + WISE_SUN_CORRECTION[3], + WISE_ZERO_MAG_300K[3], + Some(WISE_CC[3]), + ), + ] } } /// Output of a flux calculation. -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Clone)] pub struct ModelResults { /// Total output fluxes in Jy, this is a sum of thermal and HG contributions. pub fluxes: Vec, /// The expected magnitude if zero magnitudes are available. These magnitudes are /// from the combined fluxes, not individual terms. - pub magnitudes: Option>, + pub magnitudes: Vec, /// Fluxes in Jy from black body emission. pub thermal_fluxes: Vec, @@ -132,10 +154,9 @@ pub struct ModelResults { pub v_band_flux: f64, } -impl FileIO for ModelResults {} - impl ModelResults { /// Compute what fraction of the total flux is due to the HG reflection model. + #[must_use] pub fn reflected_fraction(&self) -> Vec { let mut frac = vec![0.0; self.fluxes.len()]; frac.iter_mut() @@ -165,6 +186,7 @@ impl ModelResults { /// # Returns /// * Flux in Jy / steradian. #[inline(always)] +#[must_use] pub fn black_body_flux(temp: f64, wavelength: f64) -> f64 { if wavelength < 10.0 || temp < 30.0 { return 0.0; @@ -197,6 +219,7 @@ pub fn black_body_flux(temp: f64, wavelength: f64) -> f64 { /// * `diameter` - Diameter of the object in km. /// * `emissivity` - The emissivity of surface of the object. #[inline(always)] +#[must_use] pub fn lambertian_flux( facet_normal: &UnitVector3, obs2obj: &UnitVector3, @@ -218,6 +241,7 @@ pub fn lambertian_flux( /// This allows this to be computed once per geometry, but then multiple wavelengths /// be multiplied against it. This resulted in a 50% speedup in FRM and NEATM overall. 
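With `ObserverBands` replaced by a plain `Vec<BandInfo>`, observer setup is just a list of per-band structs. A minimal sketch using the constructors above, with placeholder wavelength and zero-point values; the `kete_core::flux::BandInfo` path follows the re-export added later in this diff.

```rust
use kete_core::flux::BandInfo;

fn main() {
    // A single generic band: placeholder 11,100 nm wavelength, no solar
    // correction (1.0), no zero magnitude (NaN), no color-correction function.
    let generic = vec![BandInfo::new(11_100.0, 1.0, f64::NAN, None)];

    // The preset observers now return fixed-size arrays of BandInfo.
    let wise = BandInfo::new_wise().to_vec();
    let neos = BandInfo::new_neos().to_vec();

    assert_eq!(generic.len(), 1);
    assert_eq!(wise.len(), 4);
    assert_eq!(neos.len(), 2);

    // With a NaN zero magnitude, magnitudes derived from this band are simply
    // NaN, since `magnitudes` is no longer optional in ModelResults.
    assert!(generic[0].zero_mag.is_nan());
}
```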
#[inline(always)] +#[must_use] pub fn lambertian_vis_scale_factor( facet_normal: &UnitVector3, obs2obj: &UnitVector3, @@ -246,6 +270,7 @@ pub fn lambertian_vis_scale_factor( /// * `beaming` - Beaming of the object, this is geometry dependent. /// * `emissivity` - The emissivity of the surface. #[inline(always)] +#[must_use] pub fn sub_solar_temperature( obj2sun: &Vector3, geom_albedo: f64, @@ -270,6 +295,7 @@ pub fn sub_solar_temperature( /// /// * `mag` - Magnitude. /// * `zero_mag_flux` - Flux in Jy at which the Magnitude is 0. +#[must_use] pub fn mag_to_flux(mag: f64, mag_zero_flux: f64) -> f64 { 10_f64.powf(mag / -2.5) * mag_zero_flux } @@ -280,6 +306,7 @@ pub fn mag_to_flux(mag: f64, mag_zero_flux: f64) -> f64 { /// /// * `flux` - Flux in Jy. /// * `zero_mag_flux` - Flux in Jy at which the Magnitude is 0. +#[must_use] pub fn flux_to_mag(flux: f64, mag_zero_flux: f64) -> f64 { -2.5 * (flux / mag_zero_flux).log10() } @@ -327,10 +354,10 @@ mod tests { assert_eq!(temp, 0.0); for range in 1..10 { - let obj2sun = [range as f64, 0.0, 0.0].into(); + let obj2sun = [f64::from(range), 0.0, 0.0].into(); let mut temp = sub_solar_temperature(&obj2sun, 0.0, 0.0, 1.0, 1.0); temp = temp.powi(4); - let expected = SOLAR_FLUX / (range as f64).powi(2) / STEFAN_BOLTZMANN; + let expected = SOLAR_FLUX / f64::from(range).powi(2) / STEFAN_BOLTZMANN; assert!((temp - expected).abs() < 1e-5); } } diff --git a/src/kete_core/src/flux/frm.rs b/src/kete_core/src/flux/frm.rs index 5d12197..e6fe81b 100644 --- a/src/kete_core/src/flux/frm.rs +++ b/src/kete_core/src/flux/frm.rs @@ -28,14 +28,14 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use super::{ - DEFAULT_SHAPE, HGParams, ObserverBands, + BandInfo, DEFAULT_SHAPE, HGParams, common::{ModelResults, black_body_flux, lambertian_vis_scale_factor, sub_solar_temperature}, flux_to_mag, }; -use crate::{constants::V_MAG_ZERO, io::FileIO}; +use crate::constants::V_MAG_ZERO; +use core::f64; use nalgebra::{UnitVector3, Vector3}; -use serde::{Deserialize, Serialize}; use std::f64::consts::PI; /// Using the FRM thermal model, calculate the temperature of each facet given the @@ -47,6 +47,7 @@ use std::f64::consts::PI; /// * `subsolar_temp` - The temperature at the sub-solar point in kelvin. /// * `obj2sun` - The vector from the object to the sun, unit vector. #[inline(always)] +#[must_use] pub fn frm_facet_temperature( facet_normal: &UnitVector3, subsolar_temp: f64, @@ -63,10 +64,10 @@ pub fn frm_facet_temperature( } /// FRM input -#[derive(Debug, Deserialize, Serialize, Clone)] +#[derive(Debug, Clone)] pub struct FrmParams { /// Wavelength band information of the observer. - pub obs_bands: ObserverBands, + pub obs_bands: Vec, /// Albedo of the object for each band. 
pub band_albedos: Vec, @@ -78,13 +79,12 @@ pub struct FrmParams { pub emissivity: f64, } -impl FileIO for FrmParams {} - impl FrmParams { /// Create new [`FrmParams`] with WISE band and zero mags + #[must_use] pub fn new_wise(albedos: [f64; 4], hg_params: HGParams, emissivity: f64) -> Self { Self { - obs_bands: ObserverBands::Wise, + obs_bands: BandInfo::new_wise().to_vec(), band_albedos: albedos.to_vec(), hg_params, emissivity, @@ -92,9 +92,10 @@ impl FrmParams { } /// Create new [`FrmParams`] with NEOS band and zero mags + #[must_use] pub fn new_neos(albedos: [f64; 2], hg_params: HGParams, emissivity: f64) -> Self { Self { - obs_bands: ObserverBands::Neos, + obs_bands: BandInfo::new_neos().to_vec(), band_albedos: albedos.to_vec(), hg_params, emissivity, @@ -107,6 +108,7 @@ impl FrmParams { /// /// * `sun2obj` - Position of the object with respect to the Sun in AU. /// * `sun2obs` - Position of the Observer with respect to the Sun in AU. + #[must_use] pub fn apparent_thermal_flux( &self, sun2obj: &Vector3, @@ -126,13 +128,13 @@ impl FrmParams { self.emissivity, ); - let bands = self.obs_bands.band_wavelength(); - let color_correction = self.obs_bands.color_correction(); + let bands: Vec<_> = self.obs_bands.iter().map(|x| x.wavelength).collect(); + let color_correction: Vec<_> = self.obs_bands.iter().map(|x| x.color_correction).collect(); let obj2sun = UnitVector3::new_normalize(obj2sun); let obs2obj = UnitVector3::new_normalize(obs2obj); - let mut fluxes = vec![0.0; bands.len()]; - for facet in geom.facets.iter() { + let mut fluxes = vec![0.0; self.obs_bands.len()]; + for facet in &geom.facets { let temp = frm_facet_temperature(&facet.normal, ss_temp, &obj2sun); let obs_flux_scaling = lambertian_vis_scale_factor( &facet.normal, @@ -146,9 +148,9 @@ impl FrmParams { } for (idx, (wavelength, flux)) in bands.iter().zip(&mut fluxes).enumerate() { let mut facet_flux = black_body_flux(temp, *wavelength); - if let Some(funcs) = color_correction { - facet_flux *= funcs[idx](temp); - }; + if let Some(func) = color_correction[idx] { + facet_flux *= func(temp); + } facet_flux *= facet.area; *flux += obs_flux_scaling * facet_flux; @@ -163,46 +165,47 @@ impl FrmParams { /// /// * `sun2obj` - Position of the object with respect to the Sun in AU. /// * `sun2obs` - Position of the Observer with respect to the Sun in AU. + #[must_use] pub fn apparent_total_flux( &self, sun2obj: &Vector3, sun2obs: &Vector3, ) -> Option { - let bands = self.obs_bands.band_wavelength(); - let mut fluxes = vec![0.0; bands.len()]; - let mut hg_fluxes = vec![0.0; bands.len()]; - let thermal_fluxes = self.apparent_thermal_flux(sun2obj, sun2obs)?; - let sun_correction = self.obs_bands.solar_correction(); - - for (idx, (wavelength, sun_corr)) in bands.iter().zip(sun_correction).enumerate() { - let refl = self.hg_params.apparent_flux( - sun2obj, - sun2obs, - *wavelength, - self.band_albedos[idx], - )? * sun_corr; - hg_fluxes[idx] = refl; - fluxes[idx] = thermal_fluxes[idx] + refl; + + let mut hg_fluxes = Vec::with_capacity(thermal_fluxes.len()); + let mut fluxes = Vec::with_capacity(thermal_fluxes.len()); + for ((band, t_flux), albedo) in self + .obs_bands + .iter() + .zip(&thermal_fluxes) + .zip(&self.band_albedos) + { + let refl = self + .hg_params + .apparent_flux(sun2obj, sun2obs, band.wavelength, *albedo)? 
+ * band.solar_correction; + hg_fluxes.push(refl); + fluxes.push(*t_flux + refl); } let v_band_magnitude = self.hg_params.apparent_mag(sun2obj, sun2obs); let v_band_flux = flux_to_mag(v_band_magnitude, V_MAG_ZERO); - let magnitudes: Option> = self.obs_bands.zero_mags().map(|mags| { - fluxes - .iter() - .zip(mags) - .map(|(flux, z_mag)| -2.5 * (flux / z_mag).log10()) - .collect::>() - }); + let magnitudes: Vec<_> = self + .obs_bands + .iter() + .zip(&fluxes) + .map(|(band_info, flux)| flux_to_mag(*flux, band_info.zero_mag)) + .collect(); + Some(ModelResults { fluxes, - hg_fluxes, + magnitudes, thermal_fluxes, + hg_fluxes, v_band_magnitude, v_band_flux, - magnitudes, }) } } @@ -276,8 +279,10 @@ mod tests { .iter() .map(|facet| frm_facet_temperature(&facet.normal, 1.0, &obj2sun)) .sum(); + let t1: f64 = t1 / fib_n2048.facets.len() as f64; let t2: f64 = t2 / fib_n1024.facets.len() as f64; + assert!((t1 - t2).abs() < 1e-2); } } diff --git a/src/kete_core/src/flux/mod.rs b/src/kete_core/src/flux/mod.rs index 65998b9..a7ca2ea 100644 --- a/src/kete_core/src/flux/mod.rs +++ b/src/kete_core/src/flux/mod.rs @@ -47,7 +47,7 @@ mod sun; pub use self::comets::CometMKParams; pub use self::common::{ - ColorCorrFn, ModelResults, ObserverBands, black_body_flux, flux_to_mag, lambertian_flux, + BandInfo, ColorCorrFn, ModelResults, black_body_flux, flux_to_mag, lambertian_flux, lambertian_vis_scale_factor, mag_to_flux, sub_solar_temperature, }; pub use self::frm::{FrmParams, frm_facet_temperature}; diff --git a/src/kete_core/src/flux/neatm.rs b/src/kete_core/src/flux/neatm.rs index 8a37c1d..9103c96 100644 --- a/src/kete_core/src/flux/neatm.rs +++ b/src/kete_core/src/flux/neatm.rs @@ -28,14 +28,13 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use super::{ - DEFAULT_SHAPE, HGParams, ObserverBands, + BandInfo, DEFAULT_SHAPE, HGParams, common::{ModelResults, black_body_flux, lambertian_vis_scale_factor, sub_solar_temperature}, flux_to_mag, }; -use crate::{constants::V_MAG_ZERO, io::FileIO}; +use crate::constants::V_MAG_ZERO; use nalgebra::{UnitVector3, Vector3}; -use serde::{Deserialize, Serialize}; /// Using the NEATM thermal model, calculate the temperature of each facet given the /// direction of the sun, the subsolar temperature and the facet normal vectors. @@ -46,6 +45,7 @@ use serde::{Deserialize, Serialize}; /// * `subsolar_temp` - The temperature at the sub-solar point in kelvin. /// * `obj2sun` - The vector from the object to the sun, unit length. #[inline(always)] +#[must_use] pub fn neatm_facet_temperature( facet_normal: &UnitVector3, obj2sun: &UnitVector3, @@ -59,10 +59,10 @@ pub fn neatm_facet_temperature( } /// NEATM input -#[derive(Deserialize, Serialize, Clone, Debug)] +#[derive(Clone, Debug)] pub struct NeatmParams { /// Wavelength band information of the observer. - pub obs_bands: ObserverBands, + pub obs_bands: Vec, /// Albedo of the object for each band. 
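The FRM flux calculation above boils down to a per-facet pipeline: sub-solar temperature, facet temperature, black-body flux, then an optional magnitude. A rough sketch of those pieces in isolation, assuming the re-exports listed in `flux/mod.rs` above and using placeholder physical values; the assertions only check loose sanity bounds, not calibrated numbers.

```rust
use kete_core::flux::{
    black_body_flux, flux_to_mag, frm_facet_temperature, sub_solar_temperature,
};
use nalgebra::{UnitVector3, Vector3};

fn main() {
    // Object 1 AU from the sun with zero albedo; the remaining arguments
    // (0.0, 1.0, 1.0) mirror the idealized values in the unit test above.
    let obj2sun: Vector3<f64> = [1.0, 0.0, 0.0].into();
    let ss_temp = sub_solar_temperature(&obj2sun, 0.0, 0.0, 1.0, 1.0);
    assert!(ss_temp > 0.0 && ss_temp < 500.0);

    // FRM facet temperature for a facet whose normal points at the sun;
    // it can never exceed the sub-solar temperature.
    let normal = UnitVector3::new_normalize(Vector3::new(1.0, 0.0, 0.0));
    let obj2sun_unit = UnitVector3::new_normalize(obj2sun);
    let facet_temp = frm_facet_temperature(&normal, ss_temp, &obj2sun_unit);
    assert!(facet_temp <= ss_temp + 1e-9);

    // Black-body flux (Jy / steradian) at a placeholder 11,100 nm band,
    // converted to a magnitude with a placeholder zero-point flux.
    let flux = black_body_flux(facet_temp, 11_100.0);
    assert!(flux.is_finite() && flux >= 0.0);
    let _mag = flux_to_mag(flux.max(1e-30), 3631.0); // guard against zero flux
}
```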
pub band_albedos: Vec, @@ -77,26 +77,26 @@ pub struct NeatmParams { pub emissivity: f64, } -impl FileIO for NeatmParams {} - impl NeatmParams { /// Create new [`NeatmParams`] with WISE band and zero mags + #[must_use] pub fn new_wise(albedos: [f64; 4], beaming: f64, hg_params: HGParams, emissivity: f64) -> Self { Self { - obs_bands: ObserverBands::Wise, + obs_bands: BandInfo::new_wise().to_vec(), band_albedos: albedos.to_vec(), - beaming, hg_params, emissivity, + beaming, } } /// Create new [`NeatmParams`] with NEOS band and zero mags + #[must_use] pub fn new_neos(albedos: [f64; 2], beaming: f64, hg_params: HGParams, emissivity: f64) -> Self { Self { - obs_bands: ObserverBands::Neos, - band_albedos: albedos.to_vec(), + obs_bands: BandInfo::new_neos().to_vec(), beaming, + band_albedos: albedos.to_vec(), hg_params, emissivity, } @@ -112,6 +112,7 @@ impl NeatmParams { /// this is provided, the function must accept a list of temperatures in kelvin /// and return a scaling factor for how much the flux gets scaled by for that /// specified temp. + #[must_use] pub fn apparent_thermal_flux( &self, sun2obj: &Vector3, @@ -122,8 +123,6 @@ impl NeatmParams { let obs2obj_r = obs2obj.norm(); let geom = &DEFAULT_SHAPE; let hg_params = &self.hg_params; - let bands = self.obs_bands.band_wavelength(); - let color_correction = self.obs_bands.color_correction(); let diameter = hg_params.diam()?; let ss_temp = sub_solar_temperature( @@ -134,12 +133,13 @@ impl NeatmParams { self.emissivity, ); + let bands: Vec<_> = self.obs_bands.iter().map(|x| x.wavelength).collect(); + let color_correction: Vec<_> = self.obs_bands.iter().map(|x| x.color_correction).collect(); let obj2sun = UnitVector3::new_normalize(obj2sun); let obs2obj = UnitVector3::new_normalize(obs2obj); - let mut fluxes = vec![0.0; bands.len()]; - - for facet in geom.facets.iter() { + let mut fluxes = vec![0.0; self.obs_bands.len()]; + for facet in &geom.facets { let temp = neatm_facet_temperature(&facet.normal, &obj2sun, &ss_temp); let obs_flux_scaling = lambertian_vis_scale_factor( &facet.normal, @@ -153,9 +153,9 @@ impl NeatmParams { } for (idx, (wavelength, flux)) in bands.iter().zip(&mut fluxes).enumerate() { let mut facet_flux = black_body_flux(temp, *wavelength); - if let Some(funcs) = color_correction { - facet_flux *= funcs[idx](temp); - }; + if let Some(func) = color_correction[idx] { + facet_flux *= func(temp); + } facet_flux *= facet.area; *flux += obs_flux_scaling * facet_flux; @@ -170,47 +170,47 @@ impl NeatmParams { /// /// * `sun2obj` - Position of the object with respect to the Sun in AU. /// * `sun2obs` - Position of the Observer with respect to the Sun in AU. + #[must_use] pub fn apparent_total_flux( &self, sun2obj: &Vector3, sun2obs: &Vector3, ) -> Option { - let bands = self.obs_bands.band_wavelength(); - let mut fluxes = vec![0.0; bands.len()]; - let mut hg_fluxes = vec![0.0; bands.len()]; - let thermal_fluxes = self.apparent_thermal_flux(sun2obj, sun2obs)?; - let sun_correction = self.obs_bands.solar_correction(); - for (idx, (wavelength, sun_corr)) in bands.iter().zip(sun_correction).enumerate() { - let refl = self.hg_params.apparent_flux( - sun2obj, - sun2obs, - *wavelength, - self.band_albedos[idx], - )? 
* sun_corr; - hg_fluxes[idx] = refl; - fluxes[idx] = thermal_fluxes[idx] + refl; + let mut hg_fluxes = Vec::with_capacity(thermal_fluxes.len()); + let mut fluxes = Vec::with_capacity(thermal_fluxes.len()); + for ((band, t_flux), albedo) in self + .obs_bands + .iter() + .zip(&thermal_fluxes) + .zip(&self.band_albedos) + { + let refl = self + .hg_params + .apparent_flux(sun2obj, sun2obs, band.wavelength, *albedo)? + * band.solar_correction; + hg_fluxes.push(refl); + fluxes.push(*t_flux + refl); } let v_band_magnitude = self.hg_params.apparent_mag(sun2obj, sun2obs); let v_band_flux = flux_to_mag(v_band_magnitude, V_MAG_ZERO); - let magnitudes: Option> = self.obs_bands.zero_mags().map(|mags| { - fluxes - .iter() - .zip(mags) - .map(|(flux, z_mag)| flux_to_mag(*flux, *z_mag)) - .collect::>() - }); + let magnitudes: Vec<_> = self + .obs_bands + .iter() + .zip(&fluxes) + .map(|(band_info, flux)| flux_to_mag(*flux, band_info.zero_mag)) + .collect(); Some(ModelResults { fluxes, - hg_fluxes, + magnitudes, thermal_fluxes, + hg_fluxes, v_band_magnitude, v_band_flux, - magnitudes, }) } } @@ -275,8 +275,10 @@ mod tests { .iter() .map(|facet| neatm_facet_temperature(&facet.normal, &obj2sun, &1.0)) .sum(); + let t1: f64 = t1 / fib_n2048.facets.len() as f64; let t2: f64 = t2 / fib_n1024.facets.len() as f64; + assert!((t1 - t2).abs() < 1e-2); } } diff --git a/src/kete_core/src/flux/reflected.rs b/src/kete_core/src/flux/reflected.rs index 435869c..c72ea7d 100644 --- a/src/kete_core/src/flux/reflected.rs +++ b/src/kete_core/src/flux/reflected.rs @@ -50,6 +50,7 @@ use serde::{Deserialize, Serialize}; /// /// * `g_param` - The G parameter, between 0 and 1. /// * `phase` - The phase angle in radians. +#[must_use] pub fn hg_phase_curve_correction(g_param: f64, phase: f64) -> f64 { fn helper(a: f64, b: f64, c: f64, phase: f64) -> f64 { let phase_s = phase.sin(); @@ -77,6 +78,7 @@ pub fn hg_phase_curve_correction(g_param: f64, phase: f64) -> f64 { /// # Arguments /// * `phase_angle` - Phase angle in radians. /// +#[must_use] #[cfg_attr(feature = "pyo3", pyo3::pyfunction)] pub fn cometary_dust_phase_curve_correction(phase_angle: f64) -> f64 { const K: f64 = 0.80; @@ -152,15 +154,16 @@ impl HGParams { /// * `g_param` - The G parameter in the HG system. /// * `h_mag` - The H parameter of the object in the HG system. /// * `c_hg` - The relationship constant of the H-D-pV conversion in km. + #[must_use] pub fn new(desig: String, g_param: f64, h_mag: f64, c_hg: Option) -> Self { let c_hg = c_hg.unwrap_or(C_V); Self { desig, g_param, h_mag, - c_hg, vis_albedo: None, diam: None, + c_hg, } } @@ -189,6 +192,8 @@ impl HGParams { /// * `vis_albedo` - Visible geometric albedo of the object. /// * `diameter` - Diameter of the object in km. /// + /// # Errors + /// This can fail if parameters are not self consistent between H, diam, and albedo. pub fn try_new( desig: String, g_param: f64, @@ -202,23 +207,26 @@ impl HGParams { desig, g_param, h_mag, - c_hg, vis_albedo, diam, + c_hg, }) } /// New [`HGParams`] assuming G param is `0.15` and ``c_hg`` is the V band value. + #[must_use] pub fn default(desig: String, h_mag: f64) -> Self { Self::new(desig, 0.15, h_mag, Some(C_V)) } /// Diameter of the object in km. + #[must_use] pub fn diam(&self) -> Option { self.diam } /// Visible geometric albedo of the object. + #[must_use] pub fn vis_albedo(&self) -> Option { self.vis_albedo } @@ -315,6 +323,7 @@ impl HGParams { /// * `sun2obj` - Vector from the sun to the object in AU. /// * `sun2obs` - Vector from the sun to the observer in AU. 
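The reflected-light side keeps the same HG machinery, with the constructors now marked `#[must_use]` and `try_new` documenting its failure mode. A small sketch of the simplest path through it, using `HGParams::default` (G = 0.15) and the phase-curve correction, which is exactly 1 at zero phase angle; paths assume `kete_core::flux::{HGParams, hg_phase_curve_correction}` and the designation and H value are placeholders.

```rust
use kete_core::flux::{HGParams, hg_phase_curve_correction};
use nalgebra::Vector3;

fn main() {
    // At zero phase angle the HG correction is 1 for any G.
    assert!((hg_phase_curve_correction(0.15, 0.0) - 1.0).abs() < 1e-12);

    // Hypothetical object: H = 18.2, default G = 0.15, no albedo or diameter.
    let hg = HGParams::default("2025 AB".into(), 18.2);
    assert!(hg.diam().is_none());
    assert!(hg.vis_albedo().is_none());

    // Apparent V magnitude for an object 2 AU from the sun seen from 1 AU,
    // at zero phase; this must be fainter than the absolute magnitude.
    let sun2obj: Vector3<f64> = [2.0, 0.0, 0.0].into();
    let sun2obs: Vector3<f64> = [1.0, 0.0, 0.0].into();
    let mag = hg.apparent_mag(&sun2obj, &sun2obs);
    assert!(mag.is_finite() && mag > 18.2);
}
```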
/// + #[must_use] pub fn apparent_mag(&self, sun2obj: &Vector3, sun2obs: &Vector3) -> f64 { let obj_r = sun2obj.norm(); let obj2obs = sun2obs - sun2obj; @@ -355,6 +364,7 @@ impl HGParams { /// * `sun2obs` - Vector from the sun to the observer in AU. /// * `wavelength` - Central wavelength of light in nm. /// * `albedo` - Geometric Albedo at the wavelength provided. + #[must_use] pub fn apparent_flux( &self, sun2obj: &Vector3, diff --git a/src/kete_core/src/flux/shapes.rs b/src/kete_core/src/flux/shapes.rs index 3c103f8..1118310 100644 --- a/src/kete_core/src/flux/shapes.rs +++ b/src/kete_core/src/flux/shapes.rs @@ -66,18 +66,19 @@ impl ConvexShape { /// /// /// Total surface area is set to 1. - pub fn new_fibonacci_lattice(n_facets: usize) -> Self { - let mut facets: Vec = Vec::with_capacity(n_facets); - + #[must_use] + pub fn new_fibonacci_lattice(n_facets: u32) -> Self { const EPSILON: f64 = 0.36; - let n_normals = n_facets as f64; + let mut facets: Vec = Vec::with_capacity(n_facets as usize); + + let n_normals = f64::from(n_facets); let area = n_normals.recip(); for idx in 0..n_facets { - let theta: f64 = TAU * (idx as f64) / GOLDEN_RATIO; + let theta: f64 = TAU * f64::from(idx) / GOLDEN_RATIO; let phi: f64 = - (1.0 - 2.0 * ((idx as f64) + EPSILON) / (n_normals - 1.0 + 2.0 * EPSILON)).acos(); + (1.0 - 2.0 * (f64::from(idx) + EPSILON) / (n_normals - 1.0 + 2.0 * EPSILON)).acos(); let normal = Unit::new_unchecked(Vector3::new( theta.cos() * phi.sin(), theta.sin() * phi.sin(), diff --git a/src/kete_core/src/flux/sun.rs b/src/kete_core/src/flux/sun.rs index 13900ef..809b78f 100644 --- a/src/kete_core/src/flux/sun.rs +++ b/src/kete_core/src/flux/sun.rs @@ -53,6 +53,7 @@ use crate::{ /// /// * `dist` - Distance from the Sun in au. /// * `wavelength` - wavelength in nm. +#[must_use] pub fn solar_flux(dist: f64, wavelength: f64) -> Option { let wavelength = wavelength / 1000.0; let start_idx = match E490_DATA.binary_search_by(|probe| probe[0].total_cmp(&wavelength)) { @@ -60,13 +61,13 @@ pub fn solar_flux(dist: f64, wavelength: f64) -> Option { Err(c) => { if c >= E490_DATA.len() - 1 || c == 0 { return None; - }; + } c - 1 } }; - let low = E490_DATA[start_idx]; - let high = E490_DATA[start_idx + 1]; + let low = E490_DATA.get(start_idx)?; + let high = E490_DATA.get(start_idx + 1)?; let w_frac = (wavelength - low[0]) / (high[0] - low[0]); let val = w_frac * (high[1] - low[1]) + low[1]; @@ -89,6 +90,7 @@ pub fn solar_flux(dist: f64, wavelength: f64) -> Option { /// * `wavelength` - Central wavelength of light in nm. /// #[inline(always)] +#[must_use] pub fn solar_flux_black_body(dist: f64, wavelength: f64) -> f64 { const SUN_R_AU: f64 = SUN_DIAMETER / AU_KM / 2.0; diff --git a/src/kete_core/src/fov/fov_like.rs b/src/kete_core/src/fov/fov_like.rs index f5be6f9..7cccd90 100644 --- a/src/kete_core/src/fov/fov_like.rs +++ b/src/kete_core/src/fov/fov_like.rs @@ -30,11 +30,11 @@ // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use super::*; +use super::{Contains, FOV}; use rayon::prelude::*; use crate::constants::C_AU_PER_DAY_INV; -use crate::frames::Vector; +use crate::frames::{Equatorial, Vector}; use crate::prelude::*; /// Field of View like objects. @@ -57,9 +57,15 @@ pub trait FovLike: Sync + Sized { fn n_patches(&self) -> usize; /// Get the pointing vector of the FOV. + /// + /// # Errors + /// Some ``FoVs`` may not have a well formed pointing vector. 
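The `new_fibonacci_lattice` constructor above now takes a `u32` facet count. A quick sketch of the invariants the shape promises (one facet per lattice point, unit normals, total area of 1), assuming `ConvexShape` is reachable at `kete_core::flux::ConvexShape`.

```rust
use kete_core::flux::ConvexShape;

fn main() {
    let n_facets: u32 = 128;
    let shape = ConvexShape::new_fibonacci_lattice(n_facets);

    // One facet per lattice point.
    assert_eq!(shape.facets.len(), n_facets as usize);

    // Total surface area is normalized to 1.
    let total_area: f64 = shape.facets.iter().map(|facet| facet.area).sum();
    assert!((total_area - 1.0).abs() < 1e-12);

    // Facet normals are unit vectors.
    assert!(shape
        .facets
        .iter()
        .all(|facet| (facet.normal.norm() - 1.0).abs() < 1e-12));
}
```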
fn pointing(&self) -> KeteResult>; /// Get the corners of the FOV. + /// + /// # Errors + /// Not all ``FoVs`` contain corners, such as a Cone. fn corners(&self) -> KeteResult>>; /// Check if a static source is visible. This assumes the vector passed in is at an @@ -98,6 +104,9 @@ pub trait FovLike: Sync + Sized { /// Assuming the object undergoes two-body motion, check to see if it is within the /// field of view. + /// + /// # Errors + /// Errors can occur when numerical integration fails, such as if the object hits the sun. #[inline] fn check_two_body( &self, @@ -120,6 +129,9 @@ pub trait FovLike: Sync + Sized { /// Assuming the object undergoes n-body motion, check to see if it is within the /// field of view. + /// + /// # Errors + /// Errors can occur for numerous reasons, typically from numerical integration failing. #[inline] fn check_n_body( &self, @@ -190,7 +202,7 @@ pub trait FovLike: Sync + Sized { let (idx, contains, state) = self.check_two_body(state).ok()?; match contains { Contains::Inside => Some((idx, state)), - _ => None, + Contains::Outside(_) => None, } } else { let (_, contains, _) = self.check_two_body(state).ok()?; @@ -203,14 +215,14 @@ pub trait FovLike: Sync + Sized { self.check_n_body(state, include_asteroids).ok()?; match contains { Contains::Inside => Some((idx, state)), - _ => None, + Contains::Outside(_) => None, } } }) .collect(); let mut detector_states = vec![Vec::>::new(); self.n_patches()]; - for (idx, state) in final_states.into_iter() { + for (idx, state) in final_states { detector_states[idx].push(state); } @@ -244,9 +256,9 @@ pub trait FovLike: Sync + Sized { }) .collect(); - states - .into_iter() - .for_each(|(patch_idx, state)| visible[patch_idx].push(state)); + for (patch_idx, state) in states { + visible[patch_idx].push(state); + } visible .into_iter() diff --git a/src/kete_core/src/fov/generic.rs b/src/kete_core/src/fov/generic.rs index 9f665fa..918a36b 100644 --- a/src/kete_core/src/fov/generic.rs +++ b/src/kete_core/src/fov/generic.rs @@ -55,6 +55,7 @@ pub struct GenericRectangle { impl GenericRectangle { /// Create a new Generic Rectangular FOV + #[must_use] pub fn new( pointing: Vector, rotation: f64, @@ -71,6 +72,7 @@ impl GenericRectangle { } /// Create a Field of view from a collection of corners. + #[must_use] pub fn from_corners( corners: [Vector; 4], observer: State, @@ -86,12 +88,14 @@ impl GenericRectangle { /// Latitudinal width of the FOV. #[inline] + #[must_use] pub fn lat_width(&self) -> f64 { self.patch.lat_width() } /// Longitudinal width of the FOV. 
#[inline] + #[must_use] pub fn lon_width(&self) -> f64 { self.patch.lon_width() } @@ -100,9 +104,7 @@ impl GenericRectangle { impl FovLike for GenericRectangle { #[inline] fn get_fov(&self, index: usize) -> FOV { - if index != 0 { - panic!("FOV only has a single patch") - } + assert!(index == 0, "FOV only has a single patch"); FOV::GenericRectangle(self.clone()) } @@ -140,6 +142,7 @@ pub struct OmniDirectional { impl OmniDirectional { /// Create a new Omni-Directional FOV + #[must_use] pub fn new(observer: State) -> Self { Self { observer } } @@ -148,9 +151,7 @@ impl OmniDirectional { impl FovLike for OmniDirectional { #[inline] fn get_fov(&self, index: usize) -> FOV { - if index != 0 { - panic!("FOV only has a single patch") - } + assert!(index == 0, "FOV only has a single patch"); FOV::OmniDirectional(self.clone()) } @@ -195,6 +196,7 @@ pub struct GenericCone { impl GenericCone { /// Create a new Generic Conic FOV + #[must_use] pub fn new(pointing: Vector, angle: f64, observer: State) -> Self { let patch = SphericalCone::new(&pointing, angle); Self { observer, patch } @@ -202,6 +204,7 @@ impl GenericCone { /// Angle of the cone from the central pointing vector. #[inline] + #[must_use] pub fn angle(&self) -> &f64 { &self.patch.angle } @@ -210,9 +213,7 @@ impl GenericCone { impl FovLike for GenericCone { #[inline] fn get_fov(&self, index: usize) -> FOV { - if index != 0 { - panic!("FOV only has a single patch") - } + assert!(index == 0, "FOV only has a single patch"); FOV::GenericCone(self.clone()) } diff --git a/src/kete_core/src/fov/mod.rs b/src/kete_core/src/fov/mod.rs index dc858d1..9cf3833 100644 --- a/src/kete_core/src/fov/mod.rs +++ b/src/kete_core/src/fov/mod.rs @@ -95,6 +95,7 @@ pub enum FOV { impl FOV { /// Check if a collection of states are visible to this FOV using orbital propagation + #[must_use] pub fn check_visible( self, states: &[State], @@ -136,6 +137,7 @@ impl FOV { } /// Check if any loaded SPK objects are visible to this FOV + #[must_use] pub fn check_spks(&self, obj_ids: &[i32]) -> Vec> { match self { Self::Wise(fov) => fov.check_spks(obj_ids), @@ -154,6 +156,7 @@ impl FOV { } /// Check if static sources are visible in this FOV. + #[must_use] pub fn check_statics(&self, pos: &[Vector]) -> Vec, Self)>> { match self { Self::Wise(fov) => fov.check_statics(pos), diff --git a/src/kete_core/src/fov/neos.rs b/src/kete_core/src/fov/neos.rs index 5af5108..0f2b14c 100644 --- a/src/kete_core/src/fov/neos.rs +++ b/src/kete_core/src/fov/neos.rs @@ -76,6 +76,7 @@ pub struct NeosCmos { impl NeosCmos { /// Create a NEOS FOV + #[must_use] pub fn new( pointing: Vector, rotation: f64, @@ -93,24 +94,22 @@ impl NeosCmos { Self { observer, patch, + rotation, side_id, stack_id, quad_id, loop_id, subloop_id, exposure_id, - cmos_id, band, - rotation, + cmos_id, } } } impl FovLike for NeosCmos { fn get_fov(&self, index: usize) -> FOV { - if index != 0 { - panic!("FOV only has a single patch") - } + assert!(index == 0, "FOV only has a single patch"); FOV::NeosCmos(self.clone()) } @@ -177,14 +176,11 @@ pub struct NeosVisit { impl NeosVisit { /// Construct a new [`NeosVisit`] from a list of cmos fovs. /// These cmos fovs must be from the same metadata when appropriate. - pub fn new(chips: Vec) -> KeteResult { - if chips.len() != 4 { - Err(Error::ValueError( - "Visit must contains 4 NeosCmos fovs".into(), - ))?; - } - let chips: Box<[NeosCmos; 4]> = Box::new(chips.try_into().unwrap()); - + /// + /// # Errors + /// Errors will occur if input chips are inconsistent with one another. 
+ pub fn new(chips: [NeosCmos; 4]) -> KeteResult { + #[allow(clippy::missing_panics_doc, reason = "infallible by construction")] let first = chips.first().unwrap(); let observer = first.observer().clone(); @@ -197,7 +193,7 @@ impl NeosVisit { let rotation = first.rotation; let band = first.band; - for ccd in chips.iter() { + for ccd in &chips { if ccd.side_id != side_id || ccd.stack_id != stack_id || ccd.quad_id != quad_id @@ -214,7 +210,7 @@ impl NeosVisit { } } Ok(Self { - chips, + chips: Box::new(chips), observer, rotation, side_id, @@ -237,6 +233,7 @@ impl NeosVisit { /// | <---- x -----> | /// /// Pointing vector is in the middle of the 'a' in the central gap. + #[must_use] pub fn from_pointing( x_width: f64, y_width: f64, diff --git a/src/kete_core/src/fov/patches.rs b/src/kete_core/src/fov/patches.rs index ef41f46..f7343f5 100644 --- a/src/kete_core/src/fov/patches.rs +++ b/src/kete_core/src/fov/patches.rs @@ -51,6 +51,7 @@ pub enum Contains { impl Contains { /// Returns true if the vector is inside the area. + #[must_use] pub fn is_inside(&self) -> bool { matches!(self, Self::Inside) } @@ -116,6 +117,7 @@ impl OnSkyRectangle { /// # Arguments /// /// * `edge_normals` - Normal vectors which define the boundary of a polygon. + #[must_use] pub fn from_normals(edge_normals: [Vector; 4]) -> Self { // construct the 4 normal vectors Self { edge_normals } @@ -134,6 +136,7 @@ impl OnSkyRectangle { /// longitudinally in radians. /// * `lat_width` - If the rotation is 0, this defines the width of the rectangle /// latitudinally in radians. + #[must_use] pub fn new( pointing: Vector, rotation: f64, @@ -183,11 +186,14 @@ impl OnSkyRectangle { /// * `expand_angle` - Expand the fov by the specified angle away from the center, /// units of radians. /// + #[must_use] pub fn from_corners(corners: [Vector; 4], expand_angle: f64) -> Self { // compute the pointing vector from the corners let pointing = { let mut point: Vector = [0.0; 3].into(); - corners.iter().for_each(|c| point += c); + for c in corners { + point += &c; + } point.normalize() }; @@ -201,12 +207,14 @@ impl OnSkyRectangle { // check the direction of the normals, if they are too far away from the // pointing vector, then we need to flip the signs. if n1.dot(&pointing).is_sign_negative() { - edge_normals.iter_mut().for_each(|x| *x = x.neg()); + for x in &mut edge_normals { + *x = x.neg(); + } edge_normals.reverse(); } // move the normals away from the pointing vector by the specified angle. - for v in edge_normals.iter_mut() { + for v in &mut edge_normals { let rot_vec = v.cross(&pointing); *v = v.rotate_around(rot_vec, expand_angle); } @@ -216,6 +224,7 @@ impl OnSkyRectangle { /// Return the 4 corners of the patch. pub fn corners(&self) -> [Vector; 4] { + #[allow(clippy::missing_panics_doc, reason = "Cant fail by construction.")] (0..4) .map(|idx| { let idy = (idx + 1) % 4; @@ -229,12 +238,14 @@ impl OnSkyRectangle { } /// Latitudinal width of the patch, the assumes the patch is rectangular. + #[must_use] pub fn lat_width(&self) -> f64 { let pointing = self.pointing(); 2.0 * (FRAC_PI_2 - pointing.angle(&self.edge_normals[1])) } /// Longitudinal width of the patch, the assumes the patch is rectangular. + #[must_use] pub fn lon_width(&self) -> f64 { let pointing = self.pointing(); 2.0 * (FRAC_PI_2 - pointing.angle(&self.edge_normals[0])) @@ -245,8 +256,8 @@ impl SkyPatch for SphericalPolygon { /// Is the vector inside of the polygon. 
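The containment checks above now spell out `Contains::Outside(_)` rather than a `_` wildcard, in line with the new `wildcard_enum_match_arm` lint, and `is_inside` is the convenience accessor. A tiny sketch of consuming a containment result without matching on the enum directly, assuming `Contains` is re-exported at `kete_core::fov::Contains`.

```rust
use kete_core::fov::Contains;

/// Keep a value only when the containment check reported it as inside.
fn keep_if_inside<T>(check: Contains, value: T) -> Option<T> {
    // `is_inside()` avoids committing to the enum's variant list; the crate
    // itself now matches `Contains::Outside(_)` explicitly instead of `_`.
    check.is_inside().then_some(value)
}

fn main() {
    assert_eq!(keep_if_inside(Contains::Inside, 42), Some(42));
}
```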
fn contains(&self, obs_to_obj: &Vector) -> Contains { let mut closest_edge = f64::NEG_INFINITY; - for normal in self.edge_normals.iter() { - let d = obs_to_obj.dot(normal); + for normal in self.edge_normals { + let d = obs_to_obj.dot(&normal); if d.is_nan() { return Contains::Outside(d); } @@ -296,6 +307,7 @@ pub struct SphericalCone { impl SphericalCone { /// Construct a new `SphericalCone` given the central vector and the angle of the /// cone. + #[must_use] pub fn new(pointing: &Vector, angle: f64) -> Self { let pointing = pointing.normalize(); Self { pointing, angle } diff --git a/src/kete_core/src/fov/ptf.rs b/src/kete_core/src/fov/ptf.rs index 262c753..a1dc393 100644 --- a/src/kete_core/src/fov/ptf.rs +++ b/src/kete_core/src/fov/ptf.rs @@ -106,6 +106,7 @@ pub struct PtfCcd { impl PtfCcd { /// Create a Ptf field of view + #[must_use] pub fn new( corners: [Vector; 4], observer: State, @@ -118,23 +119,21 @@ impl PtfCcd { ) -> Self { let patch = OnSkyRectangle::from_corners(corners, 0.0); Self { - patch, observer, + patch, field, ccdid, filter, filename, - seeing, info_bits, + seeing, } } } impl FovLike for PtfCcd { fn get_fov(&self, index: usize) -> FOV { - if index != 0 { - panic!("FOV only has a single patch") - } + assert!(index == 0, "FOV only has a single patch"); FOV::PtfCcd(self.clone()) } @@ -184,19 +183,24 @@ impl PtfField { /// Construct a new [`PtfField`] from a list of ccds. /// These ccds must be from the same field and having matching value as /// appropriate. + /// + /// # Errors + /// Construction will fail if no ccds are provided or if they are inconsistent. pub fn new(ccds: Vec) -> KeteResult { if ccds.is_empty() { Err(Error::ValueError("Ptf Field must contains PtfCcd".into()))?; } + #[allow(clippy::missing_panics_doc, reason = "ccds is not empty")] let first = ccds.first().unwrap(); let observer = first.observer().clone(); let field = first.field; let filter = first.filter; - for ccd in ccds.iter() { - if ccd.field != field || ccd.filter != filter || ccd.observer().epoch != observer.epoch { + for ccd in &ccds { + if ccd.field != field || ccd.filter != filter || ccd.observer().epoch != observer.epoch + { Err(Error::ValueError( "All PtfCcds must have matching values except CCD ID etc.".into(), ))?; diff --git a/src/kete_core/src/fov/spherex.rs b/src/kete_core/src/fov/spherex.rs index fddfdd0..e7d8513 100644 --- a/src/kete_core/src/fov/spherex.rs +++ b/src/kete_core/src/fov/spherex.rs @@ -52,6 +52,7 @@ pub struct SpherexCmos { impl SpherexCmos { /// Create a Spherex fov from corners + #[must_use] pub fn new( corners: [Vector; 4], observer: State, @@ -60,8 +61,8 @@ impl SpherexCmos { ) -> Self { let patch = OnSkyRectangle::from_corners(corners, 0.0); Self { - patch, observer, + patch, uri, plane_id, } @@ -71,9 +72,7 @@ impl SpherexCmos { impl FovLike for SpherexCmos { #[inline] fn get_fov(&self, index: usize) -> FOV { - if index != 0 { - panic!("SPHEREx FOV only has a single patch") - } + assert!(index == 0, "SPHEREx FOV only has a single patch"); FOV::SpherexCmos(self.clone()) } @@ -123,6 +122,10 @@ impl SpherexField { /// Construct a new [`SpherexField`] from a list of cmos frames. /// These cmos frames must be from the same field and having matching value as /// appropriate. + /// + /// # Errors + /// ``cmos_frames`` must not be empty, and all frames must be consistent with one + /// another. 
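The constructors in these FOV modules repeatedly switch from `if cond { panic!(..) }` to `assert!(cond, ..)` for programmer errors, while keeping the `Err(Error::ValueError(..))?` early-return style for fallible input checks. A generic illustration of both idioms (not kete API, just the pattern), using the crate's error types.

```rust
use kete_core::prelude::{Error, KeteResult};

struct Mosaic {
    ccd_ids: Vec<u32>,
}

impl Mosaic {
    /// Fallible constructor: reject empty input with a `ValueError`.
    fn new(ccd_ids: Vec<u32>) -> KeteResult<Self> {
        if ccd_ids.is_empty() {
            Err(Error::ValueError("Mosaic must contain at least one ccd".into()))?;
        }
        Ok(Self { ccd_ids })
    }

    /// Programmer-error check: `assert!` with a message instead of
    /// `if index >= len { panic!(..) }`.
    fn get(&self, index: usize) -> u32 {
        assert!(index < self.ccd_ids.len(), "ccd index out of range");
        self.ccd_ids[index]
    }
}

fn main() -> KeteResult<()> {
    let mosaic = Mosaic::new(vec![1, 2, 3])?;
    assert_eq!(mosaic.get(0), 1);
    Ok(())
}
```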
pub fn new( cmos_frames: Vec, obsid: Box, @@ -134,11 +137,12 @@ impl SpherexField { ))?; } + #[allow(clippy::missing_panics_doc, reason = "frame is not empty")] let first = cmos_frames.first().unwrap(); let observer = first.observer().clone(); - for ccd in cmos_frames.iter() { + for ccd in &cmos_frames { if ccd.observer().epoch != observer.epoch { Err(Error::ValueError( "All SpherexCMOS must have matching values times".into(), diff --git a/src/kete_core/src/fov/wise.rs b/src/kete_core/src/fov/wise.rs index 0cf66af..c81f134 100644 --- a/src/kete_core/src/fov/wise.rs +++ b/src/kete_core/src/fov/wise.rs @@ -52,6 +52,7 @@ pub struct WiseCmos { impl WiseCmos { /// Create a Wise fov + #[must_use] pub fn new( pointing: Vector, rotation: f64, @@ -61,14 +62,15 @@ impl WiseCmos { ) -> Self { let patch = OnSkyRectangle::new(pointing, rotation, WISE_WIDTH, WISE_WIDTH); Self { - patch, observer, + patch, frame_num, scan_id, } } /// Create a Wise fov from corners + #[must_use] pub fn from_corners( corners: [Vector; 4], observer: State, @@ -77,8 +79,8 @@ impl WiseCmos { ) -> Self { let patch = OnSkyRectangle::from_corners(corners, 60_f64.recip().to_radians()); Self { - patch, observer, + patch, frame_num, scan_id, } @@ -88,9 +90,7 @@ impl WiseCmos { impl FovLike for WiseCmos { #[inline] fn get_fov(&self, index: usize) -> FOV { - if index != 0 { - panic!("Wise FOV only has a single patch") - } + assert!(index == 0, "Wise FOV only has a single patch"); FOV::Wise(self.clone()) } diff --git a/src/kete_core/src/fov/ztf.rs b/src/kete_core/src/fov/ztf.rs index c979d75..64c38df 100644 --- a/src/kete_core/src/fov/ztf.rs +++ b/src/kete_core/src/fov/ztf.rs @@ -71,6 +71,7 @@ pub struct ZtfCcdQuad { impl ZtfCcdQuad { /// Create a ZTF field of view + #[must_use] pub fn new( corners: [Vector; 4], observer: State, @@ -85,25 +86,23 @@ impl ZtfCcdQuad { ) -> Self { let patch = OnSkyRectangle::from_corners(corners, 0.0); Self { - patch, observer, + patch, field, filefracday, - ccdid, + maglimit, + fid, filtercode, imgtypecode, + ccdid, qid, - maglimit, - fid, } } } impl FovLike for ZtfCcdQuad { fn get_fov(&self, index: usize) -> FOV { - if index != 0 { - panic!("FOV only has a single patch") - } + assert!(index == 0, "FOV only has a single patch"); FOV::ZtfCcdQuad(self.clone()) } @@ -159,6 +158,9 @@ impl ZtfField { /// Construct a new [`ZtfField`] from a list of ccd quads. /// These ccd quads must be from the same field and having matching value as /// appropriate. + /// + /// # Errors + /// Construction will fail if no ccds are provided or if they are inconsistent. pub fn new(ccd_quads: Vec) -> KeteResult { if ccd_quads.is_empty() { Err(Error::ValueError( @@ -166,6 +168,7 @@ impl ZtfField { ))?; } + #[allow(clippy::missing_panics_doc, reason = "ccds is not empty")] let first = ccd_quads.first().unwrap(); let observer = first.observer().clone(); @@ -174,7 +177,7 @@ impl ZtfField { let filtercode = first.filtercode.clone(); let imgtypecode = first.imgtypecode.clone(); - for ccd in ccd_quads.iter() { + for ccd in &ccd_quads { if ccd.field != field || ccd.fid != fid || ccd.filtercode != filtercode diff --git a/src/kete_core/src/frames/definitions.rs b/src/kete_core/src/frames/definitions.rs index 55ec0a1..3f2d018 100644 --- a/src/kete_core/src/frames/definitions.rs +++ b/src/kete_core/src/frames/definitions.rs @@ -55,12 +55,14 @@ use super::euler_rotation; pub trait InertialFrame: Sized + Sync + Send + Clone + Copy + Debug + PartialEq { /// Convert a vector from input frame to equatorial frame. 
#[inline(always)] + #[must_use] fn to_equatorial(vec: Vector3) -> Vector3 { Self::rotation_to_equatorial().transform_vector(&vec) } /// Convert a vector from the equatorial frame to this frame. #[inline(always)] + #[must_use] fn from_equatorial(vec: Vector3) -> Vector3 { Self::rotation_to_equatorial().inverse_transform_vector(&vec) } @@ -70,6 +72,7 @@ pub trait InertialFrame: Sized + Sync + Send + Clone + Copy + Debug + PartialEq /// Convert between frames. #[inline(always)] + #[must_use] fn convert(vec: Vector3) -> Vector3 { Target::from_equatorial(Self::to_equatorial(vec)) } @@ -77,6 +80,7 @@ pub trait InertialFrame: Sized + Sync + Send + Clone + Copy + Debug + PartialEq /// Equatorial frame. #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +#[must_use] pub struct Equatorial {} impl InertialFrame for Equatorial { @@ -135,6 +139,7 @@ impl InertialFrame for FK4 { /// General representation of a non-inertial frame. #[derive(Debug, Clone, Copy, PartialEq)] +#[must_use] pub struct NonInertialFrame { /// Time of the frame in TDB. pub time: Time, @@ -189,6 +194,13 @@ impl NonInertialFrame { } /// Return the rotation matrix and rotation rate for this frame in the equatorial frame. + /// + /// # Errors + /// Fails when reference frame is not found or supported. + /// + /// # Panics + /// Panics can occur if a non-inertial reference frame is used but the lock on CK's + /// cannot be taken. pub fn rotations_to_equatorial(&self) -> KeteResult<(Rotation3, Matrix3)> { if self.reference_frame_id == 1 { // Equatorial frame @@ -208,7 +220,7 @@ impl NonInertialFrame { let cks = LOADED_CK.read().unwrap(); // find the segment in the ck data which matches the reference frame id, with its own reference frame id being either 1 or 17. - for segment in cks.segments.iter() { + for segment in &cks.segments { let array: &CkArray = segment.into(); if array.instrument_id == self.reference_frame_id { let frame = segment.try_get_orientation(self.reference_frame_id, self.time); @@ -226,13 +238,13 @@ impl NonInertialFrame { )); } } - Err(Error::ExceedsLimits(format!( + Err(Error::Bounds(format!( "Reference frame ID {} not found in CK data.", self.reference_frame_id ))) } else { // Unsupported frame - Err(Error::ExceedsLimits(format!( + Err(Error::Bounds(format!( "Reference frame ID {} is not supported.", self.reference_frame_id ))) @@ -240,6 +252,10 @@ impl NonInertialFrame { } /// Convert a vector from the equatorial frame to this frame. + /// + /// # Errors + /// May fail if coordinate frame conversion fails, should not be possible. + /// Please raise a github issue if this error occurs. #[allow( clippy::wrong_self_convention, reason = "Always need position and velocity together" @@ -258,6 +274,10 @@ impl NonInertialFrame { } /// Convert a vector from input frame to equatorial frame. + /// + /// # Errors + /// May fail if coordinate frame conversion fails, should not be possible. + /// Please raise a github issue if this error occurs. 
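For the inertial frames, the static conversion helpers are now `#[must_use]`; the round-trip behaviour is exercised by the tests further down. A compact sketch, assuming the frame types and trait are exported as `kete_core::frames::{Ecliptic, Equatorial, InertialFrame}`.

```rust
use kete_core::frames::{Ecliptic, Equatorial, InertialFrame};
use nalgebra::Vector3;

fn main() {
    let vec: Vector3<f64> = [1.0, 2.0, 3.0].into();

    // Round trip: ecliptic -> equatorial -> ecliptic.
    let eq = Ecliptic::to_equatorial(vec);
    let back = Ecliptic::from_equatorial(eq);
    assert!((back - vec).norm() < 1e-12);

    // `convert` routes through the equatorial frame internally, so converting
    // to `Equatorial` matches `to_equatorial` directly.
    let converted = Ecliptic::convert::<Equatorial>(vec);
    assert!((converted - eq).norm() < 1e-12);
}
```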
pub fn to_equatorial( &self, pos: Vector3, @@ -307,25 +327,25 @@ mod tests { fn test_ecliptic_rot_roundtrip() { let vec = Ecliptic::to_equatorial([1.0, 2.0, 3.0].into()); let vec_return = Ecliptic::from_equatorial(vec); - assert!((1.0 - vec_return[0]).abs() <= 10.0 * f64::EPSILON); - assert!((2.0 - vec_return[1]).abs() <= 10.0 * f64::EPSILON); - assert!((3.0 - vec_return[2]).abs() <= 10.0 * f64::EPSILON); + assert!((1.0 - vec_return.x).abs() <= 10.0 * f64::EPSILON); + assert!((2.0 - vec_return.y).abs() <= 10.0 * f64::EPSILON); + assert!((3.0 - vec_return.z).abs() <= 10.0 * f64::EPSILON); } #[test] fn test_fk4_roundtrip() { let vec = FK4::to_equatorial([1.0, 2.0, 3.0].into()); let vec_return = FK4::from_equatorial(vec); - assert!((1.0 - vec_return[0]).abs() <= 10.0 * f64::EPSILON); - assert!((2.0 - vec_return[1]).abs() <= 10.0 * f64::EPSILON); - assert!((3.0 - vec_return[2]).abs() <= 10.0 * f64::EPSILON); + assert!((1.0 - vec_return.x).abs() <= 10.0 * f64::EPSILON); + assert!((2.0 - vec_return.y).abs() <= 10.0 * f64::EPSILON); + assert!((3.0 - vec_return.z).abs() <= 10.0 * f64::EPSILON); } #[test] fn test_galactic_rot_roundtrip() { let vec = Galactic::to_equatorial([1.0, 2.0, 3.0].into()); let vec_return = Galactic::from_equatorial(vec); - assert!((1.0 - vec_return[0]).abs() <= 10.0 * f64::EPSILON); - assert!((2.0 - vec_return[1]).abs() <= 10.0 * f64::EPSILON); - assert!((3.0 - vec_return[2]).abs() <= 10.0 * f64::EPSILON); + assert!((1.0 - vec_return.x).abs() <= 10.0 * f64::EPSILON); + assert!((2.0 - vec_return.y).abs() <= 10.0 * f64::EPSILON); + assert!((3.0 - vec_return.z).abs() <= 10.0 * f64::EPSILON); } #[test] @@ -339,11 +359,11 @@ mod tests { let (r_pos, r_vel) = frame.to_equatorial(pos, vel).unwrap(); let (pos_return, vel_return) = frame.from_equatorial(r_pos, r_vel).unwrap(); - assert!((1.0 - pos_return[0]).abs() <= 10.0 * f64::EPSILON); - assert!((2.0 - pos_return[1]).abs() <= 10.0 * f64::EPSILON); - assert!((3.0 - pos_return[2]).abs() <= 10.0 * f64::EPSILON); - assert!((0.1 - vel_return[0]).abs() <= 10.0 * f64::EPSILON); - assert!((0.2 - vel_return[1]).abs() <= 10.0 * f64::EPSILON); - assert!((0.3 - vel_return[2]).abs() <= 10.0 * f64::EPSILON); + assert!((1.0 - pos_return.x).abs() <= 10.0 * f64::EPSILON); + assert!((2.0 - pos_return.y).abs() <= 10.0 * f64::EPSILON); + assert!((3.0 - pos_return.z).abs() <= 10.0 * f64::EPSILON); + assert!((0.1 - vel_return.x).abs() <= 10.0 * f64::EPSILON); + assert!((0.2 - vel_return.y).abs() <= 10.0 * f64::EPSILON); + assert!((0.3 - vel_return.z).abs() <= 10.0 * f64::EPSILON); } } diff --git a/src/kete_core/src/frames/earth.rs b/src/kete_core/src/frames/earth.rs index 5222e81..3ee389f 100644 --- a/src/kete_core/src/frames/earth.rs +++ b/src/kete_core/src/frames/earth.rs @@ -67,11 +67,13 @@ pub(super) const OBLIQUITY: f64 = 0.40909280422232897; /// Prime vertical radius of curvature. /// This is the radius of curvature of the earth surface at the specific geodetic /// latitude. +#[must_use] pub fn prime_vert_radius(geodetic_lat: f64) -> f64 { EARTH_A / (1.0 - EARTH_E2 * geodetic_lat.sin().powi(2)).sqrt() } /// Compute earths geocentric radius at the specified latitude in km. +#[must_use] pub fn geocentric_radius(geodetic_lat: f64) -> f64 { let (sin, cos) = geodetic_lat.sin_cos(); let a_cos = EARTH_A * cos; @@ -81,6 +83,7 @@ pub fn geocentric_radius(geodetic_lat: f64) -> f64 { } /// Compute geodetic lat/lon/height in radians/km from ECEF position in km. 
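The geodetic helpers that follow gain `#[must_use]` and clearer variable names while keeping the fixed five-iteration refinement, so an ECEF to geodetic round trip closes to well below a meter. A sketch of that round trip with placeholder coordinates; the import path is hypothetical, since the diff does not show how `frames/earth.rs` is re-exported.

```rust
// Hypothetical import path; the functions live in `frames/earth.rs`.
use kete_core::frames::{ecef_to_geodetic_lat_lon, geodetic_lat_lon_to_ecef};

fn main() {
    // A mid-latitude test point, 0.4 km above the ellipsoid.
    let lat = 34.2_f64.to_radians();
    let lon = (-118.17_f64).to_radians();
    let height = 0.4; // km

    let [x, y, z] = geodetic_lat_lon_to_ecef(lat, lon, height);
    let (lat2, lon2, h2) = ecef_to_geodetic_lat_lon(x, y, z);

    assert!((lat2 - lat).abs() < 1e-9);
    assert!((lon2 - lon).abs() < 1e-9);
    assert!((h2 - height).abs() < 1e-6); // km, i.e. about a millimeter
}
```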
+#[must_use] pub fn ecef_to_geodetic_lat_lon(x: f64, y: f64, z: f64) -> (f64, f64, f64) { let longitude = f64::atan2(y, x); let p = (x * x + y * y).sqrt(); @@ -88,32 +91,34 @@ pub fn ecef_to_geodetic_lat_lon(x: f64, y: f64, z: f64) -> (f64, f64, f64) { // initial guess, and iterate. let mut geodetic_lat = geocen_lat; - let mut h = 0.0; + let mut height = 0.0; // this usually converges in only 1-2 iterations, but to reduce CPU branching // don't bother with a convergence check. for _ in 0..5 { - let n = prime_vert_radius(geodetic_lat); - h = p / geodetic_lat.cos() - n; - geodetic_lat = f64::atan(z / p / (1.0 - EARTH_E2 * n / (n + h))); + let vert_rad = prime_vert_radius(geodetic_lat); + height = p / geodetic_lat.cos() - vert_rad; + geodetic_lat = f64::atan(z / p / (1.0 - EARTH_E2 * vert_rad / (vert_rad + height))); } - (geodetic_lat, longitude, h) + (geodetic_lat, longitude, height) } -/// Compute geocentric latitude from geodetic lat/height in radians/km . +/// Compute geocentric latitude from geodetic lat/height in radians/km. +#[must_use] pub fn geodetic_lat_to_geocentric(geodetic_lat: f64, h: f64) -> f64 { let n = prime_vert_radius(geodetic_lat); ((1.0 - EARTH_E2 * n / (n + h)) * geodetic_lat.tan()).atan() } -/// Compute the ECEF X/Y/Z position in km from geodetic lat/lon/height in radians/km -pub fn geodetic_lat_lon_to_ecef(geodetic_lat: f64, geodetic_lon: f64, h: f64) -> [f64; 3] { +/// Compute the ECEF X/Y/Z position in km from geodetic lat/lon/height in radians/km. +#[must_use] +pub fn geodetic_lat_lon_to_ecef(geodetic_lat: f64, geodetic_lon: f64, height: f64) -> [f64; 3] { let n = prime_vert_radius(geodetic_lat); let (sin_gd_lat, cos_gd_lat) = geodetic_lat.sin_cos(); let (sin_gd_lon, cos_gd_lon) = geodetic_lon.sin_cos(); - let x = (n + h) * cos_gd_lat * cos_gd_lon; - let y = (n + h) * cos_gd_lat * sin_gd_lon; - let z = ((1.0 - EARTH_E2) * n + h) * sin_gd_lat; + let x = (n + height) * cos_gd_lat * cos_gd_lon; + let y = (n + height) * cos_gd_lat * sin_gd_lon; + let z = ((1.0 - EARTH_E2) * n + height) * sin_gd_lat; [x, y, z] } @@ -123,6 +128,7 @@ pub fn geodetic_lat_lon_to_ecef(geodetic_lat: f64, geodetic_lon: f64, h: f64) -> /// /// The equation here is from the 2010 Astronomical Almanac. /// +#[must_use] pub fn earth_obliquity(jd: Time) -> f64 { // centuries from j2000 let c = (jd - Time::j2000()).elapsed / 365.25 / 100.0; @@ -138,6 +144,7 @@ pub fn earth_obliquity(jd: Time) -> f64 { /// The ERA is the angle between the Greenwich meridian and the vernal equinox, /// the Equatorial J2000 X-axis. /// +#[must_use] pub fn earth_rotation_angle(time: Time) -> f64 { // Note that second number is not j2000, its the j2000 value in UTC time. let dt = time.jd - 2451545.0; @@ -158,16 +165,16 @@ pub fn earth_rotation_angle(time: Time) -> f64 { /// /// This function is an implementation equation (21) from this paper: /// -/// > "Expressions for IAU 2000 precession quantities" -/// > Capitaine, N. ; Wallace, P. T. ; Chapront, J. +/// > "Expressions for IAU 2000 precession quantities"\\ +/// > Capitaine, N. ; Wallace, P. T. ; Chapront, J.\\ /// > Astronomy and Astrophysics, v.412, p.567-586 (2003) /// /// It is recommended to first look at the following paper, as it provides useful /// discussion to help understand the above model. This defines the model used /// by JPL Horizons: /// -/// > "Precession matrix based on IAU (1976) system of astronomical constants." -/// > Lieske, J. H. +/// > "Precession matrix based on IAU (1976) system of astronomical constants."\\ +/// > Lieske, J. 
H.\\ /// > Astronomy and Astrophysics, vol. 73, no. 3, Mar. 1979, p. 282-284. /// /// The IAU 2000 model paper improves accuracy by approximately ~300 mas/century over @@ -216,6 +223,9 @@ pub fn earth_precession_rotation(time: Time) -> NonInertialFrame { /// /// This will be centered at the Earth, conversion to Sun centered /// requires a computation of Earths position. +/// +/// # Errors +/// This can fail if the equatorial frame conversion fails. pub fn approx_earth_pos_to_ecliptic( time: Time, geodetic_lat: f64, @@ -277,6 +287,7 @@ pub fn next_sunset_sunrise( /// Approximate the Sun's declination angle at solar noon at the specified date. /// /// Returns the declination in radians. +#[must_use] pub fn approx_sun_dec(time: Time) -> f64 { let obliquity = earth_obliquity(time.tdb()); @@ -303,6 +314,7 @@ pub fn approx_sun_dec(time: Time) -> f64 { /// This is largely based off the approximation used by the USNO: /// /// +#[must_use] pub fn equation_of_time(time: Time) -> f64 { let time_since_j2000 = (time - Time::j2000()).elapsed; let mean_lon_of_sun = (280.459 + 0.98564736 * time_since_j2000).rem_euclid(360.0); @@ -327,7 +339,7 @@ pub fn approx_solar_noon(time: Time, geodetic_lon: f64) -> Time { let (y, m, d, _) = time.year_month_day(); let frac_of_earth = -geodetic_lon.to_degrees().rem_euclid(360.0) / 360.0; - Time::::from_year_month_day(y, m, d, 0.5 + frac_of_earth).jd + Time::::from_year_month_day(y.into(), m, d, 0.5 + frac_of_earth).jd }; let mut noon = noon + equation_of_time(Time::::new(noon)); while noon <= time.jd { diff --git a/src/kete_core/src/frames/rotation.rs b/src/kete_core/src/frames/rotation.rs index 5525660..b40df66 100644 --- a/src/kete_core/src/frames/rotation.rs +++ b/src/kete_core/src/frames/rotation.rs @@ -54,6 +54,8 @@ use nalgebra::{Matrix3, Quaternion, Rotation3, Unit, Vector3}; /// assert!((euler[2] - 0.3).abs() < 1e-12); /// ``` /// +#[must_use] +#[allow(clippy::cast_sign_loss, reason = "Casts are positive by construction")] pub fn quaternion_to_euler( quat: Quaternion, ) -> [f64; 3] { @@ -66,9 +68,9 @@ pub fn quaternion_to_euler( assert!(E1 != E2 && E2 != E3, "Middle axis must not match outer."); } - let i = const { char_to_index::() }; - let j = const { char_to_index::() }; - let k = const { + let idi = const { char_to_index::() }; + let idj = const { char_to_index::() }; + let idk = const { if E1 == E3 { // 1 + 2 + 3 = 6, so the remaining axis is 6 - i - j 6 - char_to_index::() - char_to_index::() @@ -79,34 +81,34 @@ pub fn quaternion_to_euler( let proper = const { E1 == E3 }; - let epsilon = f64::from((i - j) * (j - k) * (k - i) / 2); + let epsilon = f64::from((idi - idj) * (idj - idk) * (idk - idi) / 2); - let i = i as usize; - let j = j as usize; - let k = k as usize; + let idi = idi as usize; + let idj = idj as usize; + let idk = idk as usize; - let q = [quat.w, quat.i, quat.j, quat.k]; + let quat = [quat.w, quat.i, quat.j, quat.k]; let [a, b, c, d] = if proper { - [q[0], q[i], q[j], q[k] * epsilon] + [quat[0], quat[idi], quat[idj], quat[idk] * epsilon] } else { [ - q[0] - q[j], - q[i] + q[k] * epsilon, - q[j] + q[0], - q[k] * epsilon - q[i], + quat[0] - quat[idj], + quat[idi] + quat[idk] * epsilon, + quat[idj] + quat[0], + quat[idk] * epsilon - quat[idi], ] }; - let n = a.powi(2) + b.powi(2) + c.powi(2) + d.powi(2); + let normal = a.powi(2) + b.powi(2) + c.powi(2) + d.powi(2); - let mut theta_2 = (2.0 * ((a.powi(2) + b.powi(2)) / n) - 1.0).acos(); + let mut theta_2 = (2.0 * ((a.powi(2) + b.powi(2)) / normal) - 1.0).acos(); let theta_plus = b.atan2(a); let 
theta_minus = d.atan2(c); let (theta_1, mut theta_3) = match theta_2 { - t if t.abs() < 1e-10 => (0.0, 2.0 * theta_plus), - t if (t - PI / 2.0).abs() < 1e-10 => (0.0, 2.0 * theta_minus), + tmp if tmp.abs() < 1e-10 => (0.0, 2.0 * theta_plus), + tmp if (tmp - PI / 2.0).abs() < 1e-10 => (0.0, 2.0 * theta_minus), _ => (theta_plus - theta_minus, theta_plus + theta_minus), }; @@ -130,6 +132,7 @@ pub fn quaternion_to_euler( /// second is the derivative of the 3x3 matrix with respect to time. These two matrices /// may be used to compute the new position and velocities when moving from one frame /// to another. +#[must_use] pub fn euler_rotation( angles: &[f64; 3], rates: &[f64; 3], diff --git a/src/kete_core/src/frames/vector.rs b/src/kete_core/src/frames/vector.rs index 8bd6eda..eec0f8e 100644 --- a/src/kete_core/src/frames/vector.rs +++ b/src/kete_core/src/frames/vector.rs @@ -40,6 +40,7 @@ use std::ops::{Add, AddAssign, Div, Index, IndexMut, Mul, Neg, Sub, SubAssign}; /// Vector with frame information. /// All vectors are 3D vectors in an inertial frame. #[derive(Debug, Clone, Copy, PartialEq, Deserialize, Serialize)] +#[must_use] pub struct Vector { /// Underlying vector data. raw: [f64; 3], @@ -69,6 +70,7 @@ impl Vector { /// Are all element of the vector finite. #[inline(always)] + #[must_use] pub fn is_finite(&self) -> bool { self.raw.iter().all(|x| x.is_finite()) } @@ -96,6 +98,7 @@ impl Vector { /// Dot product between two vectors #[inline(always)] + #[must_use] pub fn dot(&self, other: &Self) -> f64 { self.raw .iter() @@ -114,18 +117,21 @@ impl Vector { /// Squared euclidean length. #[inline(always)] + #[must_use] pub fn norm_squared(&self) -> f64 { self.raw.iter().map(|a| a.powi(2)).sum() } /// The euclidean length of the vector. #[inline(always)] + #[must_use] pub fn norm(&self) -> f64 { self.raw.iter().map(|a| a.powi(2)).sum::().sqrt() } /// The angle betweeen two vectors in radians. #[inline(always)] + #[must_use] pub fn angle(&self, other: &Self) -> f64 { Vector3::from(self.raw).angle(&Vector3::from(other.raw)) } @@ -149,6 +155,7 @@ impl Vector { /// Convert a vector to polar spherical coordinates. /// /// + #[must_use] pub fn to_polar_spherical(&self) -> (f64, f64) { let vec = self.normalize(); let theta = vec.raw[2].acos(); @@ -166,6 +173,7 @@ impl Vector { /// Convert a unit vector to latitude and longitude. #[inline(always)] + #[must_use] pub fn to_lat_lon(self) -> (f64, f64) { let (mut lat, mut lon) = self.to_polar_spherical(); if lat > PI { @@ -185,6 +193,7 @@ impl Vector { /// Convert a unit vector to ra and dec. 
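The frame-tagged `Vector` keeps its nalgebra-like surface, now with `#[must_use]` on the accessors. A short sketch of the basic operations, using the `From<[f64; 3]>` conversion that the FOV code above relies on; paths follow `kete_core::frames::{Equatorial, Vector}`.

```rust
use kete_core::frames::{Equatorial, Vector};

fn main() {
    let x_axis: Vector<Equatorial> = [3.0, 0.0, 0.0].into();
    let y_axis: Vector<Equatorial> = [0.0, 4.0, 0.0].into();

    assert!(x_axis.is_finite());
    assert!((x_axis.norm() - 3.0).abs() < 1e-12);
    assert!((x_axis.norm_squared() - 9.0).abs() < 1e-12);

    // Orthogonal vectors: zero dot product, 90 degree separation.
    assert!(x_axis.dot(&y_axis).abs() < 1e-12);
    assert!((x_axis.angle(&y_axis) - std::f64::consts::FRAC_PI_2).abs() < 1e-12);
}
```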
#[inline(always)] + #[must_use] pub fn to_ra_dec(self) -> (f64, f64) { let (mut dec, mut ra) = self.to_polar_spherical(); if dec > PI { @@ -199,14 +208,14 @@ impl Index for Vector { type Output = f64; #[inline(always)] fn index(&self, index: usize) -> &Self::Output { - &self.raw[index] + self.raw.index(index) } } impl IndexMut for Vector { #[inline(always)] fn index_mut(&mut self, index: usize) -> &mut Self::Output { - &mut self.raw[index] + self.raw.index_mut(index) } } diff --git a/src/kete_core/src/io/bytes.rs b/src/kete_core/src/io/bytes.rs index 7e3978a..0282f50 100644 --- a/src/kete_core/src/io/bytes.rs +++ b/src/kete_core/src/io/bytes.rs @@ -37,7 +37,7 @@ pub(crate) fn read_bytes_exact(buffer: T, n_bytes: usize) -> KeteResult let n_read = buffer.take(n_bytes as u64).read_to_end(&mut bytes)?; if n_read != n_bytes { Err(Error::IOError("Unexpected end of file.".into()))?; - }; + } Ok(bytes.into()) } @@ -93,11 +93,11 @@ pub(crate) fn bytes_to_i32(bytes: &[u8], little_endian: bool) -> KeteResult /// Change a collection of bytes into a String. pub(crate) fn bytes_to_string(bytes: &[u8]) -> String { let mut bytes = bytes.to_vec(); - bytes.iter_mut().for_each(|x| { + for x in &mut bytes { if x == &0x00 { *x = 0x0a; } - }); + } String::from_utf8_lossy(&bytes).to_string() } diff --git a/src/kete_core/src/io/mod.rs b/src/kete_core/src/io/mod.rs index 3b2021c..3438025 100644 --- a/src/kete_core/src/io/mod.rs +++ b/src/kete_core/src/io/mod.rs @@ -49,6 +49,8 @@ where /// Binary file formats as used by kete are not guaranteed to be stable in future /// versions. /// + /// # Errors + /// Saving is fallible due to filesystem calls. fn save(&self, filename: String) -> KeteResult { let mut f = BufWriter::new(File::create(filename)?); encode_into_std_write(self, &mut f, bincode::config::legacy()) @@ -59,6 +61,9 @@ where /// /// Binary file formats as used by kete are not guaranteed to be stable in future /// versions. + /// + /// # Errors + /// Loading is fallible due to filesystem calls. fn load(filename: String) -> KeteResult { let mut f = BufReader::new(File::open(filename)?); decode_from_std_read(&mut f, bincode::config::legacy()) @@ -69,6 +74,9 @@ where /// /// Binary file formats as used by kete are not guaranteed to be stable in future /// versions. + /// + /// # Errors + /// Saving is fallible due to filesystem calls. fn save_vec(vec: &[Self], filename: String) -> KeteResult<()> { let mut f = BufWriter::new(File::create(filename)?); @@ -81,6 +89,9 @@ where /// /// Binary file formats as used by kete are not guaranteed to be stable in future /// versions. + /// + /// # Errors + /// Loading is fallible due to filesystem calls. fn load_vec(filename: String) -> KeteResult> { let mut f = BufReader::new(File::open(filename)?); diff --git a/src/kete_core/src/io/parquet.rs b/src/kete_core/src/io/parquet.rs index b557e14..7813083 100644 --- a/src/kete_core/src/io/parquet.rs +++ b/src/kete_core/src/io/parquet.rs @@ -40,6 +40,9 @@ use crate::state::State; use polars::prelude::*; /// Write a collection of states to a parquet table. +/// +/// # Errors +/// Saving is fallible due to filesystem calls. 
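The `# Errors` notes added to the `FileIO` save/load helpers and to `write_states_parquet` all describe the same pattern: filesystem calls are propagated with `?` rather than unwrapped. A minimal sketch of that pattern using only the standard library, as a stand-in for the crate's `KeteResult`:

```rust
use std::fs::File;
use std::io::{BufWriter, Write};

// Any of the filesystem calls below can fail, so the function returns a Result
// rather than panicking; callers decide how to handle the error.
fn save(filename: &str, payload: &[u8]) -> std::io::Result<()> {
    let mut f = BufWriter::new(File::create(filename)?); // bad path, permissions, ...
    f.write_all(payload)?; // disk full, closed handle, ...
    Ok(())
}

fn main() {
    // Writing into a directory that does not exist surfaces as an Err, not a panic.
    assert!(save("/no/such/directory/kete.bin", b"demo").is_err());
}
```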
pub fn write_states_parquet(states: &[State], filename: &str) -> KeteResult<()> { let desigs = Column::new( "desig".into(), @@ -81,8 +84,8 @@ pub fn write_states_parquet(states: &[State], filename: &str) -> Ket states.iter().map(|state| state.center_id).collect_vec(), ); let mut df = DataFrame::new(vec![desigs, jd, x, y, z, vx, vy, vz, center]) - .expect("Failed to construct dataframe"); - let file = File::create(filename).expect("could not create file"); + .map_err(|_| Error::ValueError("Failed to construct dataframe".into()))?; + let file = File::create(filename)?; let _ = ParquetWriter::new(file) .finish(&mut df) .map_err(|_| Error::IOError("Failed to write to file".into()))?; @@ -90,6 +93,14 @@ pub fn write_states_parquet(states: &[State], filename: &str) -> Ket } /// Read a collection of states from a parquet table. +/// +/// # Errors +/// Reading files can fail for numerous reasons, incorrectly formatted, inconsistent +/// contents, etc. +/// +/// # Panics +/// There are a number of unwraps in this function, but it is structured such that they +/// should not be possible to reach. pub fn read_states_parquet(filename: &str) -> KeteResult>> { // this reads the parquet table, then creates iterators over the contents, making // states by going through the iterators one at a time. @@ -105,14 +116,14 @@ pub fn read_states_parquet(filename: &str) -> KeteResult>> .column("desig") .map_err(|_| Error::IOError("File doesn't contain the correct columns".into()))? .str() - .expect("Designations are not all strings.") + .map_err(|_| Error::ValueError("Designations are not all strings.".into()))? .into_no_null_iter(); let mut center_iter = dataframe .column("center") .map_err(|_| Error::IOError("File doesn't contain the correct columns".into()))? .i32() - .expect("Centers are not all ints.") + .map_err(|_| Error::ValueError("Centers are not all ints.".into()))? 
.into_no_null_iter(); // the remaining columns are all floats, so here we make a vector of iterators of @@ -123,10 +134,10 @@ pub fn read_states_parquet(filename: &str) -> KeteResult>> .iter() .map(|s| { s.f64() - .expect("state information is not all floats.") - .into_no_null_iter() + .map_err(|_| Error::ValueError("state information is not all floats.".into())) + .map(ChunkedArray::::into_no_null_iter) }) - .collect::>(); + .collect::>>()?; Ok((0..dataframe.height()) .map(|_| { @@ -135,13 +146,20 @@ pub fn read_states_parquet(filename: &str) -> KeteResult>> .expect("should have as many iterations as rows"); let center_id = center_iter.next().unwrap(); - let jd = state_iters[0].next().unwrap(); - let x = state_iters[1].next().unwrap(); - let y = state_iters[2].next().unwrap(); - let z = state_iters[3].next().unwrap(); - let vx = state_iters[4].next().unwrap(); - let vy = state_iters[5].next().unwrap(); - let vz = state_iters[6].next().unwrap(); + let [jd, x, y, z, vx, vy, vz] = + if let [jd, x, y, z, vx, vy, vz] = state_iters.as_mut_slice() { + [ + jd.next().unwrap(), + x.next().unwrap(), + y.next().unwrap(), + z.next().unwrap(), + vx.next().unwrap(), + vy.next().unwrap(), + vz.next().unwrap(), + ] + } else { + unreachable!() + }; State::new( crate::desigs::Desig::Name(desig.to_string()), diff --git a/src/kete_core/src/propagation/acceleration.rs b/src/kete_core/src/propagation/acceleration.rs index b0a29f1..a64a340 100644 --- a/src/kete_core/src/propagation/acceleration.rs +++ b/src/kete_core/src/propagation/acceleration.rs @@ -95,6 +95,9 @@ impl Default for CentralAccelMeta { /// * `pos` - A vector which defines the position with respect to the Sun in AU. /// * `vel` - A vector which defines the velocity with respect to the Sun in AU/Day. /// * `meta` - Metadata object which records values at integration steps. +/// +/// # Errors +/// This is actually infallible, but must have this signature for the integrator. pub fn central_accel( time: Time, pos: &Vector3, @@ -150,6 +153,9 @@ pub struct AccelSPKMeta<'a> { /// * `pos` - A vector which defines the position with respect to the Sun in AU. /// * `vel` - A vector which defines the velocity with respect to the Sun in AU/Day multiplied by `SUN_GMS_SQRT`. /// * `meta` - Metadata object [`AccelSPKMeta`] which records values at each integration step. +/// +/// # Errors +/// This is actually infallible, but must have this signature for the integrator. pub fn spk_accel( time: Time, pos: &Vector3, @@ -159,7 +165,7 @@ pub fn spk_accel( ) -> KeteResult> { let mut accel = Vector3::::zeros(); - let spk = &LOADED_SPK.try_read().unwrap(); + let spk = &LOADED_SPK.try_read()?; for grav_params in meta.massive_obj { let id = grav_params.naif_id; @@ -197,6 +203,9 @@ pub fn spk_accel( /// /// The `state_vec` is made up of concatenated position and velocity vectors. /// Otherwise this is just a thin wrapper over the [`spk_accel`] function. +/// +/// # Errors +/// Fails when SPK queries fail. pub fn spk_accel_first_order( time: Time, state_vec: &SVector, @@ -239,6 +248,9 @@ pub struct AccelVecMeta<'a> { /// * `pos` - A vector which defines the position with respect to the Sun in AU. /// * `vel` - A vector which defines the velocity with respect to the Sun in AU/Day. /// * `meta` - Metadata. +/// +/// # Errors +/// Fails during an impact. pub fn vec_accel( time: Time, pos: &OVector, @@ -311,6 +323,7 @@ pub fn central_accel_grad( /// Calculate the Jacobian for the [`central_accel`] function. /// /// This enables the computation of the two body STM. 
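The rewritten column handling in `read_states_parquet` above relies on slice-pattern matching: binding all seven iterators at once replaces seven separate indexed lookups and keeps the `unreachable!()` branch explicit. A small self-contained sketch of the same idiom, with plain `Vec` iterators standing in for the polars columns:

```rust
fn main() {
    // Seven stand-in "columns", each holding one value per row.
    let mut columns: Vec<std::vec::IntoIter<f64>> =
        (0..7).map(|i| vec![f64::from(i); 2].into_iter()).collect();

    // Destructure the whole slice at once; the else branch is provably dead
    // because the vector was built with exactly seven entries.
    if let [jd, x, y, z, vx, vy, vz] = columns.as_mut_slice() {
        let row = [
            jd.next().unwrap(),
            x.next().unwrap(),
            y.next().unwrap(),
            z.next().unwrap(),
            vx.next().unwrap(),
            vy.next().unwrap(),
            vz.next().unwrap(),
        ];
        assert_eq!(row, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
    } else {
        unreachable!("exactly seven state columns are expected");
    }
}
```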
+#[must_use] pub fn accel_grad( obj_pos: &Vector3, _obj_vel: &Vector3, diff --git a/src/kete_core/src/propagation/kepler.rs b/src/kete_core/src/propagation/kepler.rs index 7876d43..253359b 100644 --- a/src/kete_core/src/propagation/kepler.rs +++ b/src/kete_core/src/propagation/kepler.rs @@ -56,6 +56,10 @@ pub const PARABOLIC_ECC_LIMIT: f64 = 1e-4; /// * `mean_anomaly` - Mean anomaly. /// * `peri_dist` - Perihelion Distance in AU, only used for parabolic orbits. /// +/// # Errors /// +/// Fails for numerous reasons, including if a negative eccentricity is provided or if it +/// is a non-finite value. pub fn compute_eccentric_anomaly(ecc: f64, mean_anom: f64, peri_dist: f64) -> KeteResult { match ecc { ecc if !ecc.is_finite() => Err(Error::ValueError( @@ -99,6 +103,9 @@ /// * `mean_anomaly` - Mean anomaly, between 0 and 2 pi. /// * `peri_dist` - Perihelion Distance in AU, only used for parabolic orbits. /// +/// # Errors +/// Fails when eccentricity is not between 0 and 1, or peri dist is negative, or mean +/// anomaly is not between 0 and 2 pi. pub fn compute_true_anomaly(ecc: f64, mean_anom: f64, peri_dist: f64) -> KeteResult { let ecc_anom = compute_eccentric_anomaly(ecc, mean_anom, peri_dist)?; @@ -125,6 +132,9 @@ /// * `true_anomaly` - true anomaly, between 0 and 2 pi. /// * `peri_dist` - Perihelion Distance in AU, only used for parabolic orbits. /// +/// # Errors +/// Fails when eccentricity is not between 0 and 1, or peri dist is negative, or true +/// anomaly is not between 0 and 2 pi. pub fn eccentric_anomaly_from_true(ecc: f64, true_anom: f64, peri_dist: f64) -> KeteResult { let ecc_anom = match ecc { ecc if !ecc.is_finite() => Err(Error::ValueError( @@ -283,6 +293,11 @@ fn solve_kepler_universal(mut dt: f64, r0: f64, v0: f64, rv0: f64) -> KeteResult /// * `pos` - Starting position, from the center of the sun, in AU. /// * `vel` - Starting velocity, in AU/Day. /// * `depth` - Current Recursion depth, this should be called with `None` initially. +/// +/// # Errors +/// Fails for a number of reasons, including: +/// - Input contains non-finite values +/// - Hitting recursion depth limits.
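For context on what `compute_eccentric_anomaly` is solving: Kepler's equation M = E - e*sin(E) has no closed form, so it is handled iteratively. A minimal illustration for the elliptical case only, using Newton iteration; the crate also covers the parabolic and hyperbolic branches, and its actual solver may differ from this sketch:

```rust
// Newton iteration on f(E) = E - e*sin(E) - M for an elliptical orbit (0 <= e < 1).
fn eccentric_anomaly_elliptical(ecc: f64, mean_anom: f64) -> f64 {
    let mut e_anom = mean_anom; // reasonable starting guess for small eccentricity
    for _ in 0..50 {
        let delta = (e_anom - ecc * e_anom.sin() - mean_anom) / (1.0 - ecc * e_anom.cos());
        e_anom -= delta;
        if delta.abs() < 1e-14 {
            break;
        }
    }
    e_anom
}

fn main() {
    let (ecc, mean_anom) = (0.1, 0.5);
    let e_anom = eccentric_anomaly_elliptical(ecc, mean_anom);
    // Substituting back into Kepler's equation recovers the mean anomaly.
    assert!((e_anom - ecc * e_anom.sin() - mean_anom).abs() < 1e-12);
}
```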
pub fn analytic_2_body( time: Duration, pos: &Vector3, @@ -305,28 +320,23 @@ pub fn analytic_2_body( let v0 = vel.norm(); let rv0 = pos.dot(vel); - if rv0.is_nan() || rv0.is_infinite() { + if !rv0.is_finite() { Err(Error::Convergence("Input included infinity or NAN.".into()))?; } - - match solve_kepler_universal(time, r0, v0, rv0) { - Ok((universal_s, beta)) => { - let g1 = g_1(universal_s, beta); - let g2 = g_2(universal_s, beta); - let f = 1.0 - GMS / r0 * g2; - let g = r0 * g1 + rv0 * g2; - let new_pos = pos * f + vel * g; - let new_r0 = new_pos.norm(); - let f_dot = -GMS / (new_r0 * r0) * g1; - let g_dot = 1.0 - (GMS / new_r0) * g2; - let new_vel = pos * f_dot + vel * g_dot; - Ok((new_pos, new_vel)) - } - Err(_) => { - let (inter_pos, inter_vel) = - analytic_2_body((0.5 * time).into(), pos, vel, Some(depth))?; - analytic_2_body((time * 0.5).into(), &inter_pos, &inter_vel, Some(depth)) - } + if let Ok((universal_s, beta)) = solve_kepler_universal(time, r0, v0, rv0) { + let g1 = g_1(universal_s, beta); + let g2 = g_2(universal_s, beta); + let f = 1.0 - GMS / r0 * g2; + let g = r0 * g1 + rv0 * g2; + let new_pos = pos * f + vel * g; + let new_r0 = new_pos.norm(); + let f_dot = -GMS / (new_r0 * r0) * g1; + let g_dot = 1.0 - (GMS / new_r0) * g2; + let new_vel = pos * f_dot + vel * g_dot; + Ok((new_pos, new_vel)) + } else { + let (inter_pos, inter_vel) = analytic_2_body((0.5 * time).into(), pos, vel, Some(depth))?; + analytic_2_body((time * 0.5).into(), &inter_pos, &inter_vel, Some(depth)) } } @@ -337,6 +347,9 @@ pub fn analytic_2_body( /// recommended to use a center of the Sun (10) for this computation. /// /// This is the fastest method for getting a relatively good estimate of the orbits. +/// +/// # Errors +/// Two body calculation may fail in extreme cases. pub fn propagate_two_body( state: &State, time_final: Time, @@ -380,6 +393,9 @@ impl CostFunction for MoidCost { /// Compute the MOID between two states in au. /// MOID = Minimum Orbital Intersection Distance +/// +/// # Errors +/// Can fail due to orbital element conversion errors, or failing to find minimum. 
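The restructured `analytic_2_body` keeps its fallback strategy: if the universal-variable solve does not converge over the requested interval, the interval is halved and each half is propagated recursively, with the depth counter guarding against runaway recursion. A toy sketch of that control flow with a stand-in `step` function (not the real solver):

```rust
// Pretend solver that only converges for short steps.
fn step(x: f64, dt: f64) -> Result<f64, ()> {
    if dt.abs() > 1.0 { Err(()) } else { Ok(x + dt) }
}

// Step-halving fallback: on failure, split the interval and recurse on each half.
fn propagate(x: f64, dt: f64, depth: u32) -> Result<f64, ()> {
    if depth > 16 {
        return Err(()); // analogous to the recursion-depth error in the real code
    }
    match step(x, dt) {
        Ok(next) => Ok(next),
        Err(()) => {
            let halfway = propagate(x, 0.5 * dt, depth + 1)?;
            propagate(halfway, 0.5 * dt, depth + 1)
        }
    }
}

fn main() {
    // A long step succeeds by being broken into eight unit-sized pieces.
    assert_eq!(propagate(0.0, 8.0, 0), Ok(8.0));
}
```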
pub fn moid(mut state_a: State, mut state_b: State) -> KeteResult { const N_STEPS: i32 = 50; @@ -389,12 +405,12 @@ pub fn moid(mut state_a: State, mut state_b: State) -> K state_b = propagate_two_body(&state_b, elements_b.peri_time)?; let state_a_step_size = match elements_a.orbital_period() { - p if p.is_finite() => p / N_STEPS as f64, - _ => 300.0 / N_STEPS as f64, + p if p.is_finite() => p / f64::from(N_STEPS), + _ => 300.0 / f64::from(N_STEPS), }; let state_b_step_size = match elements_b.orbital_period() { - p if p.is_finite() => p / N_STEPS as f64, - _ => 300.0 / N_STEPS as f64, + p if p.is_finite() => p / f64::from(N_STEPS), + _ => 300.0 / f64::from(N_STEPS), }; let mut states_b: Vec> = Vec::with_capacity(N_STEPS as usize); @@ -403,11 +419,11 @@ pub fn moid(mut state_a: State, mut state_b: State) -> K for idx in (-N_STEPS)..N_STEPS { states_a.push(propagate_two_body( &state_a, - (state_a.epoch.jd + idx as f64 * state_a_step_size).into(), + (state_a.epoch.jd + f64::from(idx) * state_a_step_size).into(), )?); states_b.push(propagate_two_body( &state_b, - (state_b.epoch.jd + idx as f64 * state_b_step_size).into(), + (state_b.epoch.jd + f64::from(idx) * state_b_step_size).into(), )?); } let mut best = (f64::INFINITY, state_a.clone(), state_b.clone()); @@ -429,8 +445,7 @@ pub fn moid(mut state_a: State, mut state_b: State) -> K let res = Executor::new(cost, solver) .configure(|state| state.max_iters(1000)) - .run() - .unwrap(); + .run()?; Ok(res.state().get_best_cost()) } @@ -464,7 +479,7 @@ mod tests { #[test] fn test_compute_eccentric_anom_hyperbolic() { for mean_anom in -100..100 { - let mean_anom = mean_anom as f64; + let mean_anom = f64::from(mean_anom); assert!( compute_eccentric_anomaly(2.0, mean_anom, 0.1).is_ok(), "Mean Anom: {mean_anom}" diff --git a/src/kete_core/src/propagation/mod.rs b/src/kete_core/src/propagation/mod.rs index d88d7f8..084fec3 100644 --- a/src/kete_core/src/propagation/mod.rs +++ b/src/kete_core/src/propagation/mod.rs @@ -65,49 +65,11 @@ pub use radau::RadauIntegrator; pub use runge_kutta::RK45Integrator; pub use state_transition::compute_state_transition; -/// Using the Radau 15th order integrator, integrate the position and velocity of an -/// object assuming two body mechanics with a central object located at 0, 0 with the -/// mass of the sun. -/// -/// This is primarily intended for testing the numerical integrator, it is strongly -/// recommended to use the Kepler analytic equation to do actual two body calculations. -pub fn propagate_two_body_radau(dt: f64, pos: &[f64; 3], vel: &[f64; 3]) -> ([f64; 3], [f64; 3]) { - let res = RadauIntegrator::integrate( - ¢ral_accel, - Vector3::from(*pos), - Vector3::from(*vel), - 0.0.into(), - dt.into(), - CentralAccelMeta::default(), - ) - .unwrap(); - (res.0.into(), res.1.into()) -} - -/// Propagate the state of an object, only considering linear motion. -/// -/// This is a very poor approximation over more than a few minutes/hours, however it -/// is very fast. -pub fn propagate_linear( - state: &State, - jd_final: Time, -) -> KeteResult> { - let dt = (jd_final - state.epoch).elapsed; - let mut pos: Vector3 = state.pos.into(); - pos.iter_mut() - .zip(state.vel) - .for_each(|(p, v)| *p += v * dt); - - Ok(State::new( - state.desig.clone(), - jd_final, - pos.into(), - state.vel, - state.center_id, - )) -} - /// Propagate an object using full N-Body physics with the Radau 15th order integrator. 
+/// +/// # Errors +/// Propagation may fail for a number of reasons, including missing SPK data, +/// integration near singularities, or impacts. pub fn propagate_n_body_spk( mut state: State, jd_final: Time, @@ -115,7 +77,7 @@ non_grav_model: Option, ) -> KeteResult> { let center = state.center_id; - let spk = &LOADED_SPK.try_read().unwrap(); + let spk = &LOADED_SPK.try_read()?; spk.try_change_center(&mut state, 0)?; let mass_list = { @@ -143,7 +105,7 @@ )? }; - let mut new_state = State::new(state.desig.to_owned(), jd_final, pos.into(), vel.into(), 0); + let mut new_state = State::new(state.desig.clone(), jd_final, pos.into(), vel.into(), 0); spk.try_change_center(&mut new_state, center)?; Ok(new_state) } @@ -173,6 +135,10 @@ fn picard_two_body_init( } /// Propagate an object using full N-Body physics with the Radau 15th order integrator. +/// +/// # Errors +/// Propagation may fail for a number of reasons, including missing SPK data, +/// integration near singularities, or impacts. pub fn propagate_picard_n_body_spk( mut state: State, jd_final: Time, @@ -180,7 +146,7 @@ non_grav_model: Option, ) -> KeteResult> { let center = state.center_id; - let spk = &LOADED_SPK.try_read().unwrap(); + let spk = &LOADED_SPK.try_read()?; spk.try_change_center(&mut state, 0)?; let mass_list = { @@ -222,37 +188,20 @@ let pos: Vector3 = final_state_vec.fixed_rows::<3>(0).into(); let vel: Vector3 = final_state_vec.fixed_rows::<3>(3).into(); - let mut new_state = State::new(state.desig.to_owned(), jd_final, pos.into(), vel.into(), 0); + let mut new_state = State::new(state.desig.clone(), jd_final, pos.into(), vel.into(), 0); spk.try_change_center(&mut new_state, center)?; Ok(new_state) } -/// Propagate an object using two body mechanics. -/// This is a brute force way to solve the kepler equations of motion as it uses Radau -/// as an integrator. -/// -/// It is *strongly recommended* to use the `kepler.rs` code for this, as -/// it will be much more computationally efficient. -pub fn propagation_central( - state: &State, - jd_final: Time, -) -> KeteResult<[[f64; 3]; 2]> { - let pos: Vector3 = state.pos.into(); - let vel: Vector3 = state.vel.into(); - let (pos, vel, _meta) = RadauIntegrator::integrate( - &central_accel, - pos, - vel, - state.epoch, - jd_final, - CentralAccelMeta::default(), - )?; - Ok([pos.into(), vel.into()]) -} - /// Propagate using n-body mechanics but skipping SPK queries. /// This will propagate all planets and the Moon, so it may vary from SPK states slightly. -#[allow(clippy::type_complexity, reason = "Not practical to avoid this")] +/// +/// # Errors +/// Propagation may fail for a number of reasons, including lacking SPK information, +/// numerical singularities, or slow convergence of the integrator. +/// +/// # Panics +/// Panics if planet states are not provided and the planet states cannot be found in the loaded SPKs.
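Several of the changes above swap `LOADED_SPK.try_read().unwrap()` for `LOADED_SPK.try_read()?`, so a contended or poisoned lock becomes an error instead of a panic. A sketch of the same idea against `std::sync::RwLock`, with a stand-in error type; the real code returns `KeteResult`, which appears to carry a `LockFailed` variant for this case:

```rust
use std::sync::RwLock;

#[derive(Debug)]
enum Error {
    LockFailed,
}

// Allow `?` to convert any failed lock acquisition into our error type.
impl<T> From<std::sync::TryLockError<T>> for Error {
    fn from(_: std::sync::TryLockError<T>) -> Self {
        Self::LockFailed
    }
}

fn read_len(lock: &RwLock<Vec<f64>>) -> Result<usize, Error> {
    let guard = lock.try_read()?; // previously: .try_read().unwrap()
    Ok(guard.len())
}

fn main() {
    let lock = RwLock::new(vec![1.0, 2.0, 3.0]);
    assert_eq!(read_len(&lock).unwrap(), 3);
}
```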
pub fn propagate_n_body_vec( states: Vec>, jd_final: Time, @@ -271,14 +220,15 @@ pub fn propagate_n_body_vec( ))?; } + #[allow(clippy::missing_panics_doc, reason = "not possible by construction.")] let jd_init = states.first().unwrap().epoch; let mut pos: Vec = Vec::new(); let mut vel: Vec = Vec::new(); let mut desigs: Vec = Vec::new(); + let spk = &LOADED_SPK.try_read()?; let planet_states = planet_states.unwrap_or_else(|| { - let spk = &LOADED_SPK.try_read().unwrap(); let mut planet_states = Vec::new(); for obj in GravParams::simplified_planets() { let planet = spk diff --git a/src/kete_core/src/propagation/nongrav.rs b/src/kete_core/src/propagation/nongrav.rs index 0a89b58..1f7cf31 100644 --- a/src/kete_core/src/propagation/nongrav.rs +++ b/src/kete_core/src/propagation/nongrav.rs @@ -94,6 +94,7 @@ impl NonGravModel { /// Construct a new non-grav model, manually specifying all parameters. /// Consider using the other constructors if this is a simple object. #[allow(clippy::too_many_arguments, reason = "Not practical to avoid this")] + #[must_use] pub fn new_jpl( a1: f64, a2: f64, @@ -119,11 +120,13 @@ impl NonGravModel { } /// Construct a new non-grav dust model. + #[must_use] pub fn new_dust(beta: f64) -> Self { Self::Dust { beta } } /// Construct a new non-grav model which follows the default comet drop-off. + #[must_use] pub fn new_jpl_comet_default(a1: f64, a2: f64, a3: f64) -> Self { Self::JplComet { a1, @@ -140,6 +143,9 @@ impl NonGravModel { /// Compute the non-gravitational acceleration vector when provided the position /// and velocity vector with respect to the sun. + /// + /// # Panics + /// Panics when two body propagation fails. #[inline(always)] pub fn add_acceleration( &self, @@ -176,7 +182,7 @@ impl NonGravModel { if !dt.is_zero() { (pos, _) = analytic_2_body((-dt).into(), &pos, vel, None).unwrap(); - }; + } let rr0 = pos.norm() / r_0; let scale = alpha * rr0.powf(-m) * (1.0 + rr0.powf(*n)).powf(-k); *accel += pos_norm * (scale * a1); diff --git a/src/kete_core/src/propagation/picard.rs b/src/kete_core/src/propagation/picard.rs index 1160daf..641c412 100644 --- a/src/kete_core/src/propagation/picard.rs +++ b/src/kete_core/src/propagation/picard.rs @@ -68,6 +68,7 @@ type PicardInitFunc<'a, const DIM: usize, const N: usize> = &'a dyn Fn(&[Time; N], &SVector) -> SMatrix; /// Initialization function for the picard integrator where the first value is repeated +#[must_use] pub fn dumb_picard_init( _times: &[Time; N], initial_pos: &SVector, @@ -170,7 +171,10 @@ impl<'a, const N: usize, const NM1: usize> PicardIntegrator { /// # Returns /// /// Returns the expected state of the system at the final time step `t1`. - /// + /// + /// # Errors + /// Integration can fail for a number of reasons, including convergence failing or + /// function evals failing. pub fn integrate( &self, func: FirstOrderODE<'a, MType, DIM>, @@ -184,6 +188,9 @@ impl<'a, const N: usize, const NM1: usize> PicardIntegrator { let t0 = t0.jd; let t1 = t1.jd; let mut cur_t0 = t0; + if (t0 - t1).abs() < 10.0 * f64::EPSILON { + return Ok(initial_pos); + } let mut cur_stepsize = step_size; let mut cur_t1 = t0 + cur_stepsize; @@ -314,7 +321,7 @@ impl<'a, const N: usize, const NM1: usize> PicardIntegrator { /// times where the function will be evaluated starting at t0 and ending at t1. 
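The `f64::midpoint` change feeds `evaluation_times`, which spreads the sample times across `[t0, t1]` using the interval midpoint `w1` and half-width `w2`. A hedged sketch of that mapping, assuming Chebyshev-Lobatto spacing consistent with the `cheb_t` helper used elsewhere in the integrator (the exact spacing constants are not shown in this diff):

```rust
use std::f64::consts::PI;

// Map Chebyshev-Lobatto nodes cos(k*pi/(N-1)) from [-1, 1] onto [t0, t1].
fn chebyshev_nodes<const N: usize>(t0: f64, t1: f64) -> [f64; N] {
    let w1 = f64::midpoint(t0, t1); // interval midpoint
    let w2 = (t1 - t0) / 2.0; // interval half-width
    std::array::from_fn(|k| w1 - w2 * (k as f64 * PI / (N as f64 - 1.0)).cos())
}

fn main() {
    let nodes = chebyshev_nodes::<5>(0.0, 10.0);
    assert!((nodes[0] - 0.0).abs() < 1e-12); // first node at t0
    assert!((nodes[4] - 10.0).abs() < 1e-12); // last node at t1
    assert!((nodes[2] - 5.0).abs() < 1e-12); // middle node at the midpoint
}
```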
fn evaluation_times(&self, t0: Time, t1: Time) -> [Time; N] { let w2 = (t1 - t0).elapsed / 2.0; - let w1 = (t1.jd + t0.jd) / 2.0; + let w1 = f64::midpoint(t1.jd, t0.jd); let mut times: [Time; N] = [0.0.into(); N]; times .iter_mut() @@ -327,12 +334,15 @@ impl<'a, const N: usize, const NM1: usize> PicardIntegrator { impl PicardStep { /// Evaluate the fitted integration solution at the specified time. /// This will fail if the requested time is outside of the integration bounds. + /// + /// # Errors + /// Evaluation may fail if ``t`` is outside of bounds. pub fn evaluate(&self, t: f64) -> KeteResult<[f64; DIM]> { let w1 = (self.t0.jd + self.t1.jd) * 0.5; let w2 = (self.t1 - self.t0).elapsed * 0.5; let tau_time = ((t - w1) * w2).acos(); if tau_time.is_nan() { - return Err(Error::ExceedsLimits( + return Err(Error::Bounds( "Queried time it outside of the fitted time span".into(), )); } @@ -342,17 +352,17 @@ impl PicardStep { impl Default for PicardIntegrator { fn default() -> Self { - // NM1 must be equal to N-1, I wish I could do this with const generics - // but that would require switching to nightly rust and I wont do that. - // So this compile time check is done instead. - const { assert!(N - 1 == NM1, "NM1 must be 1 less than N.") }; - // Combining the cos(k*arccos(-cos(...))) simplifies to this equation // This is used throughout the integrator's construction fn cheb_t(k: f64, j: f64, n: f64) -> f64 { (k * (j + n - 1.0) * PI / (n - 1.0)).cos() } + // NM1 must be equal to N-1, I wish I could do this with const generics + // but that would require switching to nightly rust and I wont do that. + // So this compile time check is done instead. + const { assert!(N - 1 == NM1, "NM1 must be 1 less than N.") }; + let n = N as f64; // Construct the C matrix @@ -390,6 +400,7 @@ impl Default for PicardIntegrator { // Now we construct the SA Vector, in the original paper they keep S as a // separate vector and multiply it against A over and over, whereas we are just // doing it once and saving it. + #[allow(clippy::cast_possible_wrap, reason = "cast cant fail.")] let s: SVector = SVector::from_iterator((0..N).map(|idx| 2.0 * (-1_f64).powi(idx as i32))); @@ -450,6 +461,7 @@ mod tests { // This has the analytic solution of f=k * exp(-c*t) // define the f' function + #[allow(clippy::unnecessary_wraps, reason = "Required for integrator")] fn func( _: Time, vals: &SVector, diff --git a/src/kete_core/src/propagation/radau.rs b/src/kete_core/src/propagation/radau.rs index bde6783..9e67221 100644 --- a/src/kete_core/src/propagation/radau.rs +++ b/src/kete_core/src/propagation/radau.rs @@ -34,8 +34,8 @@ use crate::prelude::KeteResult; use crate::propagation::util::SecondOrderODE; use crate::time::{TDB, Time}; use itertools::izip; +use nalgebra::Matrix; use nalgebra::allocator::Allocator; -use nalgebra::*; use nalgebra::{DefaultAllocator, Dim, OMatrix, OVector, RowSVector, SMatrix, SVector, U1, U7}; /// Integrator will return a result of this type. @@ -174,6 +174,10 @@ where } /// Integrate the functions from the initial time to the final time. + /// + /// # Errors + /// Integration may fail for a number of reasons, either the function fails, or + /// convergence of the integrator fails. 
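The reordered `Default` impl above keeps the compile-time guard tying the two const generic parameters together: a `const` block assertion fails at build time (during monomorphization) when `NM1 != N - 1`, without requiring nightly features. A minimal sketch of that pattern:

```rust
struct Pair<const N: usize, const NM1: usize>;

impl<const N: usize, const NM1: usize> Pair<N, NM1> {
    fn new() -> Self {
        // Evaluated at compile time for each concrete (N, NM1) pair.
        const { assert!(N - 1 == NM1, "NM1 must be 1 less than N.") };
        Self
    }
}

fn main() {
    let _ok = Pair::<8, 7>::new();
    // let _bad = Pair::<8, 6>::new(); // rejected at compile time with the message above
}
```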
pub fn integrate( func: SecondOrderODE<'a, MType, D>, state_init: OVector, @@ -218,9 +222,12 @@ where step_failures = 0; } Err(error) => match error { - Error::Impact(_, _) => Err(error)?, - Error::ExceedsLimits(_) => Err(error)?, - _ => { + Error::Bounds(_) | Error::Impact(_, _) => Err(error)?, + Error::Convergence(_) + | Error::ValueError(_) + | Error::UnknownFrame(_) + | Error::IOError(_) + | Error::LockFailed => { step_failures += 1; next_step_size *= 0.7; if step_failures > 10 { @@ -249,6 +256,7 @@ where for _ in 0..10 { self.b_scratch.set_column(0, &self.cur_b.column(6)); // Calculate b and g + #[allow(clippy::cast_possible_wrap, reason = "idx does not exceed 8")] for (idj, gauss_radau_frac) in GAUSS_RADAU_SPACINGS.iter().enumerate().skip(1) { // the sample point at the Guass-Radau spacings. // Update each parameter using the current B as a guess to estimate the diff --git a/src/kete_core/src/propagation/runge_kutta.rs b/src/kete_core/src/propagation/runge_kutta.rs index e5853a5..f2cec1e 100644 --- a/src/kete_core/src/propagation/runge_kutta.rs +++ b/src/kete_core/src/propagation/runge_kutta.rs @@ -141,6 +141,10 @@ impl<'a, MType, const D: usize> RK45Integrator<'a, MType, D> { /// This performs integration on first order Initial Value Problems (IVP). /// /// This is a relatively fast, but not very precise numerical integration method. + /// + /// # Errors + /// Integration can fail for a number of reasons, including convergence failing or + /// function evals failing. pub fn integrate( func: FirstOrderODE<'a, MType, D>, state_init: SVector, @@ -185,6 +189,8 @@ mod tests { #[test] fn basic_exp() { + #[allow(clippy::unnecessary_wraps, reason = "doesnt matter")] + #[allow(clippy::trivially_copy_pass_by_ref, reason = "doesnt matter")] fn f( _t: Time, state: &Vector1, @@ -199,17 +205,19 @@ mod tests { &f, Vector1::::repeat(1_f64), 0.0.into(), - (step as f64 * 10.0).into(), + (f64::from(step) * 10.0).into(), (), 1e-14, ); assert!(res.is_ok()); - assert!((res.unwrap().0[0] - (-step as f64 * 10.0).exp()).abs() < 1e-14); + assert!((res.unwrap().0[0] - (-f64::from(step) * 10.0).exp()).abs() < 1e-14); } } #[test] fn basic_line() { + #[allow(clippy::unnecessary_wraps, reason = "doesnt matter")] + #[allow(clippy::trivially_copy_pass_by_ref, reason = "doesnt matter")] fn f( _t: Time, _state: &Vector1, diff --git a/src/kete_core/src/propagation/state_transition.rs b/src/kete_core/src/propagation/state_transition.rs index 5083849..5c4b214 100644 --- a/src/kete_core/src/propagation/state_transition.rs +++ b/src/kete_core/src/propagation/state_transition.rs @@ -82,11 +82,14 @@ fn stm_ivp_eqn( /// Compute a state transition matrix assuming only 2-body mechanics. /// /// This uses the Picard-Chebyshev integrator [`PC15`]. +/// +/// # Errors +/// Error is returned if convergence fails. 
pub fn compute_state_transition( state: &mut State, jd: Time, central_mass: f64, -) -> ([[f64; 3]; 2], Matrix6) { +) -> KeteResult<([[f64; 3]; 2], Matrix6)> { let mut meta = CentralAccelMeta { mass_scaling: central_mass, ..Default::default() @@ -107,17 +110,15 @@ pub fn compute_state_transition( .set_column(0, &(Vector3::from(state.vel) / GMS_SQRT)); let integrator = &PC15; - let rad = integrator - .integrate( - &stm_ivp_eqn, - &dumb_picard_init, - initial_state, - (state.epoch.jd * GMS_SQRT).into(), - (jd.jd * GMS_SQRT).into(), - 1.0, - &mut meta, - ) - .unwrap(); + let rad = integrator.integrate( + &stm_ivp_eqn, + &dumb_picard_init, + initial_state, + (state.epoch.jd * GMS_SQRT).into(), + (jd.jd * GMS_SQRT).into(), + 1.0, + &mut meta, + )?; let vec_reshape = rad .fixed_rows::<36>(6) @@ -138,11 +139,11 @@ pub fn compute_state_transition( ); let scaling_b = Matrix6::::from_diagonal(&[1.0, 1.0, 1.0, GMS_SQRT, GMS_SQRT, GMS_SQRT].into()); - ( + Ok(( [ rad.fixed_rows::<3>(0).into(), (rad.fixed_rows::<3>(3) * GMS_SQRT).into(), ], scaling_a * vec_reshape * scaling_b, - ) + )) } diff --git a/src/kete_core/src/simult_states.rs b/src/kete_core/src/simult_states.rs index bcd36b1..557e28e 100644 --- a/src/kete_core/src/simult_states.rs +++ b/src/kete_core/src/simult_states.rs @@ -62,16 +62,20 @@ impl SimultaneousStates { /// Create a new Exact `SimultaneousStates` /// Simultaneous States occur at the same JD, which is defined by either the time /// in the optional fov, or the time of the first state. + /// + /// # Errors + /// + /// [`Error::ValueError`] possible for multiple reasons: + /// - Input vector must contain at least one state. + /// - Center ids of all states must match. + /// - Epoch times must match unless an FOV is provided. pub fn new_exact(states: Vec>, fov: Option) -> KeteResult { - if states.is_empty() { + let Some(state) = states.first() else { return Err(Error::ValueError( "SimultaneousStates must contain at least one state.".into(), )); - } - let (mut jd, center_id) = { - let first = states.first().unwrap(); - (first.epoch, first.center_id) }; + let (mut jd, center_id) = (state.epoch, state.center_id); if let Some(f) = &fov { jd = f.observer().epoch; @@ -79,14 +83,14 @@ impl SimultaneousStates { if states.iter().any(|state| state.center_id != center_id) { return Err(Error::ValueError("Center IDs do not match expected".into())); - }; + } if fov.is_none() && states.iter().any(|state| state.epoch != jd) { return Err(Error::ValueError( "Epoch JDs do not match expected, this is only allowed if there is an associated FOV." .into(), )); - }; + } Ok(Self { states, @@ -102,13 +106,18 @@ impl SimultaneousStates { /// are automatically cast into the equatorial frame. /// The returned RA rate is scaled by cos(dec) so that it is equivalent to a /// linear projection onto the observing plane. + /// + /// # Errors + /// + /// [`Error::ValueError`] possible for multiple reasons: + /// - FOV is not specified. + /// - Center id of the FOV state does not match the center ID of the states. 
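`new_exact` above was reworked around `let ... else`, which binds the first state or returns early, replacing the `is_empty()` check followed by `first().unwrap()`. The same idiom in a self-contained form:

```rust
fn first_squared(values: &[f64]) -> Result<f64, String> {
    // Bind the first element, or bail out with an error if the slice is empty.
    let Some(first) = values.first() else {
        return Err("input must contain at least one value".into());
    };
    Ok(first * first)
}

fn main() {
    assert_eq!(first_squared(&[3.0, 4.0]), Ok(9.0));
    assert!(first_squared(&[]).is_err());
}
```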
pub fn ra_dec_with_rates(&self) -> KeteResult> { - if self.fov.is_none() { + let Some(fov) = &self.fov else { return Err(Error::ValueError( "Field of view must be specified for the ra/dec to be computed.".into(), )); - } - let fov = self.fov.as_ref().unwrap(); + }; let obs = fov.observer(); diff --git a/src/kete_core/src/spice/ck.rs b/src/kete_core/src/spice/ck.rs index aca7d35..e06cb4d 100644 --- a/src/kete_core/src/spice/ck.rs +++ b/src/kete_core/src/spice/ck.rs @@ -81,7 +81,7 @@ impl CkCollection { /// Get the closest record to the given JD for the specified instrument ID. /// /// # Errors - /// [`Error::ExceedsLimits`] if the instrument ID does not have a record for the + /// [`crate::prelude::Error::Bounds`] if the instrument ID does not have a record for the /// target JD. /// pub fn try_get_frame( @@ -90,7 +90,7 @@ impl CkCollection { instrument_id: i32, ) -> KeteResult<(Time, NonInertialFrame)> { let time = Time::::new(jd); - let sclk = LOADED_SCLK.try_read().unwrap(); + let sclk = LOADED_SCLK.try_read()?; let spice_id = instrument_id / 1000; let tick = sclk.try_time_to_tick(spice_id, time)?; @@ -101,12 +101,13 @@ impl CkCollection { } } - Err(Error::ExceedsLimits(format!( + Err(Error::Bounds(format!( "Instrument ({instrument_id}) does not have an CK record for the target JD." )))? } /// Return a list of all loaded instrument ids. + #[must_use] pub fn loaded_instruments(&self) -> Vec { self.segments .iter() @@ -122,6 +123,7 @@ impl CkCollection { } /// For a given NAIF ID, return all increments of time which are currently loaded. + #[must_use] pub fn available_info(&self, instrument_id: i32) -> Vec<(i32, i32, i32, f64, f64)> { self.segments .iter() diff --git a/src/kete_core/src/spice/ck_segments.rs b/src/kete_core/src/spice/ck_segments.rs index 6881125..c1fd545 100644 --- a/src/kete_core/src/spice/ck_segments.rs +++ b/src/kete_core/src/spice/ck_segments.rs @@ -49,7 +49,7 @@ impl CkSegment { ) -> KeteResult<(Time, NonInertialFrame)> { let arr_ref: &CkArray = self.into(); if arr_ref.instrument_id != instrument_id { - return Err(Error::ExceedsLimits(format!( + return Err(Error::Bounds(format!( "Instrument ID is not present in this record. {}", arr_ref.instrument_id ))); @@ -138,7 +138,7 @@ impl CkSegmentType2 { ) -> KeteResult<(Time, NonInertialFrame)> { let sclk = LOADED_SCLK .try_read() - .map_err(|_| Error::ExceedsLimits("Failed to read SCLK data.".into()))?; + .map_err(|_| Error::Bounds("Failed to read SCLK data.".into()))?; let tick = sclk.try_time_to_tick(self.array.naif_id, time)?; // get the time of the last record and its index @@ -165,13 +165,15 @@ impl CkSegmentType2 { let dt = tick - record_time; if dt < 0.0 { - return Err(Error::ExceedsLimits(format!( + return Err(Error::Bounds(format!( "Requested time {record_idx} is before the start of the segment." ))); } let mut rotation = Unit::from_quaternion(quaternion).to_rotation_matrix(); - accel_vec.iter_mut().for_each(|x| *x *= 86400.0 * dt * rate); + for x in &mut accel_vec { + *x *= 86400.0 * dt * rate; + } let rates = Rotation3::from_scaled_axis(accel_vec.into()); rotation *= rates; @@ -204,13 +206,13 @@ impl TryFrom for CkSegmentType2 { dir_size = n_records / 100; if array_len != (n_records * 10 + dir_size) { - return Err(Error::ExceedsLimits( + return Err(Error::Bounds( "CK File is not formatted correctly, directory size of segments appear incorrect." 
.into(), )); } if n_records == 0 { - return Err(Error::ExceedsLimits( + return Err(Error::Bounds( "CK File does not contain any records.".into(), )); } @@ -261,7 +263,7 @@ impl CkSegmentType3 { .data .get_unchecked(idx * self.rec_size..(idx + 1) * self.rec_size); Type3RecordView { - quaternion: &rec[..4], + quaternion: rec[..4].try_into().unwrap_unchecked(), accel: &rec[4..], } } @@ -323,7 +325,9 @@ impl CkSegmentType3 { let (time, quaternion, accel) = self.get_quaternion_at_time(time)?; let mut rates: [f64; 3] = accel.unwrap_or_default(); - rates.iter_mut().for_each(|x| *x *= 86400.0); + for x in &mut rates { + *x *= 86400.0; + } let rotation_rate = Rotation3::from_scaled_axis(rates.into()); let frame = NonInertialFrame::from_rotations( @@ -348,7 +352,7 @@ impl CkSegmentType3 { ) -> KeteResult<(Time, UnitQuaternion, Option<[f64; 3]>)> { let sclk = LOADED_SCLK .try_read() - .map_err(|_| Error::ExceedsLimits("Failed to read SCLK data.".into()))?; + .map_err(|_| Error::Bounds("Failed to read SCLK data.".into()))?; let tick = sclk.try_time_to_tick(self.array.naif_id, time)?; // If there is only one record, return it immediately. @@ -401,19 +405,19 @@ impl CkSegmentType3 { } struct Type3RecordView<'a> { - quaternion: &'a [f64], + quaternion: &'a [f64; 4], accel: &'a [f64], } impl From> for (Quaternion, Option<[f64; 3]>) { fn from(record: Type3RecordView<'_>) -> Self { - let quaternion: [f64; 4] = record.quaternion.try_into().unwrap(); - let quaternion = - Quaternion::new(quaternion[0], quaternion[1], quaternion[2], quaternion[3]); - let accel = if record.accel.is_empty() { - None - } else { - Some(record.accel.try_into().unwrap()) + let quaternion = match record.quaternion { + &[a, b, c, d] => Quaternion::new(a, b, c, d), + }; + + let accel = match record.accel { + &[a, b, c] => Some([a, b, c]), + _ => None, }; (quaternion, accel) } @@ -422,17 +426,21 @@ impl From> for (Quaternion, Option<[f64; 3]>) { impl TryFrom for CkSegmentType3 { type Error = Error; + #[allow( + clippy::cast_sign_loss, + reason = "cast should work except when file is incorrectly formatted" + )] fn try_from(array: CkArray) -> Result { let n_records = array.daf[array.daf.len() - 1] as usize; let n_intervals = array.daf[array.daf.len() - 2] as usize; if n_records == 0 { - return Err(Error::ExceedsLimits( + return Err(Error::Bounds( "CK File does not contain any records.".into(), )); } if n_intervals == 0 { - return Err(Error::ExceedsLimits( + return Err(Error::Bounds( "CK File does not contain any intervals of records.".into(), )); } @@ -455,7 +463,7 @@ impl TryFrom for CkSegmentType3 { expected_size += time_dir_size + interval_dir_size; if expected_size != array.daf.len() { - return Err(Error::ExceedsLimits( + return Err(Error::Bounds( "CK File not formatted correctly. Number of records found in file don't match expected." .into(), )); diff --git a/src/kete_core/src/spice/daf.rs b/src/kete_core/src/spice/daf.rs index 516f4f3..52b162f 100644 --- a/src/kete_core/src/spice/daf.rs +++ b/src/kete_core/src/spice/daf.rs @@ -70,13 +70,19 @@ pub enum DAFType { Ck, } -impl From<&str> for DAFType { - fn from(magic: &str) -> Self { - match &magic.to_uppercase()[4..7] { - "SPK" => Self::Spk, - "PCK" => Self::Pck, - "CK " => Self::Ck, - other => Self::Unrecognized(other.as_bytes().try_into().unwrap()), +impl TryFrom<&str> for DAFType { + type Error = Error; + + fn try_from(magic: &str) -> KeteResult { + match magic + .to_uppercase() + .get(4..7) + .ok_or(Error::IOError("DAF Magic number not long enough".into()))? 
+ { + "SPK" => Ok(Self::Spk), + "PCK" => Ok(Self::Pck), + "CK " => Ok(Self::Ck), + other => Ok(Self::Unrecognized(other.as_bytes().try_into().unwrap())), } } } @@ -131,15 +137,21 @@ pub struct DafFile { impl DafFile { /// Try to load a single record from the DAF. + /// + /// # Errors + /// [`Error::IOError`] if a seek fails in the file. pub fn try_load_record(file: &mut T, idx: u64) -> KeteResult> { let _ = file.seek(std::io::SeekFrom::Start(1024 * (idx - 1)))?; read_bytes_exact(file, 1024) } /// Load the contents of a DAF file. + /// + /// # Errors + /// Fails if there are read errors, or the file is incorrect in some way. pub fn from_buffer(mut buffer: T) -> KeteResult { let bytes = Self::try_load_record(&mut buffer, 1)?; - let daf_type: DAFType = bytes_to_string(&bytes[0..8]).as_str().into(); + let daf_type: DAFType = bytes_to_string(&bytes[0..8]).as_str().try_into()?; let little_endian = match bytes_to_string(&bytes[88..96]).to_lowercase().as_str() { "ltl-ieee" => true, @@ -155,7 +167,7 @@ impl DafFile { // record index of the first summary record in the file // records are 1024 long, and 1 indexed because fortran. - let init_summary_record_index = bytes_to_i32(&bytes[76..80], little_endian)?; + let init_summary_record_index = bytes_to_i32(&bytes[76..80], little_endian)?.abs(); // the following values are not used, so are not stored. let internal_desc = bytes_to_string(&bytes[16..76]); @@ -168,6 +180,7 @@ impl DafFile { // so read the next (init_summary_record_index-2) records: // -1 for fortran indexing // -1 for having already read a single record + #[allow(clippy::cast_sign_loss, reason = "known to be positive")] let mut comments: Vec = Vec::with_capacity(init_summary_record_index as usize - 2); for _ in 0..(init_summary_record_index - 2) { // TODO: Check if the 1000 character limit is what other formats use. @@ -195,6 +208,9 @@ impl DafFile { } /// Load DAF file from the specified filename. + /// + /// # Errors + /// Fails if there is a read error or parsing error. pub fn from_file(filename: &str) -> KeteResult { let mut file = std::fs::File::open(filename)?; let mut buffer = Vec::new(); @@ -204,9 +220,19 @@ impl DafFile { } /// Load all [`DafArray`] segments from the DAF file. - /// These are tuples containing a series of f64s and i32s along with arrays of data. - /// The meaning of these values depends on the particular implementation of the DAF. + /// These are tuples containing a series of f64s and i32s along with arrays of + /// data. + /// The meaning of these values depends on the particular implementation of the + /// DAF. + /// + /// # Errors + /// Can fail if file fails to read or byte conversion fails (file incorrectly + /// formatted). /// + #[allow( + clippy::cast_sign_loss, + reason = "cast should work except when file is incorrectly formatted" + )] pub fn try_load_arrays(&mut self, file: &mut T) -> KeteResult<()> { let summary_size = self.n_doubles + (self.n_ints + 1) / 2; @@ -272,7 +298,14 @@ impl Debug for DafArray { } impl DafArray { - /// Try to load an DAF array from summary data + /// Try to load an DAF array from summary data. + /// + /// # Errors + /// Fails when a file is either incorrectly formatted, or a read error occurs. + #[allow( + clippy::cast_sign_loss, + reason = "cast should work except when file is incorrectly formatted" + )] pub fn try_load_array( buffer: &mut T, summary_floats: Box<[f64]>, @@ -310,11 +343,13 @@ impl DafArray { } /// Total length of the array. 
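The `DAFType` change above converts a panicking `From<&str>` into `TryFrom<&str>`: `str::get(4..7)` returns `None` for a too-short magic string, which becomes an error instead of an out-of-bounds panic. A sketch with stand-in types:

```rust
#[derive(Debug, PartialEq)]
enum Kind {
    Spk,
    Other(String),
}

fn kind_from_magic(magic: &str) -> Result<Kind, String> {
    // `get` yields None when the string is too short, rather than panicking.
    let tag = magic
        .to_uppercase()
        .get(4..7)
        .ok_or_else(|| "magic number not long enough".to_string())?
        .to_string();
    Ok(match tag.as_str() {
        "SPK" => Kind::Spk,
        other => Kind::Other(other.to_string()),
    })
}

fn main() {
    assert_eq!(kind_from_magic("DAF/SPK "), Ok(Kind::Spk));
    assert!(kind_from_magic("DAF").is_err());
}
```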
+ #[must_use] pub fn len(&self) -> usize { self.data.len() } /// Test if array is empty. + #[must_use] pub fn is_empty(&self) -> bool { self.data.is_empty() } @@ -360,6 +395,7 @@ pub struct SpkArray { impl SpkArray { /// Is the specified JD within the range of this array. + #[must_use] pub fn contains(&self, jd: Time) -> bool { let jds = jd_to_spice_jd(jd); (jds >= self.jds_start) && (jds <= self.jds_end) @@ -431,6 +467,7 @@ pub struct PckArray { impl PckArray { /// Is the specified JD within the range of this array. + #[must_use] pub fn contains(&self, jd: Time) -> bool { let jds = jd_to_spice_jd(jd); (jds >= self.jds_start) && (jds <= self.jds_end) @@ -507,6 +544,7 @@ pub struct CkArray { impl CkArray { /// Is the specified SCLK tick within the range of this array. + #[must_use] pub fn contains(&self, tick: f64) -> bool { (tick >= self.tick_start) && (tick <= self.tick_end) } diff --git a/src/kete_core/src/spice/interpolation.rs b/src/kete_core/src/spice/interpolation.rs index cf9fa14..7a5b245 100644 --- a/src/kete_core/src/spice/interpolation.rs +++ b/src/kete_core/src/spice/interpolation.rs @@ -242,7 +242,7 @@ pub(crate) fn lagrange_interpolation(x: &[f64], y: &mut [f64], eval_time: f64) - } let deg = x.len() - 1; let mut val = y[deg]; - for k in 1..deg + 1 { + for k in 1..=deg { val = y[deg - k] + (eval_time - x[deg - k]) * val; } val @@ -258,7 +258,7 @@ mod tests { let y = times.clone(); for v in 0..100 { - let eval_time = (v as f64) / 100. * 9.0; + let eval_time = f64::from(v) / 100. * 9.0; let interp = lagrange_interpolation(×, &mut y.clone(), eval_time); assert!((interp - eval_time).abs() < 1e-12); } @@ -269,7 +269,7 @@ mod tests { .collect(); for v in 0..100 { - let x = (v as f64) / 100. * 9.0; + let x = f64::from(v) / 100. * 9.0; let expected = x + 1.75 * x.powi(2) - 3.0 * x.powi(3) - 11.0 * x.powi(4); let interp = lagrange_interpolation(×, &mut y1.clone(), x); assert!( diff --git a/src/kete_core/src/spice/naif_ids.rs b/src/kete_core/src/spice/naif_ids.rs index 15bc1de..a8395e2 100644 --- a/src/kete_core/src/spice/naif_ids.rs +++ b/src/kete_core/src/spice/naif_ids.rs @@ -87,10 +87,8 @@ pub fn try_name_from_id(id: i32) -> Option { /// /// This does a partial string match, case insensitive. pub fn naif_ids_from_name(name: &str) -> Vec { - // this should be re-written to be simpler - let desigs: Vec = NAIF_IDS.iter().map(|n| n.name.to_lowercase()).collect(); - let desigs: Vec<&str> = desigs.iter().map(String::as_str).collect(); - partial_str_match(&name.to_lowercase(), &desigs) + let desigs: Vec<&str> = NAIF_IDS.iter().map(|n| n.name.as_str()).collect(); + partial_str_match(name, &desigs) .into_iter() .map(|(i, _)| NAIF_IDS[i].clone()) .collect() diff --git a/src/kete_core/src/spice/pck.rs b/src/kete_core/src/spice/pck.rs index 94df098..a5ce7eb 100644 --- a/src/kete_core/src/spice/pck.rs +++ b/src/kete_core/src/spice/pck.rs @@ -57,6 +57,9 @@ pub struct PckCollection { impl PckCollection { /// Given an PCK filename, load all the segments present inside of it. /// These segments are added to the PCK singleton in memory. + /// + /// # Errors + /// May fail if there are IO or Parsing errors. pub fn load_file(&mut self, filename: &str) -> KeteResult<()> { let file = DafFile::from_file(filename)?; if !matches!(file.daf_type, DAFType::Pck) { @@ -75,15 +78,18 @@ impl PckCollection { /// Get the raw orientation from the loaded PCK files. /// This orientation will have the frame of what was originally present in the file. 
+ /// + /// # Errors + /// Fails when the specified ID is not found in known segments. pub fn try_get_orientation(&self, id: i32, jd: Time) -> KeteResult { - for segment in self.segments.iter() { + for segment in &self.segments { let array: &PckArray = segment.into(); if (array.frame_id == id) & array.contains(jd) { return segment.try_get_orientation(id, jd); } } - Err(Error::ExceedsLimits(format!( + Err(Error::Bounds(format!( "Object ({id}) does not have an PCK record for the target JD." )))? } @@ -95,6 +101,7 @@ impl PckCollection { /// Return a list of all loaded segments in the PCK singleton. /// This is a list of the center NAIF IDs of the segments. + #[must_use] pub fn loaded_objects(&self) -> Vec { let loaded: HashSet = self .segments @@ -105,31 +112,40 @@ impl PckCollection { } /// Load the core files. + /// + /// # Errors + /// May fail if there are IO or Parsing errors. pub fn load_core(&mut self) -> KeteResult<()> { let cache = cache_path("kernels/core")?; - self.load_directory(cache)?; + self.load_directory(&cache)?; Ok(()) } /// Load files in the cache directory. + /// + /// # Errors + /// May fail if there are IO or Parsing errors. pub fn load_cache(&mut self) -> KeteResult<()> { let cache = cache_path("kernels")?; - self.load_directory(cache)?; + self.load_directory(&cache)?; Ok(()) } /// Load all PCK files from a directory. - pub fn load_directory(&mut self, directory: String) -> KeteResult<()> { - fs::read_dir(&directory)?.for_each(|entry| { - let entry = entry.unwrap(); - let path = entry.path(); - if path.is_file() { - let filename = path.to_str().unwrap(); - if filename.to_lowercase().ends_with(".bpc") - && let Err(err) = self.load_file(filename) - { - eprintln!("Failed to load PCK file {filename}: {err}"); - } + /// + /// # Errors + /// This only fails when there is a file IO error. When individual files fail to load, + /// ``eprintln`` is used, but loading will continue. + pub fn load_directory(&mut self, directory: &str) -> KeteResult<()> { + fs::read_dir(directory)?.for_each(|entry| { + // rust is crazy sometimes + if let Ok(entry) = entry + && entry.path().is_file() + && let Some(filename) = entry.path().to_str() + && filename.to_lowercase().ends_with(".bpc") + && let Err(err) = self.load_file(filename) + { + eprintln!("Failed to load PCK file {filename}: {err}"); } }); Ok(()) diff --git a/src/kete_core/src/spice/pck_segments.rs b/src/kete_core/src/spice/pck_segments.rs index 145290a..7d2ff74 100644 --- a/src/kete_core/src/spice/pck_segments.rs +++ b/src/kete_core/src/spice/pck_segments.rs @@ -43,7 +43,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
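`load_directory` above now filters directory entries with chained `if let` conditions (let-chains, which require the Rust 2024 edition): the nested match/if pyramid collapses into one condition, and a failed load only prints a warning while the scan continues. A standalone sketch of the same filtering, with the kernel extension as a parameter:

```rust
use std::fs::DirEntry;

// True when the entry is a readable file whose name ends with `ext`.
fn is_kernel(entry: std::io::Result<DirEntry>, ext: &str) -> bool {
    if let Ok(entry) = entry
        && entry.path().is_file()
        && let Some(name) = entry.path().to_str()
        && name.to_lowercase().ends_with(ext)
    {
        return true;
    }
    false
}

fn main() -> std::io::Result<()> {
    let mut kernels = 0;
    for entry in std::fs::read_dir(".")? {
        if is_kernel(entry, ".bpc") {
            kernels += 1;
        }
    }
    println!("{kernels} PCK kernels found in the current directory");
    Ok(())
}
```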
use super::jd_to_spice_jd; -use super::{PckArray, interpolation::*}; +use super::{PckArray, interpolation::chebyshev_evaluate_both}; use crate::errors::Error; use crate::frames::NonInertialFrame; use crate::prelude::KeteResult; @@ -96,7 +96,7 @@ impl PckSegment { let arr_ref: &PckArray = self.into(); if center_id != arr_ref.frame_id { - Err(Error::ExceedsLimits( + Err(Error::Bounds( "Center ID is not present in this record.".into(), ))?; } @@ -104,9 +104,7 @@ impl PckSegment { let jds = jd_to_spice_jd(epoch); if jds < arr_ref.jds_start || jds > arr_ref.jds_end { - Err(Error::ExceedsLimits( - "JD is not present in this record.".into(), - ))?; + Err(Error::Bounds("JD is not present in this record.".into()))?; } if arr_ref.reference_frame_id != 17 { Err(Error::ValueError(format!( @@ -159,6 +157,10 @@ impl PckSegmentType2 { // Rate of change for each of these values can be calculated by using the // derivative of chebyshev of the first kind, which is done below. let jds_start = self.array.jds_start; + #[allow( + clippy::cast_sign_loss, + reason = "safe as long as file is correctly formatted." + )] let record_index = ((jds - jds_start) / self.jd_step).floor() as usize; let record = self.get_record(record_index); let t_mid = record[0]; @@ -195,6 +197,10 @@ impl PckSegmentType2 { impl TryFrom for PckSegmentType2 { type Error = Error; + #[allow( + clippy::cast_sign_loss, + reason = "cast should work except when file is incorrectly formatted" + )] fn try_from(array: PckArray) -> Result { let n_records = array.daf[array.daf.len() - 1] as usize; let record_len = array.daf[array.daf.len() - 2] as usize; @@ -204,7 +210,7 @@ impl TryFrom for PckSegmentType2 { if n_records * record_len + 4 != array.daf.len() { return Err(Error::IOError( - "PCK File not formatted correctly. Number records found in file dont match expected number." + "PCK File not formatted correctly. Number records found in file do not match expected number." .into(), )); } diff --git a/src/kete_core/src/spice/sclk.rs b/src/kete_core/src/spice/sclk.rs index 4536c6c..c18a072 100644 --- a/src/kete_core/src/spice/sclk.rs +++ b/src/kete_core/src/spice/sclk.rs @@ -129,7 +129,7 @@ impl SclkCollection { /// [`Error::ValueError`] if the SCLK clock for the given ID is not found. pub fn try_tick_to_time(&self, id: i32, clock_tick: f64) -> KeteResult> { if let Some(sclk) = self.clocks.get(&id) { - sclk.tick_to_time(clock_tick) + Ok(sclk.tick_to_time(clock_tick)) } else { Err(Error::ValueError(format!( "SCLK clock for spacecraft ID {id} not found." @@ -149,7 +149,7 @@ impl SclkCollection { /// [`Error::ValueError`] if the SCLK clock for the given ID is not found. pub fn try_time_to_tick(&self, id: i32, time: Time) -> KeteResult { if let Some(sclk) = self.clocks.get(&id) { - sclk.time_to_tick(time) + Ok(sclk.time_to_tick(time)) } else { Err(Error::ValueError(format!( "SCLK clock for spacecraft ID {id} not found." @@ -164,6 +164,7 @@ impl SclkCollection { /// Return a list of all loaded segments in the SCLK singleton. /// This is a list of the center NAIF IDs of the segments. + #[must_use] pub fn loaded_objects(&self) -> Vec { self.clocks.keys().copied().collect() } @@ -245,29 +246,27 @@ impl Sclk { /// Given an SCLK clock string, parse it into a `Time`. fn string_to_time(&self, time_str: &str) -> KeteResult> { let (_, tick) = self.string_to_tick(time_str)?; - self.tick_to_time(tick) + Ok(self.tick_to_time(tick)) } /// Convert a spacecraft clock tick (SCLK time) into a [`Time`]. 
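For reference on the clock-coefficient lookups that `tick_to_time` and `time_to_tick` rely on below: `partition_point` returns the index of the first segment starting after the query, so stepping back one (saturating at zero) selects the segment the query falls in. A self-contained sketch with made-up coefficient triples:

```rust
// Each entry is (segment start, mapped value at start, rate), loosely mirroring
// the SCLK coefficient tables.
fn find_segment(coefficients: &[[f64; 3]], tick: f64) -> [f64; 3] {
    let mut idx = coefficients.partition_point(|probe| probe[0] <= tick);
    idx = idx.saturating_sub(1); // clamp queries before the first segment
    coefficients[idx]
}

fn main() {
    let coeffs = [[0.0, 100.0, 1.0], [10.0, 110.0, 2.0], [20.0, 130.0, 4.0]];
    assert_eq!(find_segment(&coeffs, -5.0), [0.0, 100.0, 1.0]); // clamped to first
    assert_eq!(find_segment(&coeffs, 15.0), [10.0, 110.0, 2.0]);
    assert_eq!(find_segment(&coeffs, 50.0), [20.0, 130.0, 4.0]); // beyond last start
}
```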
- fn tick_to_time(&self, tick: f64) -> KeteResult> { - let clock_rate = self.find_tick_rate(tick)?; + fn tick_to_time(&self, tick: f64) -> Time { + let clock_rate = self.find_tick_rate(tick); let par_time = (tick - clock_rate[0]) * (clock_rate[2] / (*self.tick_rates.first().unwrap() as f64)) + clock_rate[1]; - Ok(spice_jd_to_jd(par_time)) + spice_jd_to_jd(par_time) } /// Convert time in TDB to a spacecraft clock tick count. - fn time_to_tick(&self, time: Time) -> KeteResult { + fn time_to_tick(&self, time: Time) -> f64 { let par_time = jd_to_spice_jd(time); - let clock_rate = self.find_parallel_time_rate(par_time)?; + let clock_rate = self.find_parallel_time_rate(par_time); - let tick = (par_time - clock_rate[1]) - * ((*self.tick_rates.first().unwrap() as f64) / clock_rate[2]) - + clock_rate[0]; - Ok(tick) + (par_time - clock_rate[1]) * ((*self.tick_rates.first().unwrap() as f64) / clock_rate[2]) + + clock_rate[0] } /// Convert a spacecraft clock string into the partition and tick count. @@ -330,19 +329,19 @@ impl Sclk { } /// Go through the coefficients and find the clock rate for a given spacecraft clock. - fn find_tick_rate(&self, tick: f64) -> KeteResult<[f64; 3]> { + fn find_tick_rate(&self, tick: f64) -> [f64; 3] { let mut idx = self.coefficients.partition_point(|probe| probe[0] <= tick); idx = idx.saturating_sub(1); - Ok(self.coefficients[idx]) + self.coefficients[idx] } /// Go through the coefficients and find the clock rate for a given spacecraft clock. - fn find_parallel_time_rate(&self, par_time: f64) -> KeteResult<[f64; 3]> { + fn find_parallel_time_rate(&self, par_time: f64) -> [f64; 3] { let mut idx = self .coefficients .partition_point(|probe| probe[1] <= par_time); idx = idx.saturating_sub(1); - Ok(self.coefficients[idx]) + self.coefficients[idx] } /// Given a tick count, find the partition and the number of ticks from the @@ -403,10 +402,13 @@ impl TryFrom> for Sclk { let mut coefficients: Option> = None; let mut tdb: Option = None; + #[allow( + clippy::cast_possible_wrap, + reason = "i32 casts are valid as long as file is correctly formatted" + )] for token in value { match token { - SclkToken::MagicNumber => (), - SclkToken::Comments(_) => (), + SclkToken::MagicNumber | SclkToken::Comments(_) => (), SclkToken::KernelID(id) => { if kernel_id.is_some() { return Err(Error::ValueError("Multiple SCLK_KERNEL_ID found.".into())); @@ -598,10 +600,10 @@ impl TryFrom> for Sclk { n_fields, moduli, offsets, - tick_rates, partition_start, partition_end, coefficients, + tick_rates, }) } } @@ -645,7 +647,7 @@ fn parse_key_suffix(input: &str) -> IResult<&str, (&str, Option)> { let (_, num) = opt(parse_num::).parse(&word[last_underscore + 1..])?; if num.is_some() { - Ok((rem, (&word[..last_underscore + 1], num))) + Ok((rem, (&word[..=last_underscore], num))) } else { Ok((rem, (word, None))) } @@ -990,8 +992,8 @@ mod tests { let t = clock.string_to_time("1/1000:00:00").unwrap(); - let ticks = clock.time_to_tick(t).unwrap(); - let t2 = clock.tick_to_time(ticks).unwrap(); + let ticks = clock.time_to_tick(t); + let t2 = clock.tick_to_time(ticks); assert_eq!(t, t2); let (part, count) = clock.partition_tick_count(0.0).unwrap(); diff --git a/src/kete_core/src/spice/spk.rs b/src/kete_core/src/spice/spk.rs index aba740a..e814894 100644 --- a/src/kete_core/src/spice/spk.rs +++ b/src/kete_core/src/spice/spk.rs @@ -92,6 +92,9 @@ impl SpkCollection { /// Get the raw state from the loaded SPK files. /// This state will have the center and frame of whatever was originally loaded /// into the file. 
+ /// + /// # Errors + /// Fails when the id or jd is not found in the [`SpkCollection`]. #[inline(always)] pub fn try_get_state(&self, id: i32, jd: Time) -> KeteResult> { for segment in &self.planet_segments { @@ -108,13 +111,16 @@ impl SpkCollection { } } } - Err(Error::ExceedsLimits(format!( + Err(Error::Bounds(format!( "Object ({id}) does not have an SPK record for the target JD." ))) } /// Load a state from the file, then attempt to change the center to the center id /// specified. + /// + /// # Errors + /// Fails when the id or jd is not found in the [`SpkCollection`]. #[inline(always)] pub fn try_get_state_with_center( &self, @@ -130,6 +136,9 @@ impl SpkCollection { } /// Use the data loaded in the SPKs to change the center ID of the provided state. + /// + /// # Errors + /// Fails when the id or jd is not found in the [`SpkCollection`]. pub fn try_change_center( &self, state: &mut State, @@ -164,6 +173,7 @@ impl SpkCollection { } /// For a given NAIF ID, return all increments of time which are currently loaded. + #[must_use] pub fn available_info(&self, id: i32) -> Vec<(Time, Time, i32, i32, i32)> { let mut segment_info = Vec::<(Time, Time, i32, i32, i32)>::new(); if let Some(segments) = self.segments.get(&id) { @@ -225,6 +235,7 @@ impl SpkCollection { /// be included in the loaded objects set, as 0 is a privileged position at the /// barycenter of the solar system. It is not typically defined in relation to /// anything else. + #[must_use] pub fn loaded_objects(&self, include_centers: bool) -> HashSet { let mut found = HashSet::new(); @@ -270,7 +281,7 @@ impl SpkCollection { if let Some((v, _)) = result { Ok(v.iter().skip(1).map(|x| x.1).collect()) } else { - Err(Error::ExceedsLimits(format!( + Err(Error::Bounds(format!( "SPK files are missing information to be able to map from obj {start} to obj {goal}" ))) } @@ -283,8 +294,6 @@ impl SpkCollection { fn build_mapping(&mut self) { static PRECACHE: &[i32] = &[0, 10, 399]; - let mut nodes: HashMap> = HashMap::new(); - fn update_nodes(segment: &SpkSegment, nodes: &mut HashMap>) { let array_ref: &SpkArray = segment.into(); if let std::collections::hash_map::Entry::Vacant(e) = nodes.entry(array_ref.object_id) { @@ -309,12 +318,16 @@ impl SpkCollection { } } + let mut nodes: HashMap> = HashMap::new(); + self.planet_segments .iter() .for_each(|x| update_nodes(x, &mut nodes)); for segs in self.segments.values() { - segs.iter().for_each(|x| update_nodes(x, &mut nodes)); + for x in segs { + update_nodes(x, &mut nodes); + } } let loaded = self.loaded_objects(true); @@ -348,6 +361,10 @@ impl SpkCollection { /// Given an SPK filename, load all the segments present inside of it. /// These segments are added to the SPK singleton in memory. + /// + /// # Errors + /// Loading files may fail for a number of reasons, including incorrect formatted + /// files or IO errors. pub fn load_file(&mut self, filename: &str) -> KeteResult<()> { let file = DafFile::from_file(filename)?; @@ -377,31 +394,38 @@ impl SpkCollection { } /// Load the core files. + /// + /// # Errors + /// May fail if there are IO or Parsing errors. pub fn load_core(&mut self) -> KeteResult<()> { let cache = cache_path("kernels/core")?; - self.load_directory(cache)?; + self.load_directory(&cache)?; Ok(()) } /// Load files in the cache directory. + /// + /// # Errors + /// May fail if there are IO or Parsing errors. 
pub fn load_cache(&mut self) -> KeteResult<()> { let cache = cache_path("kernels")?; - self.load_directory(cache)?; + self.load_directory(&cache)?; Ok(()) } /// Load all SPK files from a directory. - pub fn load_directory(&mut self, directory: String) -> KeteResult<()> { - fs::read_dir(&directory)?.for_each(|entry| { - let entry = entry.unwrap(); - let path = entry.path(); - if path.is_file() { - let filename = path.to_str().unwrap(); - if filename.to_lowercase().ends_with(".bsp") - && let Err(err) = self.load_file(filename) - { - eprintln!("Failed to load SPK file {filename}: {err}"); - } + /// + /// # Errors + /// May fail if there are IO or Parsing errors. + pub fn load_directory(&mut self, directory: &str) -> KeteResult<()> { + fs::read_dir(directory)?.for_each(|entry| { + if let Ok(entry) = entry + && entry.path().is_file() + && let Some(filename) = entry.path().to_str() + && filename.to_lowercase().ends_with(".bsp") + && let Err(err) = self.load_file(filename) + { + eprintln!("Failed to load SPK file {filename}: {err}"); } }); Ok(()) @@ -409,11 +433,12 @@ impl SpkCollection { /// Try to get the unique loaded NAIF ID for the given name. /// - /// If no ID is found, an error is returned. - /// If multiple IDs are found, an error is returned. - /// /// If there are multiple ids which match, but one of them is an exact match, /// that one is returned. + /// + /// # Errors + /// If no ID is found, an error is returned. + /// If multiple IDs are found, an error is returned. pub fn try_id_from_name(&mut self, name: &str) -> KeteResult { // check first for cache hit with a read only lock @@ -440,7 +465,11 @@ impl SpkCollection { "No NAIF ID found for name: {name}" ))); } else if ids.len() == 1 { - let id = ids[0].clone(); + #[allow( + clippy::missing_panics_doc, + reason = "Length is 1, unwrap always possible." + )] + let id = ids.first().unwrap().clone(); let _ = self.naif_ids.insert(name.to_lowercase(), id.clone()); return Ok(id); } diff --git a/src/kete_core/src/spice/spk_segments.rs b/src/kete_core/src/spice/spk_segments.rs index 1a4839e..79f5f79 100644 --- a/src/kete_core/src/spice/spk_segments.rs +++ b/src/kete_core/src/spice/spk_segments.rs @@ -117,7 +117,7 @@ impl SpkSegment { // this is faster than calling contains, probably because the || instead of && if jds < arr_ref.jds_start || jds > arr_ref.jds_end { - return Err(Error::ExceedsLimits( + return Err(Error::Bounds( "JD is not present in this record.".to_string(), )); } @@ -188,6 +188,10 @@ pub(in crate::spice) struct SpkSegmentType1 { n_records: usize, } +#[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." +)] impl SpkSegmentType1 { #[inline(always)] fn get_record(&self, idx: usize) -> &[f64] { @@ -275,7 +279,7 @@ impl SpkSegmentType1 { // position interpolation let pos = std::array::from_fn(|idx| { - let sum: f64 = (1..(kq[idx] as usize + 1)) + let sum: f64 = (1..=(kq[idx] as usize)) .rev() .map(|j| divided_diff_array[15 * idx + j - 1] * w[j + ks - 1]) .sum(); @@ -302,6 +306,10 @@ impl SpkSegmentType1 { impl From for SpkSegmentType1 { fn from(array: SpkArray) -> Self { + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." 
+ )] let n_records = array.daf[array.daf.len() - 1] as usize; Self { array, n_records } @@ -355,6 +363,11 @@ impl SpkSegmentType2 { #[inline(always)] fn try_get_pos_vel(&self, jds: f64) -> KeteResult<([f64; 3], [f64; 3])> { let jds_start = self.array.jds_start; + + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] let record_index = ((jds - jds_start) / self.jds_step).floor() as usize; let record = self.get_record(record_index); @@ -380,6 +393,10 @@ impl TryFrom for SpkSegmentType2 { type Error = Error; fn try_from(array: SpkArray) -> Result { + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] let record_len = array.daf[array.daf.len() - 2] as usize; let jds_step = array.daf[array.daf.len() - 3]; @@ -452,6 +469,11 @@ impl SpkSegmentType3 { #[inline(always)] fn try_get_pos_vel(&self, jds: f64) -> KeteResult<([f64; 3], [f64; 3])> { let jds_start = self.array.jds_start; + + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] let record_index = ((jds - jds_start) / self.jds_step).floor() as usize; let record = self.get_record(record_index); @@ -473,6 +495,10 @@ impl SpkSegmentType3 { impl TryFrom for SpkSegmentType3 { type Error = Error; fn try_from(array: SpkArray) -> KeteResult { + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] let record_len = array.daf[array.daf.len() - 2] as usize; let jds_step = array.daf[array.daf.len() - 3]; @@ -507,6 +533,10 @@ pub(in crate::spice) struct SpkSegmentType9 { } impl From for SpkSegmentType9 { + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] fn from(array: SpkArray) -> Self { let n_records = array.daf[array.daf.len() - 1] as usize; let mut poly_degree = array.daf[array.daf.len() - 2] as usize; @@ -557,6 +587,10 @@ impl SpkSegmentType9 { } #[inline(always)] + #[allow( + clippy::cast_possible_wrap, + reason = "This is correct as long as the file is correct." + )] fn try_get_pos_vel(&self, jds: f64) -> ([f64; 3], [f64; 3]) { let times = self.get_times(); let window_size = self.poly_degree + 1; @@ -570,6 +604,11 @@ impl SpkSegmentType9 { } } }; + + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] let start_idx = start_idx.clamp(0, (self.n_records - window_size) as isize) as usize; let mut pos = [0.0; 3]; @@ -745,6 +784,10 @@ pub(in crate::spice) struct SpkSegmentType13 { } impl From for SpkSegmentType13 { + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] fn from(array: SpkArray) -> Self { let n_records = array.daf[array.daf.len() - 1] as usize; let mut window_size = array.daf[array.daf.len() - 2] as usize; @@ -795,6 +838,10 @@ impl SpkSegmentType13 { } #[inline(always)] + #[allow( + clippy::cast_possible_wrap, + reason = "This is correct as long as the file is correct." + )] fn try_get_pos_vel(&self, jds: f64) -> ([f64; 3], [f64; 3]) { let times = self.get_times(); let start_idx: isize = match times.binary_search_by(|probe| probe.total_cmp(&jds)) { @@ -807,6 +854,11 @@ impl SpkSegmentType13 { } } }; + + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." 
+ )] let start_idx = start_idx.clamp(0, self.n_records as isize - self.window_size as isize) as usize; @@ -857,6 +909,11 @@ pub(in crate::spice) struct SpkSegmentType18 { impl TryFrom for SpkSegmentType18 { type Error = Error; + + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] fn try_from(array: SpkArray) -> KeteResult { let n_records = array.daf[array.daf.len() - 1] as usize; let mut window_size = array.daf[array.daf.len() - 2] as usize; @@ -923,6 +980,10 @@ impl SpkSegmentType18 { } #[inline(always)] + #[allow( + clippy::cast_possible_wrap, + reason = "This is correct as long as the file is correct." + )] fn try_get_pos_vel(&self, jds: f64) -> ([f64; 3], [f64; 3]) { let times = self.get_times(); let start_idx: isize = match times.binary_search_by(|probe| probe.total_cmp(&jds)) { @@ -935,6 +996,11 @@ impl SpkSegmentType18 { } } }; + + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] let start_idx = start_idx.clamp(0, self.n_records as isize - self.window_size as isize) as usize; @@ -1018,6 +1084,10 @@ pub(in crate::spice) struct SpkSegmentType21 { } impl From for SpkSegmentType21 { + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] fn from(array: SpkArray) -> Self { let n_records = array.daf[array.daf.len() - 1] as usize; let n_coef = array.daf[array.daf.len() - 2] as usize; @@ -1054,6 +1124,10 @@ impl SpkSegmentType21 { } #[inline(always)] + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] fn try_get_pos_vel(&self, jds: f64) -> KeteResult<([f64; 3], [f64; 3])> { // Records are laid out as so: // @@ -1079,7 +1153,7 @@ impl SpkSegmentType21 { let ref_time = record[0]; - let func_vec = &record[1..self.n_coef + 1]; + let func_vec = &record[1..=self.n_coef]; let ref_state = &record[self.n_coef + 1..self.n_coef + 7]; let divided_diff_array = &record[self.n_coef + 7..4 * self.n_coef + 7]; @@ -1124,7 +1198,7 @@ impl SpkSegmentType21 { // position interpolation let pos = std::array::from_fn(|idx| { - let sum: f64 = (1..(kq[idx] as usize + 1)) + let sum: f64 = (1..=(kq[idx] as usize)) .rev() .map(|j| divided_diff_array[idx * self.n_coef + j - 1] * w[j + ks - 1]) .sum(); @@ -1139,7 +1213,7 @@ impl SpkSegmentType21 { // velocity interpolation let vel = std::array::from_fn(|idx| { - let sum: f64 = (1..(kq[idx] as usize + 1)) + let sum: f64 = (1..=(kq[idx] as usize)) .rev() .map(|j| divided_diff_array[idx * self.n_coef + j - 1] * w[j + ks - 1]) .sum(); @@ -1241,6 +1315,10 @@ impl GenericSegment { impl TryFrom for GenericSegment { type Error = Error; + #[allow( + clippy::cast_sign_loss, + reason = "This is correct as long as the file is correct." + )] fn try_from(array: SpkArray) -> KeteResult { // The very last value of this array is an int (cast to f64) which indicates the number // of meta-data values. diff --git a/src/kete_core/src/state.rs b/src/kete_core/src/state.rs index bdaa8a9..6e0f7da 100644 --- a/src/kete_core/src/state.rs +++ b/src/kete_core/src/state.rs @@ -1,4 +1,4 @@ -//! State vector representations. +//! # State vector representations. //! //! Keeping track of the location and velocity of an object requires more information //! than just a position and velocity vector. Because there is no universal coordinate @@ -61,6 +61,7 @@ use crate::time::{TDB, Time}; /// /// This state object assumes no uncertainty in its values. 
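The segment types above repeat two small index computations that the new `cast_sign_loss` allows annotate: a fixed-step record index for the Chebyshev-style types and a clamped start index for the sliding interpolation window. A standalone sketch with invented numbers:

```rust
fn record_index(jds: f64, jds_start: f64, jds_step: f64) -> usize {
    // Sign loss is acceptable only because the caller has already checked
    // that `jds` lies inside the segment's coverage.
    ((jds - jds_start) / jds_step).floor() as usize
}

fn window_start(candidate: isize, n_records: usize, window_size: usize) -> usize {
    // Keep the interpolation window fully inside the record table.
    candidate.clamp(0, n_records as isize - window_size as isize) as usize
}

fn main() {
    assert_eq!(record_index(2451545.6, 2451545.0, 0.25), 2);
    assert_eq!(window_start(-3, 100, 8), 0);
    assert_eq!(window_start(97, 100, 8), 92);
}
```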
#[derive(Debug, Serialize, Clone, Deserialize, PartialEq)] +#[must_use] pub struct State where T: InertialFrame, @@ -116,6 +117,7 @@ impl State { } /// Are all values finite. + #[must_use] pub fn is_finite(&self) -> bool { self.pos.is_finite() & self.vel.is_finite() & self.epoch.jd.is_finite() } @@ -123,7 +125,7 @@ impl State { /// Trade the center ID and ID values, and flip the direction of the position and /// velocity vectors. #[inline(always)] - pub fn try_flip_center_id(&mut self) -> KeteResult<()> { + pub(crate) fn try_flip_center_id(&mut self) -> KeteResult<()> { if let Desig::Naif(mut id) = self.desig { std::mem::swap(&mut id, &mut self.center_id); self.pos = -self.pos; @@ -147,6 +149,14 @@ impl State { /// # Arguments /// /// * `state` - [`State`] object which defines the new center point. + /// + /// # Errors + /// + /// [`Error::ValueError`] possible for multiple reasons: + /// - Other state is not at the same instant in time (epoch). + /// - [`Desig`] is not a Naif ID. + /// - Center id of the state does not match the ID in the other state. + /// #[inline(always)] pub fn try_change_center(&mut self, mut state: Self) -> KeteResult<()> { if self.epoch != state.epoch { @@ -304,7 +314,7 @@ mod tests { 0.0.into(), [0.0, 1.0, 0.0].into(), [0.0, 1.0, 0.0].into(), - 1000000000, + 1_000_000_000, ); assert!(a.try_change_center(no_matching_id.into_frame()).is_err()); } diff --git a/src/kete_core/src/stats/ks_test.rs b/src/kete_core/src/stats/ks_test.rs index 8375aa6..9275ebb 100644 --- a/src/kete_core/src/stats/ks_test.rs +++ b/src/kete_core/src/stats/ks_test.rs @@ -37,6 +37,8 @@ use crate::errors::{Error, KeteResult}; /// /// This ignores NAN or INF values in the samples. /// +/// # Errors +/// Fails when data does not contain any finite values. pub fn two_sample_ks_statistic(sample_a: &[f64], sample_b: &[f64]) -> KeteResult { // Sort the two inputs and drop nan/inf let mut sample_a = sample_a diff --git a/src/kete_core/src/stats/quantile.rs b/src/kete_core/src/stats/quantile.rs index 611523f..86e28c6 100644 --- a/src/kete_core/src/stats/quantile.rs +++ b/src/kete_core/src/stats/quantile.rs @@ -38,8 +38,12 @@ use crate::{errors::Error, prelude::KeteResult}; /// Quantiles are linearly interpolated between the two closest ranked values. /// /// If only one valid data point is provided, all quantiles evaluate to that value. +/// +/// # Errors +/// Fails when quant is not between 0 and 1, or if the data does not have any finite +/// values. pub fn quantile(data: &[f64], quant: f64) -> KeteResult { - if quant <= 0.0 || quant >= 1.0 { + if !quant.is_finite() || quant <= 0.0 || quant >= 1.0 { Err(Error::ValueError( "Quantile must be between 0.0 and 1.0".into(), ))?; @@ -62,6 +66,10 @@ pub fn quantile(data: &[f64], quant: f64) -> KeteResult { } let frac_idx = quant * (n_data - 1) as f64; + #[allow( + clippy::cast_sign_loss, + reason = "By construction this is always positive." + )] let idx = frac_idx.floor() as usize; if idx as f64 == frac_idx { @@ -75,6 +83,9 @@ pub fn quantile(data: &[f64], quant: f64) -> KeteResult { /// Compute the median value of the data. /// /// This ignores non-finite values such as inf and nan. +/// +/// # Errors +/// Fails when data does not contain any finite values. pub fn median(data: &[f64]) -> KeteResult { quantile(data, 0.5) } @@ -83,6 +94,8 @@ pub fn median(data: &[f64]) -> KeteResult { /// /// /// +/// # Errors +/// Fails when data does not contain any finite values. 
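A self-contained sketch of the linearly interpolated quantile described above, with the same input validation the patch adds (`quant` must be finite and strictly between 0 and 1; non-finite samples are dropped). The `String` error is a stand-in for `Error::ValueError`, and the sample data is made up.

```rust
fn quantile(data: &[f64], quant: f64) -> Result<f64, String> {
    if !quant.is_finite() || quant <= 0.0 || quant >= 1.0 {
        return Err("Quantile must be between 0.0 and 1.0".into());
    }
    let mut vals: Vec<f64> = data.iter().copied().filter(|v| v.is_finite()).collect();
    if vals.is_empty() {
        return Err("Data contains no finite values".into());
    }
    vals.sort_by(|a, b| a.total_cmp(b));

    let frac_idx = quant * (vals.len() - 1) as f64;
    let idx = frac_idx.floor() as usize;
    if idx as f64 == frac_idx {
        return Ok(vals[idx]);
    }
    // Linear interpolation between the two closest ranked values.
    let frac = frac_idx - idx as f64;
    Ok(vals[idx] * (1.0 - frac) + vals[idx + 1] * frac)
}

fn main() {
    let data = [1.0, f64::NAN, 3.0, 2.0, 4.0];
    assert_eq!(quantile(&data, 0.5).unwrap(), 2.5);
    assert!(quantile(&data, 1.5).is_err());
}
```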
pub fn mad(data: &[f64]) -> KeteResult { let median = quantile(data, 0.5)?; let abs_deviation_from_med: Box<[f64]> = data.iter().map(|d| d - median).collect(); diff --git a/src/kete_core/src/time/leap_second.rs b/src/kete_core/src/time/leap_second.rs index c452487..f032c45 100644 --- a/src/kete_core/src/time/leap_second.rs +++ b/src/kete_core/src/time/leap_second.rs @@ -103,12 +103,12 @@ mod tests { #[test] fn test_leap_second() { { - let t = &LEAP_SECONDS[0]; + let t = &LEAP_SECONDS.first().unwrap(); assert!(t.tai_m_utc == 10.0 / 86400.0); assert!(t.mjd == 41317.0); } { - let t = &LEAP_SECONDS[27]; + let t = &LEAP_SECONDS.last().unwrap(); assert!(t.tai_m_utc == 37.0 / 86400.0); assert!(t.mjd == 57754.0); } diff --git a/src/kete_core/src/time/mod.rs b/src/kete_core/src/time/mod.rs index d5c30e2..c6dacba 100644 --- a/src/kete_core/src/time/mod.rs +++ b/src/kete_core/src/time/mod.rs @@ -69,7 +69,7 @@ pub use self::scales::{JD_TO_MJD, TAI, TDB, TimeScale, UTC}; /// loss due to the nature of the representation of numbers on computers. /// #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] - +#[must_use] pub struct Time { /// Julian Date pub jd: f64, @@ -80,6 +80,7 @@ pub struct Time { /// Duration of time. #[derive(Debug, Clone, Copy, PartialEq)] +#[must_use] pub struct Duration { /// Elapsed time in days. pub elapsed: f64, @@ -114,30 +115,38 @@ impl Duration { } /// Convert Hours/Minutes/Seconds/millisecond to fraction of a day +#[must_use] pub fn hour_min_sec_to_day(h: u32, m: u32, s: u32, ms: u32) -> f64 { - h as f64 / 24.0 - + m as f64 / 60. / 24. - + s as f64 / 60. / 60. / 24. - + ms as f64 / 1000. / 60. / 60. / 24. + f64::from(h) / 24.0 + + f64::from(m) / 60. / 24. + + f64::from(s) / 60. / 60. / 24. + + f64::from(ms) / 1000. / 60. / 60. / 24. } /// Convert fraction of a day to hours/minutes/seconds/microseconds -pub fn frac_day_to_hmsms(mut frac: f64) -> Option<(u32, u32, u32, u32)> { - if frac.is_sign_negative() || frac.abs() >= 1.0 { - return None; +/// +/// # Errors +/// Fails if frac is non-finite, and if frac is not between 0 and 1. +#[allow(clippy::cast_possible_truncation, reason = "Truncation is intentional")] +#[allow(clippy::cast_sign_loss, reason = "Sign is manually validated")] +pub fn frac_day_to_hmsms(mut frac: f64) -> KeteResult<(u32, u32, u32, u32)> { + if !frac.is_finite() || frac.is_sign_negative() || frac.abs() >= 1.0 { + return Err(Error::ValueError( + "Day frac must be between 0.0 and 1.".into(), + )); } frac *= 24.0; let hour = frac as u32; - frac -= hour as f64; + frac -= f64::from(hour); frac *= 60.0; let minute = frac as u32; - frac -= minute as f64; + frac -= f64::from(minute); frac *= 60.0; let second = frac as u32; - frac -= second as f64; + frac -= f64::from(second); frac *= 1000.0; - Some((hour, minute, second, frac as u32)) + Ok((hour, minute, second, frac as u32)) } /// Days in the provided year. @@ -147,37 +156,35 @@ pub fn frac_day_to_hmsms(mut frac: f64) -> Option<(u32, u32, u32, u32)> { /// This is a proleptic implementation, meaning it does not take into account /// the Gregorian calendar reform, which is correct for most applications. /// -fn days_in_year(year: i64) -> u64 { +fn days_in_year(year: i64) -> u32 { let is_leap = (year % 4 == 0 && year % 100 != 0) || (year % 400 == 0); if is_leap { 366 } else { 365 } } impl Time { /// Read time from the standard ISO format for time. + /// + /// # Errors + /// An error is returned if ISO string parsing fails. 
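The two day-fraction helpers touched above, sketched as free functions: `u32` components are widened with `f64::from` as the patch does, and the reverse conversion returns a `Result` (validating its input) instead of an `Option`. The `String` error type is a stand-in for `Error::ValueError`.

```rust
fn hour_min_sec_to_day(h: u32, m: u32, s: u32, ms: u32) -> f64 {
    f64::from(h) / 24.0
        + f64::from(m) / 60. / 24.
        + f64::from(s) / 60. / 60. / 24.
        + f64::from(ms) / 1000. / 60. / 60. / 24.
}

fn frac_day_to_hmsms(mut frac: f64) -> Result<(u32, u32, u32, u32), String> {
    if !frac.is_finite() || frac.is_sign_negative() || frac >= 1.0 {
        return Err("Day frac must be between 0.0 and 1.".into());
    }
    frac *= 24.0;
    let hour = frac as u32;
    frac = (frac - f64::from(hour)) * 60.0;
    let minute = frac as u32;
    frac = (frac - f64::from(minute)) * 60.0;
    let second = frac as u32;
    frac = (frac - f64::from(second)) * 1000.0;
    Ok((hour, minute, second, frac as u32))
}

fn main() {
    assert_eq!(hour_min_sec_to_day(18, 0, 0, 0), 0.75);
    assert_eq!(frac_day_to_hmsms(0.75), Ok((18, 0, 0, 0)));
    assert!(frac_day_to_hmsms(1.5).is_err());
    assert!(frac_day_to_hmsms(f64::NAN).is_err());
}
```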
pub fn from_iso(s: &str) -> KeteResult { let time = DateTime::parse_from_rfc3339(s)?.to_utc(); - Self::from_datetime(&time) + Ok(Self::from_datetime(&time)) } /// Construct time from the current time. - pub fn now() -> KeteResult { + pub fn now() -> Self { Self::from_datetime(&Utc::now()) } /// Construct a Time object from a UTC [`DateTime`]. - pub fn from_datetime(time: &DateTime) -> KeteResult { + pub fn from_datetime(time: &DateTime) -> Self { let frac_day = hour_min_sec_to_day( time.hour(), time.minute(), time.second(), time.timestamp_subsec_millis(), ); - Ok(Self::from_year_month_day( - time.year() as i64, - time.month(), - time.day(), - frac_day, - )) + Self::from_year_month_day(i64::from(time.year()), time.month(), time.day(), frac_day) } /// Return the Gregorian year, month, day, and fraction of a day. @@ -186,7 +193,10 @@ impl Time { /// "A Machine Algorithm for Processing Calendar Dates" /// /// - pub fn year_month_day(&self) -> (i64, u32, u32, f64) { + #[must_use] + #[allow(clippy::cast_possible_truncation, reason = "Truncation is intentional")] + #[allow(clippy::cast_sign_loss, reason = "Sign is manually validated")] + pub fn year_month_day(&self) -> (i32, u32, u32, f64) { let offset = self.jd + 0.5; let frac_day = offset.rem_euclid(1.0); @@ -202,7 +212,7 @@ impl Time { let month = k + 2 - 12 * l; let year = 100 * (n - 49) + i + l; - (year, month as u32, day as u32, frac_day) + (year as i32, month as u32, day as u32, frac_day) } /// Return the current time as a fraction of the year. @@ -211,39 +221,41 @@ impl Time { /// use kete_core::time::{Time, UTC}; /// /// let time = Time::from_year_month_day(2010, 1, 1, 0.0); - /// assert_eq!(time.year_as_float(), 2010.0); + /// assert_eq!(time.year_as_float(), Ok(2010.0)); /// /// let time = Time::::new(2457754.5); - /// assert_eq!(time.year_as_float(), 2017.0); + /// assert_eq!(time.year_as_float(), Ok(2017.0)); /// /// let time = Time::::new(2457754.5 + 364.9999); - /// assert_eq!(time.year_as_float(), 2017.999999726028); + /// assert_eq!(time.year_as_float(), Ok(2017.999999726028)); /// /// let time = Time::::new(2457754.5 + 365.0 / 2.0); - /// assert_eq!(time.year_as_float(), 2017.5); + /// assert_eq!(time.year_as_float(), Ok(2017.5)); /// /// // 2016 was a leap year, so 366 days instead of 365. /// let time = Time::::new(2457754.5 - 366.0); - /// assert_eq!(time.year_as_float(), 2016.0); + /// assert_eq!(time.year_as_float(), Ok(2016.0)); /// /// let time = Time::::new(2457754.5 - 366.0 / 2.0); - /// assert_eq!(time.year_as_float(), 2016.5); + /// assert_eq!(time.year_as_float(), Ok(2016.5)); /// /// ``` /// - pub fn year_as_float(&self) -> f64 { - let datetime = self.to_datetime().unwrap(); + /// # Errors + /// Failure may occur if the [`NaiveDate::from_ymd_opt`] conversion fails. + pub fn year_as_float(&self) -> KeteResult { + let datetime = self.to_datetime()?; // ordinal is the integer day of the year, starting at 0. - let mut ordinal = datetime.ordinal0() as f64; + let mut ordinal = f64::from(datetime.ordinal0()); // we need to add the fractional day to the ordinal. let (_, _, _, frac_day) = self.year_month_day(); ordinal += frac_day; - let year = datetime.year() as f64; - let days_in_year = days_in_year(datetime.year() as i64) as f64; - year + ordinal / days_in_year + let year = f64::from(datetime.year()); + let days_in_year = f64::from(days_in_year(i64::from(datetime.year()))); + Ok(year + ordinal / days_in_year) } /// Create Time from the date in the Gregorian calendar. 
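`year_month_day` is based on the Fliegel & Van Flandern integer algorithm cited in the hunk above. The sketch below is written from the published algorithm rather than copied from kete_core (the visible `k + 2 - 12 * l` and `100 * (n - 49) + i + l` steps match), so treat it as illustrative only.

```rust
fn year_month_day(jd: f64) -> (i32, u32, u32, f64) {
    // Civil days start at midnight, JD days at noon, hence the 0.5 offset.
    let offset = jd + 0.5;
    let frac_day = offset.rem_euclid(1.0);

    let mut l = offset.div_euclid(1.0) as i64 + 68569;
    let n = 4 * l / 146097;
    l -= (146097 * n + 3) / 4;
    let i = 4000 * (l + 1) / 1461001;
    l = l - 1461 * i / 4 + 31;
    let k = 80 * l / 2447;
    let day = l - 2447 * k / 80;
    let l = k / 11;
    let month = k + 2 - 12 * l;
    let year = 100 * (n - 49) + i + l;
    (year as i32, month as u32, day as u32, frac_day)
}

fn main() {
    // Midnight on 2000-01-01 is JD 2451544.5.
    assert_eq!(year_month_day(2451544.5), (2000, 1, 1, 0.0));
}
```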
@@ -252,11 +264,12 @@ impl Time { /// "A Machine Algorithm for Processing Calendar Dates" /// /// + #[allow(clippy::cast_possible_truncation, reason = "Truncation is expected")] pub fn from_year_month_day(year: i64, month: u32, day: u32, frac_day: f64) -> Self { let frac_day = frac_day - 0.5; - let day = day as i64 + frac_day.div_euclid(1.0) as i64; + let day = i64::from(day) + frac_day.div_euclid(1.0) as i64; let frac_day = frac_day.rem_euclid(1.0); - let month = month as i64; + let month = i64::from(month); let tmp = (month - 14) / 12; let days = day - 32075 + 1461 * (year + 4800 + tmp) / 4 + 367 * (month - 2 - tmp * 12) / 12 @@ -265,11 +278,15 @@ impl Time { Self::new(days as f64 + frac_day) } - /// Create a [`DateTime`] object + /// Create a [`DateTime`] object. + /// + /// # Errors + /// Conversion to datetime object may fail for various reasons, such as the JD is + /// too large or too small. pub fn to_datetime(&self) -> KeteResult> { - let (y, month, d, f) = self.year_month_day(); - let (h, m, s, ms) = frac_day_to_hmsms(f).unwrap(); - Ok(NaiveDate::from_ymd_opt(y as i32, month, d) + let (year, month, day, f) = self.year_month_day(); + let (h, m, s, ms) = frac_day_to_hmsms(f)?; + Ok(NaiveDate::from_ymd_opt(year, month, day) .ok_or(Error::ValueError("Failed to convert ymd".into()))? .and_hms_milli_opt(h, m, s, ms) .ok_or(Error::ValueError("Failed to convert hms".into()))? @@ -277,6 +294,10 @@ impl Time { } /// Construct a ISO compliant UTC string. + /// + /// # Errors + /// Conversion to datetime object may fail for various reasons, such as the JD is + /// too large or too small. pub fn to_iso(&self) -> KeteResult { let datetime = self.to_datetime()?; Ok(datetime.to_rfc3339()) @@ -322,6 +343,7 @@ impl Time { } /// Convert to an MJD float. + #[must_use] pub fn mjd(&self) -> f64 { self.jd + JD_TO_MJD } @@ -422,7 +444,7 @@ mod tests { #[test] fn test_time_near_leap_second() { for offset in -1000..1000 { - let offset = offset as f64 / 10.0; + let offset = f64::from(offset) / 10.0; let mjd = 41683.0 + offset / 86400.0; // TIME IN TAI let t = Time::::from_mjd(mjd); let t = t.tdb(); @@ -434,7 +456,7 @@ mod tests { // Perform round trip conversions in the seconds around a leap second. for offset in -1000..1000 { - let offset = offset as f64 / 10.0; + let offset = f64::from(offset) / 10.0; let mjd = 41683.0 + offset / 86400.0; // TIME IN TAI let t = Time::::from_mjd(mjd); let t = t.tai(); @@ -451,7 +473,7 @@ mod tests { } for offset in -1000..1000 { - let offset = offset as f64 / 10.0; + let offset = f64::from(offset) / 10.0; let mjd = 41683.0 + offset / 86400.0 + TT_TO_TAI; let t = Time::::from_mjd(mjd); diff --git a/src/kete_core/src/util.rs b/src/kete_core/src/util.rs index 109befc..6a665df 100644 --- a/src/kete_core/src/util.rs +++ b/src/kete_core/src/util.rs @@ -54,26 +54,31 @@ pub struct Degrees { impl Degrees { /// Construct from radians. + #[must_use] pub fn from_radians(radians: f64) -> Self { Self::from_degrees(radians.to_degrees()) } /// Converts to radians. + #[must_use] pub fn to_radians(&self) -> f64 { self.degrees.to_radians() } /// Construct from degrees. + #[must_use] pub fn from_degrees(degrees: f64) -> Self { Self { degrees } } /// Converts to degrees. + #[must_use] pub fn to_degrees(&self) -> f64 { self.degrees } /// Construct from hours. + #[must_use] pub fn from_hours(hours: f64) -> Self { Self { degrees: hours * 15.0, @@ -81,12 +86,17 @@ impl Degrees { } /// Converts to Hours as a float. 
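The forward conversion in `from_year_month_day` is only partly visible in the hunk above. As a counterpart to the previous sketch, here is the published Fliegel & Van Flandern calendar-to-JD formula arranged in the same shape; it is an illustration of the technique, not a verbatim copy of the elided kete_core expression.

```rust
fn gregorian_to_jd(year: i64, month: u32, day: u32, frac_day: f64) -> f64 {
    // Shift by half a day so the returned JD starts the civil day at midnight.
    let frac_day = frac_day - 0.5;
    let day = i64::from(day) + frac_day.div_euclid(1.0) as i64;
    let frac_day = frac_day.rem_euclid(1.0);
    let month = i64::from(month);

    let tmp = (month - 14) / 12;
    let days = day - 32075
        + 1461 * (year + 4800 + tmp) / 4
        + 367 * (month - 2 - tmp * 12) / 12
        - 3 * ((year + 4900 + tmp) / 100) / 4;
    days as f64 + frac_day
}

fn main() {
    // Midnight on 2000-01-01 is JD 2451544.5.
    assert_eq!(gregorian_to_jd(2000, 1, 1, 0.0), 2451544.5);
}
```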
+ #[must_use] pub fn to_hours(&self) -> f64 { self.degrees / 15.0 } /// New Degrees from degrees and minutes. /// The degrees value can be negative, but the minutes must be non-negative. + /// + /// # Errors + /// + /// - [`Error::ValueError`] when the `minutes` parameter is negative. pub fn try_from_degrees_minutes(mut degrees: f64, minutes: f64) -> KeteResult { if minutes.is_sign_negative() { return Err(Error::ValueError(format!( @@ -98,7 +108,11 @@ impl Degrees { } /// New Degrees from degrees and minutes. - /// The degrees value can be negative, but the minutes must be non-negative. + /// The degrees value may be negative, but the seconds must be non-negative. + /// + /// # Errors + /// + /// - [`Error::ValueError`] when the `minutes` parameter is negative. pub fn try_from_degrees_minutes_seconds( mut degrees: f64, minutes: u32, @@ -109,12 +123,17 @@ impl Degrees { "Seconds value must be non-negative: {seconds}" ))); } - degrees += (minutes as f64).copysign(degrees) / 60.0; + degrees += f64::from(minutes).copysign(degrees) / 60.0; degrees += seconds.copysign(degrees) / 3600.0; Ok(Self { degrees }) } /// Construct from hours, minutes, and seconds. + /// The degrees value may be negative, but the seconds must be non-negative. + /// + /// # Errors + /// + /// - [`Error::ValueError`] when the `seconds` parameter is negative. pub fn try_from_hours_minutes_seconds( hours: f64, minutes: u32, @@ -125,7 +144,7 @@ impl Degrees { "Seconds values must be non-negative: {seconds}" ))); } - let minutes = (minutes as f64).copysign(hours); + let minutes = f64::from(minutes).copysign(hours); let seconds = seconds.copysign(hours); let seconds = hours * 3600.0 + minutes * 60.0 + seconds; Ok(Self { @@ -134,6 +153,10 @@ impl Degrees { } /// Construct from hours and minutes. + /// + /// # Errors + /// + /// - [`Error::ValueError`] when the `minutes` parameter is negative. pub fn try_from_hours_minutes(hours: f64, minutes: f64) -> KeteResult { if minutes.is_sign_negative() { return Err(Error::ValueError(format!( @@ -151,9 +174,10 @@ impl Degrees { /// /// This will be returned in the range [0, 24). /// - /// Parameters - /// ---------- + /// # Parameters + /// /// tol: Tolerance for rounding seconds to zero. + #[must_use] pub fn to_hours_minutes_seconds(&self, tol: f64) -> (f64, u32, f64) { let deg = self.degrees.rem_euclid(360.0); let hours = deg / 15.0; @@ -165,21 +189,25 @@ impl Degrees { s = 0.0; m += 1.0; } - if m.trunc() == 60.0 { - m = 0.0; + if m.trunc() >= 60.0 { + m -= 60.0; h += 1.0; } - if h == 24.0 { - h = 0.0; + if h >= 24.0 { + h -= 24.0; } + + #[allow(clippy::cast_possible_truncation, reason = "truncation is intentional")] + #[allow(clippy::cast_sign_loss, reason = "value always positive.")] (h, m.trunc() as u32, s) } /// Converts to Degrees Minutes Seconds. /// - /// Parameters - /// ---------- + /// # Parameters + /// /// tol: Tolerance for rounding seconds to zero. 
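A simplified, standalone sketch of the sexagesimal carry chain that `to_hours_minutes_seconds` applies above: seconds within `tol` of a full minute round up, and the carry now propagates with `>=` checks through minutes and hours. The decomposition and test values below are invented for illustration and differ in structure from the kete_core function.

```rust
fn deg_to_hms(degrees: f64, tol: f64) -> (u32, u32, f64) {
    let hours = degrees.rem_euclid(360.0) / 15.0;
    let mut h = hours.trunc();
    let mut m = (hours * 60.0).rem_euclid(60.0).trunc();
    let mut s = (hours * 3600.0).rem_euclid(60.0);

    // Seconds within `tol` of a full minute round up and propagate the carry.
    if (s - 60.0).abs() < tol {
        s = 0.0;
        m += 1.0;
    }
    if m >= 60.0 {
        m -= 60.0;
        h += 1.0;
    }
    if h >= 24.0 {
        h -= 24.0;
    }
    (h as u32, m as u32, s)
}

fn main() {
    // A hair under 24h: the carry chain wraps it back to 0h 0m 0s.
    let (h, m, s) = deg_to_hms(359.999_999_9, 1e-4);
    assert_eq!((h, m), (0, 0));
    assert!(s < 1e-3);
}
```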
+ #[must_use] pub fn to_degrees_minutes_seconds(&self, tol: f64) -> (f64, u32, f64) { let degrees = self.degrees.abs(); let mut deg = degrees.trunc(); @@ -190,11 +218,14 @@ impl Degrees { s = 0.0; m += 1.0; } - if m.trunc() == 60.0 { - m = 0.0; + if m.trunc() >= 60.0 { + m -= 60.0; deg += 1.0; } - (deg.copysign(self.degrees), m as u32, s) + + #[allow(clippy::cast_possible_truncation, reason = "truncation is intentional")] + #[allow(clippy::cast_sign_loss, reason = "value always positive.")] + (deg.copysign(self.degrees), m.trunc() as u32, s) } /// Construct from a string containing the hours minutes and seconds @@ -206,6 +237,9 @@ impl Degrees { /// This is a generous implementation, it allows multiple separator /// characters such as spaces, commas, colons, and semicolons. It will /// also allow all numbers to be floating point or scientific notation. + /// + /// # Errors + /// [`Error`] possible when parsing fails. pub fn try_from_hms_str(text: &str) -> KeteResult { let (h, m, s) = parse_str_to_floats(text)?; let hours = h + m.copysign(h) / 60.0 + s.copysign(h) / 3600.0; @@ -221,6 +255,9 @@ impl Degrees { /// This is a generous implementation, it allows multiple separator /// characters such as spaces, commas, colons, and semicolons. It will /// also allow all numbers to be floating point or scientific notation. + /// + /// # Errors + /// [`Error`] possible when parsing fails. pub fn try_from_dms_str(text: &str) -> KeteResult { let (h, m, s) = parse_str_to_floats(text)?; let degrees = h + m.copysign(h) / 60.0 + s.copysign(h) / 3600.0; @@ -240,12 +277,14 @@ impl Degrees { } /// Convert to a string in the format "+ddd mm ss.ss". + #[must_use] pub fn to_dms_str(&self) -> String { let (d, m, s) = self.to_degrees_minutes_seconds(1e-4); format!("{d:+03} {m:02} {s:05.2}") } /// Convert to a string in the format "hh mm ss.sss". + #[must_use] pub fn to_hms_str(&self) -> String { let (h, m, s) = self.to_hours_minutes_seconds(1e-5); format!("{h:02} {m:02} {s:06.3}") @@ -273,10 +312,10 @@ fn parse_str_to_floats(text: &str) -> KeteResult<(f64, f64, f64)> { ))); } - let (h, m, s) = match hms.len() { - 1 => (hms[0], 0.0, 0.0), - 2 => (hms[0], hms[1], 0.0), - 3 => (hms[0], hms[1], hms[2]), + let (h, m, s) = match *hms.as_slice() { + [x] => (x, 0.0, 0.0), + [x, y] => (x, y, 0.0), + [x, y, z] => (x, y, z), _ => { return Err(Error::ValueError(format!( "String has too many numbers: {text}", @@ -298,6 +337,7 @@ fn parse_str_to_floats(text: &str) -> KeteResult<(f64, f64, f64)> { /// Case insensitive search is performed. /// /// Return all possible matches along with their indices. 
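The `parse_str_to_floats` rewrite above swaps length-indexed access for slice patterns. Below is a standalone sketch of the same shape with a deliberately simplified tokenizer (whitespace, colon, comma, or semicolon separators); it is not the full parser from the patch.

```rust
fn parse_hms(text: &str) -> Result<(f64, f64, f64), String> {
    let parts: Vec<f64> = text
        .split(|c: char| c.is_whitespace() || c == ':' || c == ',' || c == ';')
        .filter(|tok| !tok.is_empty())
        .map(|tok| tok.parse::<f64>().map_err(|e| e.to_string()))
        .collect::<Result<_, _>>()?;

    // Match on the shape of the parsed numbers instead of indexing by length.
    match *parts.as_slice() {
        [h] => Ok((h, 0.0, 0.0)),
        [h, m] => Ok((h, m, 0.0)),
        [h, m, s] => Ok((h, m, s)),
        [] => Err(format!("String contains no numbers: {text}")),
        _ => Err(format!("String has too many numbers: {text}")),
    }
}

fn main() {
    assert_eq!(parse_hms("12 30 45.5"), Ok((12.0, 30.0, 45.5)));
    assert_eq!(parse_hms("12:30"), Ok((12.0, 30.0, 0.0)));
    assert!(parse_hms("1 2 3 4").is_err());
}
```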
+#[must_use] pub fn partial_str_match<'a>(needle: &str, haystack: &'a [&'a str]) -> Vec<(usize, &'a str)> { let needle = needle.trim().to_lowercase(); haystack @@ -315,7 +355,7 @@ mod tests { #[test] fn test_hms_deg_roundtrip() { for deg in 0..36000 { - let deg = deg as f64 / 100.0; + let deg = f64::from(deg) / 100.0; let hms = Degrees::from_degrees(deg); let converted_deg = hms.to_degrees(); assert!( @@ -328,7 +368,7 @@ mod tests { #[test] fn test_dms_deg_roundtrip() { for deg in 0..36000 { - let deg = deg as f64 / 100.0; + let deg = f64::from(deg) / 100.0; let hms = Degrees::from_degrees(deg); let converted_deg = hms.to_degrees(); assert!( @@ -451,9 +491,10 @@ mod tests { for minute in 0..6 { let minute = minute * 10; for second in 0..60 { - let second = second as f64 + 0.123; - let hms = Degrees::try_from_hours_minutes_seconds(hour as f64, minute, second) - .unwrap(); + let second = f64::from(second) + 0.123; + let hms = + Degrees::try_from_hours_minutes_seconds(f64::from(hour), minute, second) + .unwrap(); let hms_str = format!("{hour:02} {minute:02} {second:06.3}"); assert!( (hms.to_degrees() @@ -473,10 +514,13 @@ mod tests { for minute in 0..6 { let minute = minute * 10; for second in 0..60 { - let second = second as f64 + 0.123; - let dms: Degrees = - Degrees::try_from_degrees_minutes_seconds(degree as f64, minute, second) - .unwrap(); + let second = f64::from(second) + 0.123; + let dms: Degrees = Degrees::try_from_degrees_minutes_seconds( + f64::from(degree), + minute, + second, + ) + .unwrap(); let dms_str = format!("{degree:02} {minute:02} {second:06.3}"); assert!( (dms.to_degrees()