diff --git a/DESCRIPTION b/DESCRIPTION
index f0d4e4a..2f09736 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,7 +1,7 @@
Package: NPSutils
Type: Package
Title: Collection of Functions to read and manipulate information from the NPS DataStore
-Version: 0.3.1
+Version: 0.3.2
Authors@R: c(
person(given = "Robert", family = "Baker", email = "robert_baker@nps.gov",
role = c("aut", "cre"),
@@ -17,9 +17,10 @@ Description: NPSutils is a collection of functions for interacting with NPS Data
License: MIT + file LICENSE
Encoding: UTF-8
LazyData: true
-Remotes:
+Remotes:
nationalparkservice/EMLeditor,
- nationalparkservice/DPchecker
+ nationalparkservice/DPchecker,
+ nationalparkservice/QCkit
Imports:
EML,
sf,
@@ -36,9 +37,12 @@ Imports:
lifecycle,
EMLeditor (>= 0.1.5),
DPchecker (>= 0.3.4),
+ QCkit (>= 0.1.4),
here,
jsonlite,
- cli
+ cli,
+ purrr,
+ tibble
RoxygenNote: 7.3.2
Suggests:
knitr,
diff --git a/NAMESPACE b/NAMESPACE
index a2e51f2..683736c 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -16,6 +16,7 @@ export(get_unit_code_info)
export(get_unit_info)
export(load_core_metadata)
export(load_data_package)
+export(load_data_package_deprecated)
export(load_data_packages)
export(load_domains)
export(load_pkg_metadata)
diff --git a/NEWS.md b/NEWS.md
index 1f59d6a..f312b22 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,4 +1,6 @@
-# NPSutils 0.3.2 (in development)
+# NPSutils 0.3.2 "Lost Coast"
+ * Add new functions, `load_data_packages()` and `load_data_package()`, which load data packages (EML metadata in .xml and data files in .csv) much like the deprecated `load_data_package_deprecated()`, but can also set the column data types in the loaded tibbles based on information in the metadata.
+ * Deprecate `load_data_package()` and rename it to `load_data_package_deprecated()`.
 * Update readme to use pak for package installation instead of devtools.
* Update _pkgdown.yml to use bootstrap 5
 * Added helper functions for API requests and user input to facilitate unit testing.
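For reviewers, a minimal usage sketch of the new loader (the reference ID is the one used in the package examples; paths assume the `get_data_packages()` defaults):

    library(NPSutils)
    get_data_packages(2272461)                       # download and unzip to ./data/2272461
    dat <- load_data_packages(2272461,
                              assign_attributes = TRUE,  # set column types from the EML
                              simplify = TRUE)           # flat list named "pkg_<id>.<file>"
    list2env(dat, envir = .GlobalEnv)                # optional: one object per tibble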
diff --git a/R/get_data_packages.R b/R/get_data_packages.R
index 10ddb1c..0f6bc17 100644
--- a/R/get_data_packages.R
+++ b/R/get_data_packages.R
@@ -112,7 +112,7 @@ get_data_packages <- function(reference_id,
destination_dir <- paste("data/", reference_id[i], sep = "")
#if the directory already exists, prompt user to overwrite:
if(force == FALSE) {
- if (file.exists(destination_dir) & force == FALSE){
+ if (file.exists(destination_dir)){
cat("The directory ",
crayon::blue$bold(destination_dir),
" already exists.\n",
diff --git a/R/load_core_metadata.R b/R/load_core_metadata.R
index be2d518..b102711 100644
--- a/R/load_core_metadata.R
+++ b/R/load_core_metadata.R
@@ -132,6 +132,7 @@ load_core_metadata <- function(ds_ref, path = paste0(getwd(), "/data")){
#'
#' @return dataframe
#' @keywords private
+#' @noRd
#'
#' @examples
#' \dontrun{
@@ -199,6 +200,7 @@ load_core_metadata <- function(ds_ref, path = paste0(getwd(), "/data")){
#'
#' @return dataframe
#' @keywords private
+#' @noRd
#'
#' @examples
#' \dontrun{
diff --git a/R/load_data_package.R b/R/load_data_package.R
index 0f11d5d..5ae4073 100644
--- a/R/load_data_package.R
+++ b/R/load_data_package.R
@@ -1,6 +1,7 @@
#' Read contents of a data package and construct a list of tibbles based on the data file(s)
#'
-#' @description \code{load_data_package} reads the data file(s) from a package and loads it into a list of tibbles. Current implementation only supports .csv data files.
+#' `r lifecycle::badge("deprecated")`
+#' @description `load_data_package_deprecated()` reads the data file(s) from a data package and loads them into a list of tibbles. The current implementation only supports .csv data files.
#'
#' @param reference_id is a 6-7 digit number corresponding to the reference ID of the data package.
#'
@@ -12,9 +13,13 @@
#' \dontrun{
#' load_data_package(2272461)
#' }
-load_data_package <- function(reference_id) {
+load_data_package_deprecated <- function(reference_id) {
data_package_directory <- paste("data/", reference_id, sep = "")
data_package_filename <- paste(data_package_directory, ".zip", sep = "")
+
+ lifecycle::deprecate_warn("0.3.2",
+                         "load_data_package_deprecated()",
+ "load_data_packages()")
# Look for the zipped data package and attempt to unzip it. If the zipped file exists but cannot be unzipped, give the user a warning. If neither the unzipped nor zipped data packages exist, suggest the user check their working directory or use getDataPackage() to get the data package.
if (!file.exists(data_package_directory)) {
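Migration sketch (the warning text below approximates what `lifecycle::deprecate_warn()` emits; the reference ID is from the package examples):

    load_data_package_deprecated(2272461)
    #> Warning: `load_data_package_deprecated()` was deprecated in NPSutils 0.3.2.
    #> Please use `load_data_packages()` instead.
    dat <- load_data_packages(2272461)   # replacement call; returns a list of tibbles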
diff --git a/R/load_data_packages.R b/R/load_data_packages.R
index e774334..4069c5c 100644
--- a/R/load_data_packages.R
+++ b/R/load_data_packages.R
@@ -1,15 +1,13 @@
-#' Read contents of data package(s) and return a tibble with a tibble for each data file.
-#'
-#' `r lifecycle::badge("experimental")`
+#' Read contents of data package(s) and return a list of tibbles based on the data file(s). Can use metadata to specify data types.
#'
-#' @description `load_data_packages()` loads one to may data packages and returns a tibble of tibbles where each data package is a tibble and within that each data file is it's own tibble. `load_data_packages()` will only work with .csv data files and EML metadata. `load_data_packages()` can also utilize the metadata to assign attributes to each data column.
+#' @description `load_data_packages()` loads one to many data packages and returns a list. If only one data package is loaded, the list will be a list of tibbles where each tibble is a data (.csv) file from the data package. If multiple data packages are loaded, the list will be a list of lists where each nested list contains a list of tibbles, and each tibble is a data file (.csv). See `simplify` below for details on handling these lists.
#'
-#' @details currently `load_data_packages()` only supports EML metadata and .csv files. To take advantage of the default settings in load_data_packages, use the default settings in `get_data_package()` or `get_data_packages()`. Archived (.zip) files must be extracted before `load_data_packages()` will work properly. Again, `get_data_package()` or `get_data_packages()` will accomplish this for you.
+#' @details currently `load_data_packages()` only supports EML metadata and .csv files. The `reference_id` should match the name of the directory holding each data package; for packages downloaded with `get_data_packages()` default settings, this is the DataStore reference ID.
 #'
-#' @param reference_id is a list of 6-7 digit numbers corresponding to the DataStore reference ID of the datapackage(s) to load. Alternatively, you can set `reference_id` to "load_all", which will load all the data packages in your /data folder.
-#' @param directory is the location of a folder, 'data' (created during `get_data_packages()`) which contains sub-directories where each sub-directory is the DataStore referenceId of the data package. Again, this file structure is all set up using `get_data_packages()`. Defaults to the current working directory (which is the default location for `get_data_packages()`).
-#' @param assign_attributes Logical. Defaults to FALSE. Data will be loaded using `readr::read_csv()` guessing algorithm for calling column types. If set to TRUE, column types will be set using metadata attributes via the yet-to-be written `load_metadata()` function. `r lifecycle::badge('experimental')`
-#' @param simplify Logical. Defaults to TRUE. If there is only a single data package loaded, the function will return a simple list of tibbles (where each tibble reflects a data file from within the data package). If set to FALSE, the function will return a list that contains a list of tibbles. This structure mirrors the object structure returned if multiple data packages are simultaneously loaded (a list of data packages with each data package containing a list of tibbles where each tibble corresponds to a data file in the given data package).
+#' @param reference_id the immediate directory/directories where your data packages reside. For data packages downloaded from DataStore using `get_data_package()` or `get_data_packages()` default settings, this is the DataStore reference ID for your data package(s). Alternatively, you can set `reference_id` to "load_all", which will load all the data packages in the directory specified via `directory` (typically ./data).
+#' @param directory is the location of a folder that contains all of the data packages, where each data package is a folder containing .csv data files and a single .xml EML metadata file. If these data packages were downloaded from DataStore using the default settings for `get_data_packages()`, this folder is "./data" and you can use the default settings for `directory`.
+#' @param assign_attributes Logical. Defaults to FALSE. Data will be loaded using `readr::read_csv()`'s guessing algorithm for calling column types. If you set `assign_attributes = TRUE`, column types will be set using the data types specified in the metadata. Currently supported data types include string, dateTime, float, double, integer, and categorical (factor in R). This assignment is very stringent: for instance, if you did not specify date-time formats using ISO-8601 notation (i.e. "YYYY", not "yyyy"), your data will import as NAs. If you have undefined missing values or blank cells, your data will not import at all. If you run into problems, consider using the default settings and letting `read_csv()` guess the column types.
+#' @param simplify Logical. Defaults to TRUE. If `simplify = TRUE`, the function will return a list of tibbles where each tibble is a data file from the data package(s) specified. The tibbles are named "pkg_<reference_id>.<filename>" (without the filename extension). If you want to load each individual data file into R for further processing, use `simplify = TRUE` and then run `list2env(x, envir = .GlobalEnv)`. If you set `simplify = FALSE`, the object returned will either be a list of tibbles identical to that returned by `simplify = TRUE` (if only one data package is loaded) or a list of lists where each nested list contains one tibble for each data file in each data package. Setting `simplify = FALSE` may make it easier to do post-processing on a package-by-package level rather than a tibble-by-tibble level.
+      attribs <- attribs %>% dplyr::distinct(attributeName,
+                                             .keep_all = TRUE)
- if (data_format == "csv" & metadata_format == "eml") {
- filelist <- utils::unzip(data_package_filename, list = TRUE)
- if (assign_attributes == TRUE) {
- #assign attributes using metadata via a yet-to-be-built sub-function.
+ attribs <- attribs %>% dplyr::mutate(R_data_type = dplyr::case_when(
+ storageType == "string" ~ "collector_character",
+ storageType == "date" ~ "collector_date",
+ storageType == "float" ~ "collector_double",
+ storageType == "double" ~ "collector_double",
+ storageType == "integer" ~ "collector_integer"))
+
+ #get column specification as R would guess:
+ csv_cols <- readr::spec_csv(file_path)
+
+ #set data types based on EML, simple:
+ for(j in 1:nrow(attribs)) {
+ class(csv_cols$cols[[j]]) <- attribs$R_data_type[[j]]
+ }
+
+ #set date/time col type format string:
+ for(j in 1:nrow(attribs)) {
+ if("dateTime" %in% names(attribs$measurementScale[j])) {
+ eml_date <-
+ attribs$measurementScale[j][["dateTime"]][["formatString"]]
+ r_date <- QCkit::convert_datetime_format(eml_date)
+ csv_cols$cols[[j]]$format <- r_date
+ }
+ }
+ #set levels for factor call types:
+ for (j in 1:nrow(attribs)) {
+ if("nominal" %in% names(attribs$measurementScale[j])) {
+ nom <- attribs$measurementScale[j][["nominal"]]
+ if ("nonNumericDomain" %in% names(nom)) {
+ nom2 <- nom[["nonNumericDomain"]]
+ if ("enumeratedDomain" %in% names(nom2)) {
+ nom3 <- nom2[["enumeratedDomain"]]
+ if ("codeDefinition" %in% names(nom3)) {
+ nom4 <- nom3[["codeDefinition"]]
+ #get factors
+ factors <- NULL
+ #handle case where there is only one code definition
+ if ("code" %in% names(nom4)) {
+ nom4 <- list(nom4)
+ }
+                  for (k in seq_along(nom4)) {
+ factors <- append(factors, nom4[[k]][["code"]])
+ }
+ #set column type:
+ csv_cols$cols[[j]] <- readr::col_factor(factors,
+ include_na = FALSE,
+ ordered = FALSE)
+ }
+ }
+ }
+ }
+ }
+ suppressWarnings(package_data[[i]] <-
+ assign(names[i],
+ readr::read_csv(file_path,
+ col_types = csv_cols,
+ show_col_types = FALSE)
+ )
+ )
+ names(package_data)[i] <- names[i]
+ } else {
+ # Do not call attributes:
+ suppressWarnings(package_data[[i]] <-
+ assign(names[i],
+ readr::read_csv(file_path,
+ show_col_types = FALSE)
+ )
+ )
+ names(package_data)[i] <- names[i]
+ }
}
- return(fileList)
- } else {
- print("data/metadata format combination not supported")
+ tibble_list[[h]] <- package_data
+ names(tibble_list)[[h]] <- paste0("pkg_", reference_id[h])
}
+ #put all the tibbles in a single list that is not nested
+ #(simplifies subsequent extraction)
+ if (simplify == TRUE) {
+ tibble_list <- extract_tbl(tibble_list)
+ }
+ return(tibble_list)
+}
+
+#' @export
+#' @rdname load_data_packages
+load_data_package <- function(reference_id,
+ directory = here::here("data"),
+ assign_attributes = FALSE,
+ simplify = TRUE) {
+
+  x <- load_data_packages(reference_id,
+                          directory = directory,
+                          assign_attributes = assign_attributes,
+                          simplify = simplify)
+ return(x)
}
+
+#' extract nested tibbles
+#'
+#' Adapted from stack overflow find_df function found at:
+#' https://stackoverflow.com/questions/70512869/extract-data-frames-from-nested-list
+#' And accessed on 2024-10-02
+#'
+#' @param x a (potentially deeply) nested list containing at least one tibble
+#'
+#' @return a list where each item in the list is a tibble found in the nested list `x`
+#' @keywords internal
+#' @noRd
+#'
+#' @examples
+#' \dontrun{
+#' z <- extract_tbl(x)
+#' }
+extract_tbl <- function(x) {
+  if (tibble::is_tibble(x))
+ return(list(x))
+ if (!is.list(x))
+ return(NULL)
+  unlist(lapply(x, extract_tbl), recursive = FALSE)
+}
\ No newline at end of file
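For reviewers, a standalone sketch of the column-typing technique the new code applies; the file path, column names, date format, and factor levels here are hypothetical stand-ins for what the EML metadata would supply:

    library(readr)

    path <- "data/2272461/example.csv"           # hypothetical data file
    spec <- readr::spec_csv(path)                # readr's guessed column spec
    # Override the guesses, as the new code does from the EML storageType entries:
    spec$cols[["survey_date"]] <- readr::col_date(format = "%Y-%m-%d")
    spec$cols[["species"]] <- readr::col_factor(levels = c("AMRO", "BCCH"),
                                                include_na = FALSE, ordered = FALSE)
    dat <- readr::read_csv(path, col_types = spec, show_col_types = FALSE)
    # extract_tbl() then flattens the nested result when simplify = TRUE;
    # unlist(..., recursive = FALSE) joins names with ".", e.g. "pkg_2272461.example".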
diff --git a/R/utils.R b/R/utils.R
index 84f77e1..3e88b00 100644
--- a/R/utils.R
+++ b/R/utils.R
@@ -29,7 +29,8 @@ assign("ds_dev_api", "https://irmadevservices.nps.gov/datastore-secure/v7/rest/"
#' Prompts for, gets, and returns binary user input (1 or 2)
#'
#' @return Factor. 1 or 2.
-#'
+#' @keywords internal
+#' @noRd
#' @examples
#' \dontrun{
#' var1 <- .get_user_input()
diff --git a/docs/404.html b/docs/404.html
index dc40cd6..3949753 100644
--- a/docs/404.html
+++ b/docs/404.html
@@ -32,7 +32,7 @@
NPSutils
- 0.3.1
+ 0.3.2
diff --git a/docs/LICENSE-text.html b/docs/LICENSE-text.html
index e0c578d..3db8c14 100644
--- a/docs/LICENSE-text.html
+++ b/docs/LICENSE-text.html
@@ -17,7 +17,7 @@
NPSutils
- 0.3.1
+ 0.3.2
diff --git a/docs/LICENSE.html b/docs/LICENSE.html
index 4e75229..1ad2900 100644
--- a/docs/LICENSE.html
+++ b/docs/LICENSE.html
@@ -17,7 +17,7 @@
NPSutils
- 0.3.1
+ 0.3.2
diff --git a/docs/articles/NPSutils.html b/docs/articles/NPSutils.html
index af4a28f..6d63125 100644
--- a/docs/articles/NPSutils.html
+++ b/docs/articles/NPSutils.html
@@ -32,7 +32,7 @@
NPSutils
- 0.3.1
+ 0.3.2
@@ -79,7 +79,7 @@
@Manual{,
title = {NPSutils: Collection of Functions to read and manipulate information from the NPS DataStore},
author = {Robert Baker and Joe DeVivo and Judd Patterson},
year = {2024},
- note = {R package version 0.3.1},
+ note = {R package version 0.3.2},
url = {https://nationalparkservice.github.io/NPSutils/},
}
Update readme to use pak for package installation instead of devtools.
+
NPSutils 0.3.2 “Lost Coast”
+
Add new functions, load_data_packages() and load_data_package(), which load data packages (EML metadata in .xml and data files in .csv) much like the deprecated load_data_package_deprecated(), but can also set the column data types in the loaded tibbles based on information in the metadata.
added private functions .get_authors() and .get_contacts() to retrieve authors and contacts (and emails) from EML
added load_EML_df(), which retrieves commonly available metadata items from an EML-formatted R object and returns them as a single dataframe (for loading into Power BI)
diff --git a/docs/reference/load_data_package.html b/docs/reference/load_data_package.html
deleted file mode 100644
index 923a297..0000000
--- a/docs/reference/load_data_package.html
+++ /dev/null
@@ -1,105 +0,0 @@
-Read contents of data package and constructs a list of tibbles based on the data file(s) — load_data_package • NPSutils
diff --git a/docs/reference/load_data_packages.html b/docs/reference/load_data_packages.html
index 1e7afae..d9e69d5 100644
--- a/docs/reference/load_data_packages.html
+++ b/docs/reference/load_data_packages.html
@@ -1,5 +1,5 @@
-Read contents of data package(s) and return a tibble with a tibble for each data file. — load_data_packages • NPSutils
+Read contents of data package(s) and return a list of tibbles based on the data file(s). Can use metadata to specify data types. — load_data_packages • NPSutils
@@ -17,7 +17,7 @@
NPSutils
- 0.3.1
+ 0.3.2
@@ -46,19 +46,26 @@
-
Read contents of data package(s) and return a tibble with a tibble for each data file.
+
Read contents of data package(s) and return a list of tibbles based on the data file(s). Can use metadata to specify data types.
`load_data_packages()` loads one to may data packages and returns a tibble of tibbles where each data package is a tibble and within that each data file is it's own tibble. `load_data_packages()` will only work with .csv data files and EML metadata. `load_data_packages()` can also utilize the metadata to assign attributes to each data column.
+
`load_data_packages()` loads one to many data packages and returns a list. If only one data package is loaded, the list will be a list of tibbles where each tibble is a data (.csv) file from the data package. If multiple data packages are loaded, the list will be a list of lists where each nested list contains a list of tibbles, and each tibble is a data file (.csv). See `simplify` below for details on handling these lists.
is a list of 6-7 digit numbers corresponding to the DataStore reference ID of the datapackage(s) to load. Alternatively, you can set `reference_id` to "load_all", which will load all the data packages in your /data folder.
+
the immediate directory/directories where your data packages reside. For data packages downloaded from DataStore using `get_data_package()` or `get_data_packages()` default settings, this is the DataStore reference ID for your data package(s). Alternatively, you can set `reference_id` to "load_all", which will load all the data packages in the directory specified via `directory` (typically ./data).
directory
-
is the location of a folder, 'data' (created during `get_data_packages()`) which contains sub-directories where each sub-directory is the DataStore referenceId of the data package. Again, this file structure is all set up using `get_data_packages()`. Defaults to the current working directory (which is the default location for `get_data_packages()`).
+
is the location of a folder that contains all of the data packages, where each data package is a folder containing .csv data files and a single .xml EML metadata file. If these data packages were downloaded from DataStore using the default settings for `get_data_packages()`, this folder is "./data" and you can use the default settings for `directory`.
assign_attributes
-
Logical. Defaults to FALSE. Data will be loaded using `readr::read_csv()` guessing algorithm for calling column types. If set to TRUE, column types will be set using metadata attributes via the yet-to-be written `load_metadata()` function. `r lifecycle::badge('experimental')`
+
Logical. Defaults to FALSE. Data will be loaded using `readr::read_csv()`'s guessing algorithm for calling column types. If you set `assign_attributes = TRUE`, column types will be set using the data types specified in the metadata. Currently supported data types include string, dateTime, float, double, integer, and categorical (factor in R). This assignment is very stringent: for instance, if you did not specify date-time formats using ISO-8601 notation (i.e. "YYYY", not "yyyy"), your data will import as NAs. If you have undefined missing values or blank cells, your data will not import at all. If you run into problems, consider using the default settings and letting `read_csv()` guess the column types.
simplify
-
Logical. Defaults to TRUE. If there is only a single data package loaded, the function will return a simple list of tibbles (where each tibble reflects a data file from within the data package). If set to FALSE, the function will return a list that contains a list of tibbles. This structure mirrors the object structure returned if multiple data packages are simultaneously loaded (a list of data packages with each data package containing a list of tibbles where each tibble corresponds to a data file in the given data package).
+
Logical. Defaults to TRUE. If `simplify = TRUE`, the function will return a list of tibbles where each tibble is a data file from the data package(s) specified. The tibbles are named "pkg_<reference_id>.<filename>" (without the filename extension). If you want to load each individual data file into R for further processing, use `simplify = TRUE` and then run `list2env(x, envir = .GlobalEnv)`. If you set `simplify = FALSE`, the object returned will either be a list of tibbles identical to that returned by `simplify = TRUE` (if only one data package is loaded) or a list of lists where each nested list contains one tibble for each data file in each data package. Setting `simplify = FALSE` may make it easier to do post-processing on a package-by-package level rather than a tibble-by-tibble level.
@@ -90,8 +97,7 @@
Value
Details
-
`r lifecycle::badge("experimental")`
-
currently `load_data_packages()` only supports EML metadata and .csv files. To take advantage of the default settings in load_data_packages, use the default settings in `get_data_package()` or `get_data_packages()`. Archived (.zip) files must be extracted before `load_data_packages()` will work properly. Again, `get_data_package()` or `get_data_packages()` will accomplish this for you.
+
currently `load_data_packages()` only supports EML metadata and .csv files. The `reference_id` should match the name of the directory holding each data package; for packages downloaded with `get_data_packages()` default settings, this is the DataStore reference ID.
diff --git a/docs/reference/map_wkt.html b/docs/reference/map_wkt.html
index 22a6292..70771c7 100644
--- a/docs/reference/map_wkt.html
+++ b/docs/reference/map_wkt.html
@@ -17,7 +17,7 @@
NPSutils
- 0.3.1
+ 0.3.2
diff --git a/docs/reference/rm_local_packages.html b/docs/reference/rm_local_packages.html
index db5ee0a..ed0de13 100644
--- a/docs/reference/rm_local_packages.html
+++ b/docs/reference/rm_local_packages.html
@@ -17,7 +17,7 @@
NPSutils
- 0.3.1
+ 0.3.2
diff --git a/docs/reference/validate_data_package.html b/docs/reference/validate_data_package.html
index 314bd5b..84fe751 100644
--- a/docs/reference/validate_data_package.html
+++ b/docs/reference/validate_data_package.html
@@ -17,7 +17,7 @@
NPSutils
- 0.3.1
+ 0.3.2
diff --git a/docs/sitemap.xml b/docs/sitemap.xml
index 89b2506..80cfae3 100644
--- a/docs/sitemap.xml
+++ b/docs/sitemap.xml
@@ -10,9 +10,6 @@
/reference/check_is_data_package.html
/reference/check_new_version.html
/reference/check_ref_exists.html
-/reference/dot-get_authors.html
-/reference/dot-get_contacts.html
-/reference/dot-get_user_input.html
/reference/get_data_packages.html
/reference/get_new_version_id.html
/reference/get_park_code.html
@@ -25,8 +22,8 @@
/reference/get_unit_info.html
/reference/index.html
/reference/load_core_metadata.html
-/reference/load_data_package.html
/reference/load_data_packages.html
+/reference/load_data_package_deprecated.html
/reference/load_domains.html
/reference/load_pkg_metadata.html
/reference/map_wkt.html
diff --git a/man/dot-get_authors.Rd b/man/dot-get_authors.Rd
deleted file mode 100644
index 606aec3..0000000
--- a/man/dot-get_authors.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/load_core_metadata.R
-\name{.get_authors}
-\alias{.get_authors}
-\title{Extracts authors and contact email addresses from EML metadata}
-\usage{
-.get_authors(metadata)
-}
-\arguments{
-\item{metadata}{an EML formatted R object}
-}
-\value{
-dataframe
-}
-\description{
-`.get_authors()` extracts the "creators" element from EML metadata and returns it as a dataframe with three columns: first, a column indicating that each row is an author; second, a column with the author's name (first last); third, the author's email address.
-}
-\details{
-`r lifecycle::badge('experimental')`
-
-There are some known issues with this function; unfortunately at this time we do not have example data packages to test them. These include: authors without a givenName, authors with more than two givenNames (e.g. multiple middle names), organizations as authors where there is no individualName.
-}
-\examples{
-\dontrun{
-authors <- get_authors(metadata)
-}
-}
-\keyword{private}
diff --git a/man/dot-get_contacts.Rd b/man/dot-get_contacts.Rd
deleted file mode 100644
index 401ce7e..0000000
--- a/man/dot-get_contacts.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/load_core_metadata.R
-\name{.get_contacts}
-\alias{.get_contacts}
-\title{Extracts contacts and email addresses from EML metadata}
-\usage{
-.get_contacts(metadata)
-}
-\arguments{
-\item{metadata}{an EML formatted R object}
-}
-\value{
-dataframe
-}
-\description{
-`.get_contacts()` extracts the "contacts" element from EML metadata and returns it as a dataframe with three columns: first, a column indicating that each row is a contact; second, a column with the contact's name (first last); third, the contact's email address.
-}
-\details{
-`r lifecycle::badge('experimental')`
-}
-\examples{
-\dontrun{
-contacts <- get_contacts(metadata)
-}
-}
-\keyword{private}
diff --git a/man/dot-get_user_input.Rd b/man/dot-get_user_input.Rd
deleted file mode 100644
index 9560c1d..0000000
--- a/man/dot-get_user_input.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/utils.R
-\name{.get_user_input}
-\alias{.get_user_input}
-\title{Get Binary User Input}
-\usage{
-.get_user_input()
-}
-\value{
-Factor. 1 or 2.
-}
-\description{
-Prompts for, gets, and returns binary user input (1 or 2)
-}
-\examples{
-\dontrun{
-var1 <- .get_user_input()
-}
-}
diff --git a/man/load_data_package.Rd b/man/load_data_package_deprecated.Rd
similarity index 59%
rename from man/load_data_package.Rd
rename to man/load_data_package_deprecated.Rd
index ff5ba7b..55fefad 100644
--- a/man/load_data_package.Rd
+++ b/man/load_data_package_deprecated.Rd
@@ -1,10 +1,10 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_data_package.R
-\name{load_data_package}
-\alias{load_data_package}
+\name{load_data_package_deprecated}
+\alias{load_data_package_deprecated}
\title{Read contents of a data package and construct a list of tibbles based on the data file(s)}
\usage{
-load_data_package(reference_id)
+load_data_package_deprecated(reference_id)
}
\arguments{
\item{reference_id}{is a 6-7 digit number corresponding to the reference ID of the data package.}
@@ -13,7 +13,10 @@ load_data_package(reference_id)
a list of one or more tibbles contained within the data package to the global environment.
}
\description{
-\code{load_data_package} reads the data file(s) from a package and loads it into a list of tibbles. Current implementation only supports .csv data files.
+`load_data_package_deprecated()` reads the data file(s) from a data package and loads them into a list of tibbles. The current implementation only supports .csv data files.
+}
+\details{
+`r lifecycle::badge("deprecated")`
}
\examples{
\dontrun{
diff --git a/man/load_data_packages.Rd b/man/load_data_packages.Rd
index 5e2e58e..6fd978c 100644
--- a/man/load_data_packages.Rd
+++ b/man/load_data_packages.Rd
@@ -2,34 +2,40 @@
% Please edit documentation in R/load_data_packages.R
\name{load_data_packages}
\alias{load_data_packages}
-\title{Read contents of data package(s) and return a tibble with a tibble for each data file.}
+\alias{load_data_package}
+\title{Read contents of data package(s) and return a list of tibbles based on the data file(s). Can use metadata to specify data types.}
\usage{
load_data_packages(
reference_id,
- directory = here::here(),
+ directory = here::here("data"),
+ assign_attributes = FALSE,
+ simplify = TRUE
+)
+
+load_data_package(
+ reference_id,
+ directory = here::here("data"),
assign_attributes = FALSE,
simplify = TRUE
)
}
\arguments{
-\item{reference_id}{is a list of 6-7 digit numbers corresponding to the DataStore reference ID of the datapackage(s) to load. Alternatively, you can set `reference_id` to "load_all", which will load all the data packages in your /data folder.}
+\item{reference_id}{the immediate directory/directories where your data packages reside. For data packages downloaded from DataStore using `get_data_package()` or `get_data_packages()` default settings, this is the DataStore reference ID for your data package(s). Alternatively, you can set `reference_id` to "load_all", which will load all the data packages in the directory specified via `directory` (typically ./data).}
-\item{directory}{is the location of a folder, 'data' (created during `get_data_packages()`) which contains sub-directories where each sub-directory is the DataStore referenceId of the data package. Again, this file structure is all set up using `get_data_packages()`. Defaults to the current working directory (which is the default location for `get_data_packages()`).}
+\item{directory}{is the location of a folder that contains all of the data packages, where each data package is a folder containing .csv data files and a single .xml EML metadata file. If these data packages were downloaded from DataStore using the default settings for `get_data_packages()`, this folder is "./data" and you can use the default settings for `directory`.}
-\item{assign_attributes}{Logical. Defaults to FALSE. Data will be loaded using `readr::read_csv()` guessing algorithm for calling column types. If set to TRUE, column types will be set using metadata attributes via the yet-to-be written `load_metadata()` function. `r lifecycle::badge('experimental')`}
+\item{assign_attributes}{Logical. Defaults to FALSE. Data will be loaded using `readr::read_csv()`'s guessing algorithm for calling column types. If you set `assign_attributes = TRUE`, column types will be set using the data types specified in the metadata. Currently supported data types include string, dateTime, float, double, integer, and categorical (factor in R). This assignment is very stringent: for instance, if you did not specify date-time formats using ISO-8601 notation (i.e. "YYYY", not "yyyy"), your data will import as NAs. If you have undefined missing values or blank cells, your data will not import at all. If you run into problems, consider using the default settings and letting `read_csv()` guess the column types.}
-\item{simplify}{Logical. Defaults to TRUE. If there is only a single data package loaded, the function will return a simple list of tibbles (where each tibble reflects a data file from within the data package). If set to FALSE, the function will return a list that contains a list of tibbles. This structure mirrors the object structure returned if multiple data packages are simultaneously loaded (a list of data packages with each data package containing a list of tibbles where each tibble corresponds to a data file in the given data package).}
+\item{simplify}{Logical. Defaults to TRUE. If `simplify = TRUE`, the function will return a list of tibbles where each tibble is a data file from the data package(s) specified. The tibbles are named "pkg_<reference_id>.<filename>" (without the filename extension). If you want to load each individual data file into R for further processing, use `simplify = TRUE` and then run `list2env(x, envir = .GlobalEnv)`. If you set `simplify = FALSE`, the object returned will either be a list of tibbles identical to that returned by `simplify = TRUE` (if only one data package is loaded) or a list of lists where each nested list contains one tibble for each data file in each data package. Setting `simplify = FALSE` may make it easier to do post-processing on a package-by-package level rather than a tibble-by-tibble level.}
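Finally, a small sketch of the two return shapes (the second reference ID is a placeholder):

    flat <- load_data_packages(c(2272461, 1234567))               # simplify = TRUE (default)
    names(flat)                                                   # "pkg_2272461.<file>", ...
    nested <- load_data_packages(c(2272461, 1234567), simplify = FALSE)
    lapply(nested, function(pkg) vapply(pkg, nrow, integer(1)))   # rows per file, per package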