From 6bae55aea161ff4fe848094907d398f370d8ef98 Mon Sep 17 00:00:00 2001 From: Socorro Dominguez Date: Tue, 25 Nov 2025 15:10:57 -0800 Subject: [PATCH 1/4] document from devtools --- man/plot.Rd | 5 ++--- man/sub-sub.Rd | 36 ++++++++++++++++++------------------ man/sub-subset.Rd | 28 ++++++++++++++-------------- man/sub.Rd | 26 ++++++++++++-------------- man/subset.Rd | 43 +++++++++++++++++++++---------------------- 5 files changed, 67 insertions(+), 71 deletions(-) diff --git a/man/plot.Rd b/man/plot.Rd index 0a7c4920..654b4fa3 100644 --- a/man/plot.Rd +++ b/man/plot.Rd @@ -3,13 +3,12 @@ \name{plot} \alias{plot} \alias{plot,sites-method} -\alias{plot,site,ANY-method} \alias{plot,site-method} \title{Plot site coordinates using a basic plot.} \usage{ -\S4method{plot}{sites,ANY}(x, y, ...) +\S4method{plot}{sites}(x, y, ...) -\S4method{plot}{site,ANY}(x, y, ...) +\S4method{plot}{site}(x, y, ...) } \arguments{ \item{x}{sites object} diff --git a/man/sub-sub.Rd b/man/sub-sub.Rd index 4bf61ab6..e7eeeaa5 100644 --- a/man/sub-sub.Rd +++ b/man/sub-sub.Rd @@ -3,43 +3,43 @@ % R/contact-methods.R, R/dataset-methods.R, R/publication-methods.R, % R/samples-methods.R, R/site-methods.R, R/speleothem-methods.R, % R/taxon-methods.R -\name{[[,chronologies,numeric,ANY-method} -\alias{[[,chronologies,numeric,ANY-method} -\alias{[[,collunits,numeric,ANY-method} +\name{[[,chronologies,numeric-method} +\alias{[[,chronologies,numeric-method} +\alias{[[,collunits,numeric-method} \alias{sub-sub,collunits-method} -\alias{[[,contacts,numeric,ANY-method} +\alias{[[,contacts,numeric-method} \alias{sub-sub,contacts-method} -\alias{[[,datasets,numeric,ANY-method} +\alias{[[,datasets,numeric-method} \alias{sub-sub,datasets-method} -\alias{[[,publications,numeric,ANY-method} +\alias{[[,publications,numeric-method} \alias{sub-sub,publications-method} -\alias{[[,samples,numeric,ANY-method} +\alias{[[,samples,numeric-method} \alias{sub-sub,samples-method} \alias{sub-sub} \alias{[[,sites,numeric-method} -\alias{[[,speleothems,numeric,ANY-method} +\alias{[[,speleothems,numeric-method} \alias{sub-sub,speleothems-method} -\alias{[[,taxa,numeric,ANY-method} +\alias{[[,taxa,numeric-method} \alias{sub-sub,taxa-method} \title{sub-sub} \usage{ -\S4method{[[}{chronologies,numeric,ANY}(x, i) +\S4method{[[}{chronologies,numeric}(x, i) -\S4method{[[}{collunits,numeric,ANY}(x, i) +\S4method{[[}{collunits,numeric}(x, i) -\S4method{[[}{contacts,numeric,ANY}(x, i) +\S4method{[[}{contacts,numeric}(x, i) -\S4method{[[}{datasets,numeric,ANY}(x, i) +\S4method{[[}{datasets,numeric}(x, i) -\S4method{[[}{publications,numeric,ANY}(x, i) +\S4method{[[}{publications,numeric}(x, i) -\S4method{[[}{samples,numeric,ANY}(x, i) +\S4method{[[}{samples,numeric}(x, i) -\S4method{[[}{sites,numeric,ANY}(x, i) +\S4method{[[}{sites,numeric}(x, i) -\S4method{[[}{speleothems,numeric,ANY}(x, i) +\S4method{[[}{speleothems,numeric}(x, i) -\S4method{[[}{taxa,numeric,ANY}(x, i) +\S4method{[[}{taxa,numeric}(x, i) } \arguments{ \item{x}{Neotoma2 nested object} diff --git a/man/sub-subset.Rd b/man/sub-subset.Rd index c6cbaf6e..83116846 100644 --- a/man/sub-subset.Rd +++ b/man/sub-subset.Rd @@ -2,36 +2,36 @@ % Please edit documentation in R/collunits-methods.R, R/dataset-methods.R, % R/publication-methods.R, R/samples-methods.R, R/site-methods.R, % R/speleothem-methods.R, R/taxon-methods.R -\name{[[<-,collunits,ANY,ANY-method} -\alias{[[<-,collunits,ANY,ANY-method} +\name{[[<-,collunits-method} +\alias{[[<-,collunits-method} \alias{sub-subset,collunits-method} 
-\alias{[[<-,datasets,ANY,ANY-method} +\alias{[[<-,datasets-method} \alias{sub-subset,datasets-method} -\alias{[[<-,publications,ANY,ANY-method} +\alias{[[<-,publications-method} \alias{sub-subset,publications-method} -\alias{[[<-,samples,ANY,ANY-method} +\alias{[[<-,samples-method} \alias{sub-subset,samples-method} \alias{sub-subset} \alias{[[<-,sites-method} -\alias{[[<-,speleothems,ANY,ANY-method} +\alias{[[<-,speleothems-method} \alias{sub-subset,speleothems-method} -\alias{[[<-,taxa,ANY,ANY-method} +\alias{[[<-,taxa-method} \alias{sub-subset,taxa-method} \title{sub-subset} \usage{ -\S4method{[[}{collunits,ANY,ANY}(x, i) <- value +\S4method{[[}{collunits}(x, i) <- value -\S4method{[[}{datasets,ANY,ANY}(x, i) <- value +\S4method{[[}{datasets}(x, i) <- value -\S4method{[[}{publications,ANY,ANY}(x, i) <- value +\S4method{[[}{publications}(x, i) <- value -\S4method{[[}{samples,ANY,ANY}(x, i) <- value +\S4method{[[}{samples}(x, i) <- value -\S4method{[[}{sites,ANY,ANY}(x, i) <- value +\S4method{[[}{sites}(x, i) <- value -\S4method{[[}{speleothems,ANY,ANY}(x, i) <- value +\S4method{[[}{speleothems}(x, i) <- value -\S4method{[[}{taxa,ANY,ANY}(x, i) <- value +\S4method{[[}{taxa}(x, i) <- value } \arguments{ \item{x}{\code{neotoma2} object} diff --git a/man/sub.Rd b/man/sub.Rd index 37ae0b8d..acbd47e5 100644 --- a/man/sub.Rd +++ b/man/sub.Rd @@ -1,36 +1,34 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/collunits-methods.R, R/dataset-methods.R, % R/site-methods.R, R/speleothem-methods.R, R/taxon-methods.R -\name{[,collunits,numeric,ANY-method} -\alias{[,collunits,numeric,ANY-method} +\name{[,collunits,numeric-method} +\alias{[,collunits,numeric-method} \alias{sub,collunits-method} -\alias{[,datasets,numeric,ANY-method} +\alias{[,datasets,numeric-method} \alias{sub,datasets-method} \alias{[} \alias{[,sites,numeric-method} -\alias{[,site,numeric,ANY-method} \alias{[,site,numeric-method} -\alias{[,site,character,ANY-method} \alias{[,site,character-method} -\alias{[,speleothems,numeric,ANY-method} +\alias{[,speleothems,numeric-method} \alias{sub,speleothems-method} -\alias{[,taxa,numeric,ANY-method} +\alias{[,taxa,numeric-method} \alias{sub,taxa-method} \title{sub} \usage{ -\S4method{[}{collunits,numeric,ANY}(x, i) +\S4method{[}{collunits,numeric}(x, i) -\S4method{[}{datasets,numeric,ANY}(x, i) +\S4method{[}{datasets,numeric}(x, i) -\S4method{[}{sites,numeric,ANY}(x, i) +\S4method{[}{sites,numeric}(x, i) -\S4method{[}{site,numeric,ANY}(x, i) +\S4method{[}{site,numeric}(x, i) -\S4method{[}{site,character,ANY}(x, i) +\S4method{[}{site,character}(x, i) -\S4method{[}{speleothems,numeric,ANY}(x, i) +\S4method{[}{speleothems,numeric}(x, i) -\S4method{[}{taxa,numeric,ANY}(x, i) +\S4method{[}{taxa,numeric}(x, i) } \arguments{ \item{x}{The \code{sites} object} diff --git a/man/subset.Rd b/man/subset.Rd index 273b885b..a06f5070 100644 --- a/man/subset.Rd +++ b/man/subset.Rd @@ -2,48 +2,47 @@ % Please edit documentation in R/collunits-methods.R, R/dataset-methods.R, % R/samples-methods.R, R/site-methods.R, R/speleothem-methods.R, % R/taxon-methods.R -\name{[<-,collunit,character,ANY-method} -\alias{[<-,collunit,character,ANY-method} +\name{[<-,collunit,character-method} +\alias{[<-,collunit,character-method} \alias{subset,collunit-method} -\alias{[<-,collunit,numeric,ANY-method} -\alias{[<-,dataset,character,ANY-method} +\alias{[<-,collunit,numeric-method} +\alias{[<-,dataset,character-method} \alias{subset,dataset-method} -\alias{[<-,dataset,numeric,ANY-method} 
-\alias{[<-,sample,character,ANY-method} +\alias{[<-,dataset,numeric-method} +\alias{[<-,sample,character-method} \alias{subset,sample-method} \alias{subset} \alias{[<-,site,character-method} -\alias{[<-,site,numeric,ANY-method} \alias{[<-,site,numeric-method} -\alias{[<-,speleothem,character,ANY-method} +\alias{[<-,speleothem,character-method} \alias{subset,speleothem-method} -\alias{[<-,speleothem,numeric,ANY-method} -\alias{[<-,taxon,character,ANY-method} +\alias{[<-,speleothem,numeric-method} +\alias{[<-,taxon,character-method} \alias{subset,taxon-method} -\alias{[<-,taxon,numeric,ANY-method} +\alias{[<-,taxon,numeric-method} \title{subset} \usage{ -\S4method{[}{collunit,character,ANY}(x, i) <- value +\S4method{[}{collunit,character}(x, i) <- value -\S4method{[}{collunit,numeric,ANY}(x, i) <- value +\S4method{[}{collunit,numeric}(x, i) <- value -\S4method{[}{dataset,character,ANY}(x, i) <- value +\S4method{[}{dataset,character}(x, i) <- value -\S4method{[}{dataset,numeric,ANY}(x, i) <- value +\S4method{[}{dataset,numeric}(x, i) <- value -\S4method{[}{sample,character,ANY}(x, i) <- value +\S4method{[}{sample,character}(x, i) <- value -\S4method{[}{site,character,ANY}(x, i) <- value +\S4method{[}{site,character}(x, i) <- value -\S4method{[}{site,numeric,ANY}(x, i) <- value +\S4method{[}{site,numeric}(x, i) <- value -\S4method{[}{speleothem,character,ANY}(x, i) <- value +\S4method{[}{speleothem,character}(x, i) <- value -\S4method{[}{speleothem,numeric,ANY}(x, i) <- value +\S4method{[}{speleothem,numeric}(x, i) <- value -\S4method{[}{taxon,character,ANY}(x, i) <- value +\S4method{[}{taxon,character}(x, i) <- value -\S4method{[}{taxon,numeric,ANY}(x, i) <- value +\S4method{[}{taxon,numeric}(x, i) <- value } \arguments{ \item{x}{A \code{neotoma2} object.} From 41ad5c7004d87b3c5d7a30533bf5db81f9a4a5a3 Mon Sep 17 00:00:00 2001 From: hoffmanick <134728053+hoffmanick@users.noreply.github.com> Date: Thu, 2 Apr 2026 09:21:03 -0500 Subject: [PATCH 2/4] Two changes 1. changed test_get_downloads to remove Rhode Island from test gpids. Some messed up data in RI. 2. 
Added in notes to collunit builder in parse_sites --- .RData | Bin 0 -> 2595 bytes R/parse_site.R | 1 + inst/doc/neotoma2-package.html | 5198 --------------------------- tests/testthat/Rplots.pdf | Bin 0 -> 5643 bytes tests/testthat/test_get_downloads.R | 3 +- 5 files changed, 3 insertions(+), 5199 deletions(-) create mode 100644 .RData delete mode 100644 inst/doc/neotoma2-package.html create mode 100644 tests/testthat/Rplots.pdf diff --git a/.RData b/.RData new file mode 100644 index 0000000000000000000000000000000000000000..06153a809bbde446c0794a89d13bf49b63a3ef88 GIT binary patch literal 2595 zcmV+;3f%P{iwFP!000000|5*Q^Yv0hSThP(3IG5A0{{dC0RRI9000001yxi=EjR!G z1Ofm60096500{s901PftVQyq^Z7y?VWn=&V01W^D0&)NVD5C%X018+75#iox+jG%b z@8Tz?CsZdzDp#B(>P;ALP2}WLr^?=Z>NQ+UaF33tah4ga?LgjbpaaVoq9HV*r4S|B z>h7kd)|vRi*DGGPk{UnlaNHDK?BBgH8np)53{3Uv5TcxqPyH~$toPl`+8z)fwAdVL z=sN`8(b<6Z$7mg&N|t9jG!ZV3Wy9@IsnUo?pk$fZU#DE%F#V4Cr1Z#OgW3;UcO{R8 zK0iG1<;FH+0#KmU{oq~m$3A%Xsm_wX2KR*so^ z`T1QJbBhf)-@1Q(&b(fs#sA~dBUQulRBS`)5W+}4(;d?6sh82~kp354BDlV}{G0o8 z3i&1*JV@WIpLWrM>akxrO5;x#|7;p%M>yx?)d@&-OlHg?m<9;s58VOq}ay$PMObRoR)9m=rIByZ?m!*|&pIj+q zLo)qYN-cCKDCclWY7Sq(7wA)cY3o4~dWqs2av1F@`9=q|_n|aidNWa#ccMbF@|3@| zDHqSDR}EW?1yAa&#F9Sq56u;R9wdA$aFem7yI^xigo@EDL5k9$^~&lwEu zGpIIO16Gp0yU`)K&x3+mdJ_I{?WFJG8E1{hhP|(`?lgkK^_rSvG39(6eSbyRH3szE z&X6JOM>Cs>6w&zM&VYM~*IxJAG^xMd$6K!@J!%8z&m&5VZF2u@RJ@FMVRBIQpzu3P;6#?Utytk9B0Az8Z*apv+&o0Dn-m?M zwZeG0Cg3TCb9^sX($LM6B%+FSNGL{7nG8-1BfRK^l`^h75x^Ie(loTdXULIbl)B=u zIgqM0A?#+Qj#MKI%#t%mJ0NQmG0dHZ&EOk;-ognc04Oe%F08bXu)HinHS$mNtuV^> zRHPU%Qn-Ef30ej_v5=<>KYZ#QxfJ*5e?t8pp8$56qr7Jt3s8jMZz34_MMxb(1`HCq zuw3>ESkiZy#sJDx2~;%$_2}UHbNWA~ywIzesVt^M*7Qa^Rgx7hz+l|n#S9HWQsBE) zwPJmADyS5F5=xRl>)AyQXIrr>V`9?V2a0HE7@r-J$}rs(Jnp6njHKs;~pj&jY&9B=}U!X&zZ zkT}hR7#jL|R?w6nTs$4<#FTGD=z`hNPr&_|DLnk24v?Yk2qN~{$Os1*JdFDnG#nS5 zf|5}-uz-8&BouEAp(qTG8ah|oRM0BnJZwDeKC~TAXcCXIuiyr9%goRD#o%gf8X{!W z)hNrpEew`|hCFS4*z%&f=I~u5e?)5ChFGP8N;>l&&hSVaxV&-TniPRKoWgVEY+%T9 z4SaU`SkOvnOd=r2=BTsHD~!ekRlNE4{L%1ZxXB_-=G3qqs$81+h8C^WWuIZJ}52U{j37o>pW6^S8@_@e=zB!_9O#UEI5Q> zXxd}jeUSVd+WcPZqm)>+UqaeV1_1kW?mYe=5>U#S9->dvOGs7k;tXR9Slp=m8d6^X z_<(+Q!&Fnk&FG>d&idnqOHe%Uj4X?7Idq`=7LpnjUSQtnc8td87ho?6BI0p7(5UZC z+{{A7k^G{I=dyqVN$|5GwZtI+b69Dm&wBYb<*T)$FXGf0V3c z&eD`W3Sgv>RZOOsksx@cW`c>Vh3J?CTkO43nmnV3CUR%bTX2-o>ReC8=*>>U_&4r~ zV!*n;)t(WVR%?R^P3;t|+*(Wy>lnzDP=O~HCNuFxoK#HZIAQ{RZuj4FVL{L;SH>eI ztpov8gtd`4BcGq)OZf4l1=e=NJY$T-C?!N>&%!T+M6jquaM?=-CQHj-e7ul^Lh9>L zV;^2QOR@@2>TC__RM?aOIloG4o^2pOBw&Wl`cmtuRJki%&lgKA6JE-vY>9A zoBqTVddnVWrev=i8Oth6QmBN>V<<~`Y$|3ICIp#ZMkmTfu za22oT@IRRWAK1t=A@&$lA_F7Xbw$6%VPdyK>d-eO#rZ%;tt;?+Zk#6S{K_U)mA~e{ zX$dJb${s{G$IQq9HJzQR@F&U%-cZ0X>h5Ielbf0df&FDTbm~~+e~73}88N{TDWe%V zoutr}fKp_Wlc&J+2l2`kqc(x#uhg>y)n!bL8E@h>qo{s|Ne0)m=rcLbzLnDhmhvUw z=Rn$9G^x%!NgMZG&g}PE^7`of+b=S%gE?N%RZh8{qnx>ADGiZet9R{dKY%jEt=?w# zQmgZ2jjfBMq8f90wAcWW`9V>EVe>s1=oBa_Eu!l-Dz`j+1c7#rw1%&J@WdO0&GmLK zm|q~a!no3q8RSU1duyJc2-{)$rDqqnW~}g^SGSaD04>5~{iy4By7r%4wHU zsD2?maBS7wh`l#nIfq1rt#DCB7!EpDRXcibx*cw2p2}#8+32c3=WPH00RA<5sz3}1 F001D^_0s?V literal 0 HcmV?d00001 diff --git a/R/parse_site.R b/R/parse_site.R index 97c7c09c..bac24b08 100644 --- a/R/parse_site.R +++ b/R/parse_site.R @@ -94,6 +94,7 @@ parse_site <- function(result, verbose = FALSE) { depositionalenvironment = use_na(y$depositionalenvironment, "char"), defaultchronology = use_na(y$defaultchronology, "int"), + notes = use_na(y$notes,"char"), speleothems = speleothems) do.call(build_collunits, cu_l) }) diff --git a/inst/doc/neotoma2-package.html b/inst/doc/neotoma2-package.html deleted file mode 
100644
index 2c939b7d..00000000
--- a/inst/doc/neotoma2-package.html
+++ /dev/null
@@ -1,5198 +0,0 @@

The neotoma2 R Package

[Figure: closeup of several Neotoma sites in the Caribbean.]

Neotoma Resources

The Neotoma Paleoecology Database is a domain-specific data resource containing millions of fossil records from around the globe, covering the last 5.4 million years. The neotoma2 R package simplifies some of the data structures and concepts to facilitate statistical analysis and visualization. Users may wish to gain a deeper understanding of the resource itself, or to build more complex data objects and relationships. For those users, a set of workshop code examples focusing on different geographic regions, languages and dataset types is provided at the end of this document.



Neotoma Data Structure

[Figure: Three panels showing context for Neotoma's geographic representation of sites. In panel a a site is defined by the boundaries of a lake; the site also has a bounding box, and the core location is defined by a collection unit within the site with precise coordinates. In panel b a site is defined as a single point, for example from a textual reference indicating the site is at the intersection of two roads; here the site and collection unit share the unique point location. In panel c that site location is obfuscated using a bounding box as the site delimiter, and the collection unit is not defined (but is represented as the triangle for illustration). Figure obtained from the Neotoma Database Manual.]

Data in Neotoma are associated with sites, specific locations with lat/long coordinates. Within a site, there may be one or more collection units – locations at which samples are physically collected within the site. For example, an archaeological site may have one or more collection units, pits within a broader dig site; a pollen sampling site on a lake may have multiple collection units – core sites within the lake basin. Collection units may have higher-resolution GPS locations, but are considered to be part of the broader site. Within a collection unit, data are collected at various analysis units, from which samples are obtained.

Because Neotoma is made up of a number of constituent databases (e.g., the Indo-Pacific Pollen Database, NANODe, FAUNMAP), a set of samples associated with a collection unit is assigned to a single dataset associated with a particular dataset type (e.g., pollen, diatom, vertebrate fauna) and constituent database.
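A minimal sketch of how this hierarchy looks in practice (assuming the API is reachable; Alexander Lake, siteid 24, is used in the examples below):

alex <- get_datasets(get_sites(24))
# Each collection unit's datasets carry a datasettype:
unique(as.data.frame(datasets(alex))$datasettype)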

[Figure: The structure of sites, collection units and datasets within Neotoma. A site contains one or more collection units. Chronologies are associated with collection units. Data of a common type (pollen, diatoms, vertebrate fauna) are assigned to a dataset.]

Researchers often begin by searching for sites within a particular study area, whether that is defined by geographic or political boundaries. From there they interrogate the available datasets for their particular dataset type of interest. When they find records of interest, they will often call for the data and associated chronologies.

The neotoma2 R package is intended to act as the intermediary to support these research activities using the Neotoma Paleoecology Database. Because R is not a relational database, we needed to modify the data structures of the objects. To do this, the package uses a set of S4 objects to represent different elements within the database.

[Figure: A diagram showing the major classes within the neotoma2 R package and how they relate to one another. Individual boxes represent the major classes (sites, site, collectionunits, etc.); each box lists the specific metadata contained within the class and its variable type (e.g., siteid: integer), followed by the functions that can be applied to the object (e.g., [[<-).]

It is important to note, here and elsewhere: almost everything you will interact with is a sites object. A sites object is the general currency of this package. sites may have more or less metadata associated with them, but they are the primary object and, as you can see in the diagram above, they have the most functions associated with them.


Package Requirements

The earlier neotoma package tried to use base R as much as possible. The neotoma2 package now draws primarily on the dplyr and purrr packages from the tidyverse, and on the sf spatial data package. The choice to integrate tidyverse packages was made largely because of the current ubiquity of the tidyverse in R education.
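A typical session therefore loads these packages alongside neotoma2:

library(neotoma2)
library(dplyr)  # manipulating the tables the package returns
library(sf)     # spatial objects used in `loc` searches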


Site Searches

The highest-level object in Neotoma is the site. Sites have spatial coordinates and, in many cases, additional metadata related to lake parameters or other site-specific properties.

Sites can be searched using the get_sites() function, or created using the set_site() function. A single site object is a special object in R that can be combined with other sites into a sites object. A sites object is effectively a list() of site objects with special methods for printing, plotting and exporting information.


Finding Sites

All sites in Neotoma have a unique numeric identifier. With the neotoma2 package you can search for a site using the get_sites() function by its unique site id (siteid), by name (sitename), by altitude (altmin, altmax), by geopolitical name (gpid), by location (loc) or by age bounds.
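For example, a sketch of two of the other parameters (result counts depend on the live database):

# Sites between 3000 and 5000 m elevation:
high_sites <- get_sites(altmin = 3000, altmax = 5000)

# Sites within a named geopolitical unit:
canada_sites <- get_sites(gpid = "Canada")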

If we're looking for a site and we know its specific identifier, we can use the simplest implementation of get_sites(). Here we are searching for a site (Alexander Lake) where we know that the siteid for the record in Neotoma is 24. We can find these siteids using the Neotoma Explorer web application, or from prior familiarity with the site records.

# Search for site by a single numeric ID:
alex <- get_sites(24)
alex
#>  siteid       sitename      lat      long altitude
#>      24 Alexander Lake 53.33333 -60.58333       73

# Search for sites with multiple IDs using c():
multiple_sites <- get_sites(c(24, 47))
multiple_sites
#>  siteid       sitename      lat      long altitude
#>      24 Alexander Lake 53.33333 -60.58333       73
#>      47        Liberty 43.52000 -90.78000      353

Once you search for a site, the neotoma2 R package makes a call to the Neotoma Database and returns a structured sites object that contains metadata about the sites, along with some additional metadata about the collection units and datasets at those sites. This limited metadata helps speed up further searches, but it is not complete enough for analysis.

[Figure: The result of a hypothetical get_sites() call is a sites object containing two individual site objects. Each site object contains a collunits object with some limited metadata. The top site has two collection units, while the lower site has only one. Each of the top two collection units contains two datasets, while the bottom site's single collection unit has only one dataset.]

Searching for Sites by Name

Often we do not know the particular siteid. If we're looking for a site and we know its name, or part of its name, we can search using the sitename argument, get_sites(sitename = 'XXX'), where 'XXX' is the site name. This does not support multiple text strings (i.e., you can't use c()).

alex <- get_sites(sitename = "Alexander Lake")
alex
#>  siteid       sitename      lat      long altitude
#>      24 Alexander Lake 53.33333 -60.58333       73

Neotoma uses a Postgres database to manage data. Postgres uses the % sign as a general wildcard, so we can use % in the sitename argument to help find sites when we are not sure of the exact match. Note that the search is case insensitive, so a search for alex% or Alex% will return the same results.

alex <- get_sites(sitename = 'Alex%')
alex
#>  siteid           sitename      lat      long altitude
#>      24     Alexander Lake 53.33333 -60.58333       73
#>      25        Alexis Lake 52.51667 -57.03333      193
#>    4478 Alexander [3CN117] 35.25000 -92.61667      180
#>   26226     Alexandra Lake 43.29030 -74.16966      351

Since this new sites object has 4 elements that belong to site, we may want to access only one of the objects, or particular sets of metadata, from our variable alex.
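For example:

alex[[4]]                     # the fourth `site` within the `sites` object
as.data.frame(alex)$sitename  # one column of metadata across all four sites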


Searching for Sites by Age

There are several ways of searching for sites using age parameters. These are represented below:

[Figure: Site searches using age parameters including ageof, ageyoung, ageold, maxage and minage.]

We offer several methods of searching because different users have different requirements. A user might be interested in only one specific point in time in the past, for example the 8.2 ka event; in this instance they would search get_sites(ageof = 8200). They may want sites with records that completely span a time period, for example the Atlantic chronozone of the Holocene: get_sites(ageyounger = 5000, ageolder = 8000). These sites would have samples both within and outside the defined age range, so that the user could track change into and out of the time period. A user may also be interested in any record within a time bin, regardless of whether the site spans that time zone or not; they would query get_sites(minage = 5000, maxage = 8000).


We can see how these age bounds differ:

# Note, we are using the `all_data = TRUE` flag here to avoid the default limit of 25 records, discussed below.
# Because these queries search through every record they are slow, and they are not
# run in knitting this vignette.
get_sites(ageof = 8200, all_data = TRUE) %>% length()
get_sites(ageyounger = 5000, ageolder = 8000, all_data = TRUE) %>% length()
get_sites(minage = 5000, maxage = 8000, all_data = TRUE) %>% length()

It is possible to pass all of these parameters (ageof, minage, maxage, ageyounger, ...), but it is likely that they will conflict and result in an empty set of records. To avoid this, be aware of the relationships among the search parameters and how they might affect your search window.


Accessing sites metadata

Although the sites are structured using S4 objects (see Hadley Wickham's S4 documentation), we've added helper functions to make accessing elements easier for users.

The alex object is composed of several smaller objects of class site. We can call any individual site using [[ ]], placing the index of the desired object between the brackets. We can then call the particular variable we want using the $ symbol.

alex <- get_sites(sitename = "Alexander Lake")
alex[[1]]$siteid
#> [1] 24

The elements within a site are the same as the defined columns within the Neotoma ndb.sites table, with the exception of the collunits slot, which contains the collection units and associated datasets found within a site. You can see all the site slots using the names() function. You can select individual elements of a site, and you can assign values to these parameters:

names(alex[[1]])
#> [1] "siteid"       "sitename"     "geography"    "altitude"     "geopolitical"
#> [6] "area"         "notes"        "description"  "collunits"

# Modify a value using $<- assignment:
alex[[1]]$area
#> [1] NA
alex[[1]]$area <- 100
alex[[1]]$area
#> [1] 100

# Modify a value using [<- assignment:
alex[[1]]["area"] <- 30
alex[[1]]$area
#> [1] 30
# alex[[1]][7] <- 30  # This fails because the `notes` field expects a character string.

Using assignment, we can add information programmatically, for example by working interactively with a digital elevation model or hydrographic data to obtain lake area measurements. Although not currently implemented, the goal is to support direct upload of updated information by users.
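A small sketch of that workflow, with a hypothetical value standing in for a real DEM or hydrography measurement:

measured_area <- 331  # hypothetical lake area from an external GIS workflow
alex[[1]]$area <- measured_area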


Creating a Site

As explained above, a site is the fundamental unit of the Neotoma Database. If you are working with your own data, you might want to create a site object to allow it to interact with other data within Neotoma. You can create a site with the set_site() function. It will ask you to provide important information such as the sitename, lat, and long attributes.

my_site <- set_site(sitename = "My Lake",
                    geography = st_sf(a = 3, st_sfc(st_point(1:2))),
                    description = "my lake",
                    altitude = 30)
my_site
#>    siteid sitename lat long altitude
#>  35960541  My Lake   2    1       30

If we have a set of sites that we are analyzing, we can add the new site to the set, either by appending it to the end using c(), or by replacing a particular element using [[<-.

This method allows us to begin modifying site information for existing sites if we have updated knowledge about site properties.

# Add a new site that's been edited using set_site()
longer_alex <- c(alex, my_site)
# Or replace an element within the existing list of sites
# with the newly created site.
longer_alex[[2]] <- my_site

# Or append to the `sites` list with assignment:
longer_alex[[3]] <- my_site

We can also use set_site() as a tool to update the metadata associated with an existing site object:

# Update a value within an existing `sites` object:
longer_alex[[3]] <- set_site(longer_alex[[3]],
                             altitude = 3000)
longer_alex

Datasets

If you need to get to a deeper level of the sites object, you may want to look at the get_datasets() function. You can use get_datasets() with search parameters, or you can use it on an existing sites object, such as our prior alex dataset.

get_datasets() adds additional metadata to the site objects, letting us know which datasettypes are associated with a site, and the dataset sample locations at the site.

[Figure: Using get_datasets() provides more complete metadata about a record than the get_sites() call shown above, including chronological information and more complete metadata about the datasets. The objects here are the same as above, but now have chronology metadata and contact metadata for the records. Note that there is still no sample or taxonomic information about these records; this comes from the get_downloads() function.]

Getting the datasets by ID is the easiest call; you can also pass a vector of IDs or, if you already have one, a sites object.

# Getting datasets by ID
my_datasets <- get_datasets(c(5, 10, 15, 20))
my_datasets
#>  siteid                   sitename       lat      long altitude
#>       5                       17/2  55.25000 -74.93333      300
#>      10 Site 1 (Cohen unpublished)  30.83000 -82.33000       36
#>      15                    Aguilar -23.83333 -65.75000     3828
#>      20                   Akuvaara  69.12326  27.67406      159

You can also retrieve datasets by type directly from the API.

# Getting datasets by type
my_pollen_datasets <- get_datasets(datasettype = "pollen", limit = 25)
my_pollen_datasets
#>  siteid                            sitename       lat       long altitude
#>       7                     Three Pines Bog  47.00000  -80.11667      329
#>       8                 Abalone Rocks Marsh  33.95639 -119.97667        9
#>       9                              Adange  43.30556   41.33333     2065
#>      11        Konus Exposure, Adycha River  67.75000  135.58333      137
#>      12                       Ageröds Mosse  55.93329   13.42559       47
#>      13                     Aguas Calientes -23.08333  -67.40000     4233
#>      14                   Aguas Calientes 2 -23.50000  -67.58333     4198
#>      15                             Aguilar -23.83333  -65.75000     3828
#>      16                           Ahlenmoor  53.69908    8.74688        5
#>      17                               Ajata -18.25000  -69.20000     4773
#>      18                    South Soefje Bog  29.60000  -97.51694      100
#>      19             Akulinin Exposure P1282  47.11667  138.55000      367
#>      20                            Akuvaara  69.12326   27.67406      159
#>      21 Alazeya River Exposure, 8 m Terrace  68.50000  154.50000       50
#>      22 Alazeya River Exposure, 9 m Terrace  64.33333  154.50000      125
#>      24                      Alexander Lake  53.33333  -60.58333       73
#>      25                         Alexis Lake  52.51667  -57.03333      193
#>      27                          Aliuk Pond  54.58333  -57.36667        9
#>      29                          Lake Allie  44.80156  -94.55982      320
#>      30                         Almora Lake  46.20611  -95.29361      437
#>      31                           Alut Lake  60.13667  152.31278      488
#>      32                             Amarete -15.23333  -68.98333     3755
#>      33             Amba River Exposure 596  43.31667  131.81667        0
#>      68     Amguema River Valley Exposure 1  67.75000  178.70000      493
#>      69     Amguema River Valley Exposure 2  67.66667  178.60000      376

It can be computationally intensive to obtain the full set of records for sites or datasets. By default the limit for all queries is 25, and the default offset is 0. To capture all results we can use the all_data = TRUE flag in our calls. However, this is hard on the Neotoma servers, so we prefer that users turn to all_data = TRUE once their analytic workflow is mostly complete.
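The same records can also be paged through manually; a sketch using limit and offset:

# Two successive pages of 25 diatom datasets:
page1 <- get_datasets(datasettype = "diatom", limit = 25, offset = 0)
page2 <- get_datasets(datasettype = "diatom", limit = 25, offset = 25)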

We can use all_data = TRUE in R in the following way:

allSites_dt <- get_sites(datasettype = "diatom")
allSites_dt_all <- get_sites(datasettype = "diatom", all_data = TRUE)

# Because we used the `all_data = TRUE` flag, there will be more sites
# in allSites_dt_all, because it represents all sites containing diatom datasets.
length(allSites_dt_all) > length(allSites_dt)

Spatial Searches

You can get the coordinates to create a GeoJSON bounding box from here, or you can use pre-existing objects within R, for example country-level data within the spData package:
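For example, a sketch using a country polygon from spData (the name_long column is assumed; any sf polygon can be passed to loc in the same way):

library(spData)
czechia <- spData::world[spData::world$name_long == "Czech Republic", ]
czech_datasets <- get_datasets(loc = czechia)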


Accessing datasets by bounding box:

brazil <- '{"type": "Polygon",
            "coordinates": [[
                [-73.125, -9.102],
                [-56.953, -33.138],
                [-36.563, -7.711],
                [-68.203, 13.923],
                [-73.125, -9.102]
              ]]}'

# We can make the geojson a spatial object if we want to use the
# functionality of the `sf` package.
brazil_sf <- geojsonsf::geojson_sf(brazil)

brazil_datasets <- get_datasets(loc = brazil_sf)

Now we have an object called brazil_datasets that contains 19 datasets.


You can plot these findings!

plotLeaflet(brazil_datasets)

Filtering Records

Sometimes we take a large number of records, do some analysis, and then choose to select a subset. For example, we may want to select all sites in a region and then subset those by dataset type. If we want to look at only the geochronological datasets from Brazil, we can start with the set of records returned from our get_datasets() query, and then use the filter function in neotoma2 to select only those datasets that are geochronologic:

brazil_dates <- neotoma2::filter(brazil_datasets,
                                 datasettype == "geochronologic")

# or:
brazil_dates <- brazil_datasets %>%
    neotoma2::filter(datasettype == "geochronologic")

# With boolean operators:
brazil_space <- brazil_datasets %>% neotoma2::filter(lat > -18 & lat < -16)

The filter() function takes a datasets object as its first argument, followed by the criteria we want to use to filter. Currently supported criteria include:

  • lat
  • long
  • elev
  • datasettype

You need to accompany these terms with comparison operators: <, >, ==, or !=. datasettype must be a string, while the other terms must be numeric. If you need to filter on the same argument more than once, say both "geochronologic" and "pollen" dataset types, you will also make use of the & and | operators.
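For example, to keep two dataset types at once:

# Datasets that are either pollen or geochronologic:
brazil_multi <- brazil_datasets %>%
  neotoma2::filter(datasettype == "pollen" | datasettype == "geochronologic")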


Sample and Taxonomic data

Once we have the set of records we wish to examine, we then want to recover the actual sample data. This will provide us with information about the kinds of elements found at the site, within the dataset, their sample ages, and their counts or measurements. To do this we use the get_downloads() call. Note, as before, we are returning a sites object, but this time with the most complete metadata.

[Figure: Using get_downloads() returns a sites object, but one that contains dataset objects with filled samples slots. The samples slot is often very large relative to the other metadata associated with sites, and so it is commonly held back until a direct request is made. Helper functions at the sites level can pull out sample data once get_downloads() has been called.]

Assuming we continue with our example from Brazil, we want to extract records from the country, filter to only pollen records with samples covering the last 10,000 years, and then look at the relative frequency of taxa across sites. We might do something like this:

brazil <- '{"type": "Polygon",
            "coordinates": [[
                [-73.125, -9.102],
                [-56.953, -33.138],
                [-36.563, -7.711],
                [-68.203, 13.923],
                [-73.125, -9.102]
              ]]}'

# We can make the geojson a spatial object if we want to use the
# functionality of the `sf` package.
brazil_sf <- geojsonsf::geojson_sf(brazil)

brazil_records <- get_datasets(loc = brazil_sf, all_data = TRUE) %>%
    neotoma2::filter(datasettype == "pollen" & age_range_young <= 1000 & age_range_old >= 10000) %>%
    get_downloads()

count_by_site <- samples(brazil_records) %>%
  dplyr::filter(elementtype == "pollen" & units == "NISP") %>%
  group_by(siteid, variablename) %>%
  summarise(n = n()) %>%
  group_by(variablename) %>%
  summarise(n = n()) %>%
  arrange(desc(n))
#> `summarise()` has grouped output by 'siteid'. You can override using the
#> `.groups` argument.

In this code chunk we define the bounding polygon for our sites, filter by time and dataset type, and then return the full records for those sites. We get a sites object with dataset and sample information (because we used get_downloads()). We execute the samples() function to extract all the samples from the sites object, and then filter the resulting data.frame to pull only pollen (a pollen dataset may contain spores and other elements that are not, strictly speaking, pollen) counted using the number of identified specimens (NISP). We then group_by() the unique site identifiers (siteid) and the taxa (variablename) to get a count of the number of times each taxon appears in each site. We then summarise() at a higher level, to understand how many sites each taxon appears in. Finally we arrange() so that the most common taxa appear first in the resulting variable count_by_site.
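To inspect the result, for example:

# The taxa recorded at the greatest number of sites:
head(count_by_site, n = 10)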


Publications

Many Neotoma records have publications associated with them. The publication object (and the publications collection) provides the opportunity to work with these. The publication table in Neotoma contains an extensive number of fields. The methods for publications in the neotoma2 package provide us with tools to retrieve publication data from Neotoma, to set and manipulate publication data locally, and to retrieve publication data from external sources (e.g., using a DOI).


get_publications() from Neotoma

The simplest case is a search for a publication based on one or more publication IDs. Most people do not know the unique publication IDs of individual articles, but this provides a simple way to illustrate how Neotoma retrieves and presents publication information.


Get Publication By ID

We can use a single publication ID or multiple IDs. In either case the API returns the publication(s) and creates a new publications object (which consists of multiple individual publications).

one <- get_publications(12)
two <- get_publications(c(12, 14))

From there we can subset and extract elements from the list using the standard [[ format. For example:

two[[2]]

This will return the second publication in the list, corresponding to the publication with publicationid 14 in this case.


Create (or Import) New Publications

Just as we can use the set_site() function to set new site information, we can create new publication information using set_publications(). With set_publications() you can enter as much or as little of the article metadata as you'd like, but it's designed (in part) to use the CrossRef API to return information from a DOI.
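A sketch of the DOI route; the doi argument name and the placeholder value here are assumptions, so check the function documentation before relying on them:

# Hypothetical: populate publication metadata from CrossRef using a DOI.
doi_pub <- set_publications(doi = "10.1000/example-doi")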

new_pub <- set_publications(
  articletitle = "Myrtle Lake: a late- and post-glacial pollen diagram from northern Minnesota",
  journal = "Canadian Journal of Botany",
  volume = 46)

A publication has a large number of slots that can be defined. These may be left blank, or they may be set directly after the publication is defined:

new_pub@pages <- "1397-1410"

Workshops and Code Examples

  • 2022 International AL/IPA Meeting; Bariloche, Argentina
    • English Language Simple Workflow (Topics: Simple search, climate gradients, stratigraphic plotting; Spatial Domain: South America; Dataset Types: Diatoms)
    • Spanish Language Simple Workflow (Topics: Simple search, climate gradients, stratigraphic plotting; Spatial Domain: South America; Dataset Types: Diatoms)
    • English Language Complex Workflow (Topics: Chronology building, Bchron; Spatial Domain: South America; Dataset Types: Diatoms)
    • Spanish Language Complex Workflow (Topics: Chronology building, Bchron; Spatial Domain: South America; Dataset Types: Diatoms)
  • 2022 European Pollen Database Meeting; Prague, Czech Republic
    • English Language Simple Workflow (Topics: Simple search, climate gradients, stratigraphic plotting, taxonomic harmonization; Spatial Domain: Europe/Czech Republic; Dataset Types: Pollen)
    • English Language Complex Workflow (Topics: Chronology building, Bchron; Spatial Domain: Europe/Czech Republic; Dataset Types: Pollen)
  • 2022 American Quaternary Association Meeting
    • English Language Simple Workflow (Topics: Simple search, climate gradients, stratigraphic plotting; Spatial Domain: North America; Dataset Types: Pollen)
    • English Language Complex Workflow (Topics: Chronologies; Spatial Domain: North America; Dataset Types: Pollen)
  • Neotoma-charcoal Workshop, Göttingen, Germany. Authors: Petr Kuneš & Thomas Giesecke
    • English Language Workflow (Topics: Simple Search, PCA, DCA, Charcoal/Pollen Correlation; Spatial Domain: Global/Czech Republic; Dataset Types: Pollen, Charcoal)
- - - - - - - - - - - diff --git a/tests/testthat/Rplots.pdf b/tests/testthat/Rplots.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a5ebbe97169d5a00049435fccbdc8d5068c69da0 GIT binary patch literal 5643 zcma)AcUV*T(gs;jqEe+P!Vy6cfsoL92|Xgc6C{KHk&r+Ny>|h*6s1U)CU_B%-c_2O0pwkR&7)?*f30kS=HWII^3l?jhaI0or_ z7(CVdHKVX4>PN!VyJ+t3auUr&eUqVBmLLU<$0{`b!i?60HaOcYbytm?2FYsR(!Fqk=nM*FRbEB zl45%HU7Ia9UC>@z#~z*wzXnj%R%&7diOCmbG3W)`#Mc6qDK1l-J0kdsB0BA4E!UCj z5WOR2SFg^Vv)tl4i}9+K*cdkTB72^_fv?CC8ooFT^mZ^2zgmzg&ELOy@VYtP|Mt$? zv%4bw?d@(&u^8sZuQwwLNo52Bvf5nkZq?h?oKtxH1mhS zx{*O$8CU}Qos~E}7Fvvp?P7MH?)m7^VuMu>?DW4bpjEe|92aL#n@nAIXH!7T&1)+l zy6XfgYu`MmyK>wAcHO8(uW#zYb2ma8<9tbX-V;&P7PT8pauy}6r%NpMUi&gUyPsYL zyjX|WFEu|}Fb_??er4#oOy@XpWD{O43MW7AuO;Zk4fdE98(k;eG_FgwzL6mAs4UO* zdWc#4!MQE5QSpz2H10Hw=`ZEFfo|B>w#A*38{4%Se9Q&8vayU&yk*)27os8WTU|*f zl4&QmUxq)gxw${)3)=HdOV4|$CT*XT`LsE8L!dIfcmx|?_x|O_q5YZVoTIuIDbJxATMX|MwW36w9y+-C5Pn`X=$GyX@qz7F4^KaV| zyl_U7u|BN>8Oe?OACvq|28ugPc9q}zg=eZieO11~z~D{SD292K%>a@Ow>v+zjC1gi zd{{W?P=UOb>uzlC?q=>9RIPSCMO?-{_h5cj1>QOFHG;9JH9gx^WGKnW%h&glrmY4T zfoxOqD*mRAcw;C#QuRqAlPB}hazX9j9O={^EY-{LEGI~@^mqW>*`N~&lC68sh3*t4 zt@-f5v!+VgRD*rg12@yy^rb($gAb;DkoTJAf@z46C`$GEqY(U2$K?MkBR}id|5}zH z68~3SI+XZZVUm)Q_sh!4-?Jmi>z%dxKEIz*_A+vs^O)DV>*_SChWhe3`At40-zz7?Ss9|Hrl!v;rCUZb z<11#IQKB|(>TR;JYemxZyKX9h49BmXd{VBY2;&IxW(X;0cM3)G)jqTo8*mi;^4UsP zC89`1d(-RQO48NzjlpY)x9Un|ybAo`Wj9XXW4Kc*Gp~~}{Wo`Xn=HFpq%&>G7dgVQ zozs5hJJa&{|3bW@!(I&)7sB$XIg+Yr3`9Z@|NhFH2S0f$tU**Q1>}+J{YtXG=txe4 zvP*u*a%mdIU(U3+-J*8*OzK&=z@v_z&aRA*7+o0y$}&CPw#Qw5v@LW+al=o6DQL_f zDXF}~TuDs1xGCs1S_r3wVcony zEDP2mA*I6|d}?W@g`wY|8fl;pDNJfzd8TH*h*FYJ-l@YWuGAC=<_DI&JzUyeb-=h{@3);X2Si`h&s?6g zoNM~AwT{vX`f{1p)8SR+*R}(mZN&}d>24kQeXZ}(69A%0o0k&XDM?tZJ z_8}oaU~LM?0!m6?Z72Yt^7tW5gSC7}I;JFQL!qQZJ&s}$izk?RAW?@1m}sJ-0Z9K8 z{|~WKA*wxFupSJifh3}x02!(j;%BQsefs&O>QrvVQH_8#(Owu7ntCYsDuw>Re~>s- zT3+VA{j*Sg`jf=7Q@zbh$2g<=T4Y;{l38MJ^}D&69ML`5qMIZx0%2s)f1F4oAfsvK zp=q2{d<-f)9Z~>GsuYNgq&2Qw{*6a-*wf?GnN&Tea-OMX-?g61DgK=xx7?P#Ntd<2 z(MFoYgK+w%SF9LAPUKsinpy0Kxf$o;aFq7MIt?9`X4<2wN`rG(`$)!-fH(2j4E9~F zmPRJv>%Pd4YWx$LAGx<0tOX!6M(Od3Uv|grc(`83DO+X<)fv7>uaz?BmC)}MunvP- zq!7P|+6HWApqb@2pb<(u-NfEm^!oh7#krgEY_0K04m@0o$95Z_trt+?B)urLPW*f& zPry|f8sX##@dcu(6IKkBEDQ}3oAafa)_rkpeGqK*s*k;!xuf@$jX0ZNNes^gJJYC} zDeQ4GeSHRtaUHKDm}GCBpM|{1;IZbNA<#JY9ax|8b?12`p!h~kUQWVY%)M23I2dt| znY`nbY9A52%A?7@+rDiXlyQ>gVKKU9Y%kg!!$E^srZsB%7Tx0jrip#jN@fc5T>!1#DpE3HrMSE_`}*>E>ke*U&dq|Zi*Trvp)vzNs=`xR?Gd)_uH#$>5ixi$Nc8$0GSRul}M`zaGeUN z$>Y8iR_<`|9s@a#RVrM@gPRnJnwF9~DjwSH@Y@8fi3C$l$fb4AD>asg$mc8^Ceb7S z_MG=oq*daXc{+L+s}b+5V?r^O04%e>6~vmdHHux`rjaWI7E zqv9b6+`(6PFK|>Hsp6?RGn617$rkUNp z(<`j<2>$gF^5_AzR(8freKEpH|d^Q|JpeW8D^b4s)$W)?PM z94ovp>>`XZizqWhn>GxShqafim*h^uB#Y_$O)y2(c-QdiOB}JW+4|M`Ir=wWdU9Mk zn}242?!(7T=pJ&9yr*a`W^QQSx^!VgX5?~N#mK@v-+P}&^oQS$Xb-AW&UdqySb%?h=Yjkk*%h1cg zvO$G{EwjuAEBV=FEwKHF?LsE=E(4vsD%fISFDeSuo|4pi&uJ4m?umXA{C0M)WzUpBf#DnjJwrJ|-O0~UqV1>J5mDcwDsIN#bc~nb zCi5smEhL#FdS!9at!^4Jp0d`?gARnAQ>d3v1IbBuzp)cCRkCo%eaIPTbkEsW^b1Wr z(PI6aS2;D}7~;-cVH--xXvd-xYzD=`#g2kyDp~tAi7N$ox1%YePE`vJg_L*7f*y zXrOvw9=-aAI&)Z0nAQ{Ja~`@5&$knsHcl=cU-XU)>ksK)$=`Vz-<}#7$e+N-e7u4& zm>bD%%rejx(&+FaES0`OphcitU`;baQ}3={0Y?G*-OPe(T|Qm6x^z+Hj+UMFu|qNJ z&&XYOBpWh5wl$cxE?BFUK)_VNf_Ieo#qGAKn1LgPmWVB_s zgM0QG8}tfFt3S2}%>;c6jiC$dN$BYjyCa4!_qbn3{^IcTN%xdb@X%x@_CCI(E{*R< zv|uz}aOUpHR=aw%|Gyq4R0q23BHWMG6cn;A*}an=xjiVflCaFDa@wc5uB)hI&cAzY zhkr=ff@t^NXN>Xbjl1l*R~K@#RBn@(K7O1ZLbbsC*0OFW6U)KP_;qam*v4W&U-rkH zPpZD}whk(f_i`oJJ0U(b&U=&+9|z^l9o^?wU^vU57|k2q2v$U_^5n<{%*O8D99w1EcVJ5>xj6%32P!(VBLJqU`y=&ot-m5Qr{{O 
zOMtSKz3DCtnW z{KIbl)%=)uo6iAMfyS$GPT!5VF)O)yE7K;1As{wtP=%tN1_OL8~}DCps9isO(cpko{~Ud zCk(|D5knaT#Bq3%6WSR76Aqn3QCfd27WoVAO2)Y$31oLHl1u`?co#el?MAVNJUp35 zZQT*S?CcJk{hyuq?-4j01;D0cN7CUAj|xI4{W#K{;u2*%IE>5hGP-E27n+1YAwl4u zaRClP`s Date: Thu, 2 Apr 2026 10:34:14 -0500 Subject: [PATCH 3/4] chronname param Adding in a chronname param to samples to enable selection of the particular chronology you want --- R/02_genericDefinitions.R | 5 +- R/samples.R | 308 +++++++++++++++++++--------------- tests/testthat/test_samples.R | 3 +- 3 files changed, 180 insertions(+), 136 deletions(-) diff --git a/R/02_genericDefinitions.R b/R/02_genericDefinitions.R index af406817..978e6aaa 100644 --- a/R/02_genericDefinitions.R +++ b/R/02_genericDefinitions.R @@ -42,9 +42,8 @@ setGeneric("showMatch", function(x) { #' @param x sites object #' @returns data.frame with record information at sample level #' @export -setGeneric("samples", function(x) { - standardGeneric(f = "samples") - }) +setGeneric("samples", + function(object, chronname=NULL) standardGeneric("samples")) #' @title Obtain speleothems from a record or multiple records. #' @param x sites object diff --git a/R/samples.R b/R/samples.R index ee1e0e5d..f6c27254 100644 --- a/R/samples.R +++ b/R/samples.R @@ -3,13 +3,10 @@ #' @author Simon Goring \email{goring@wisc.edu} #' @param x sites object #' @description Obtain all samples within a sites object -#' @examples { -#' tryCatch({ +#' @examples \dontrun{ +#' # Get full data download from API and create a long table with samples data. #' dw <- get_downloads(1) #' pollen <- samples(dw) -#' }, error = function(e) { -#' message("Neotoma server not responding. Try again later.") -#' }) #' } #' @importFrom dplyr bind_rows left_join rename mutate #' @importFrom purrr map @@ -17,147 +14,194 @@ #' @md #' @export setMethod(f = "samples", - signature = "sites", - definition = function(x) { - output <- map(x@sites, function(y) samples(y)) %>% - bind_rows() # %>% - # Handle NAs to allow distinct to work properly - #distinct(.data$sampleid, .keep_all = TRUE) - if (nrow(output) == 0) { - warnsite <- sprintf("No assigned samples. Did you run get_downloads()?") - warning(warnsite) - } - return(output) - } + signature = "sites", + definition = function(object,chronname = NULL) { + output <- map(object@sites, function(y) samples(y, chronname = chronname)) %>% + bind_rows() # %>% + # Handle NAs to allow distinct to work properly + #distinct(.data$sampleid, .keep_all = TRUE) + if (nrow(output) == 0) { + warnsite <- sprintf("No assigned samples. 
Did you run get_downloads()?") + warning(warnsite) + } + return(output) + } ) #' @rdname samples #' @export setMethod(f = "samples", - signature = "site", - definition = function(x) { - allids <- getids(x) - siteinfo <- as.data.frame(x) %>% - left_join(allids, by = "siteid") - sampset <- map(x@collunits@collunits, - function(y) samples(y)) %>% - bind_rows() %>% - bind_rows() %>% - left_join(siteinfo, by = "datasetid") %>% - rename(sitenotes = .data$notes) - return(sampset) - } + signature = "site", + definition = function(object,chronname = NULL) { + allids <- getids(object) + siteinfo <- as.data.frame(object) %>% + left_join(allids, by = "siteid") + sampset <- map(object@collunits@collunits, + function(y) samples(y, chronname = chronname)) %>% + bind_rows() %>% + bind_rows() %>% + left_join(siteinfo, by = "datasetid") %>% + rename(sitenotes = .data$notes) + return(sampset) + } ) #' @rdname samples #' @export setMethod(f = "samples", - signature = "collunits", - definition = function(x) { - map(x@collunits, function(x) samples(x)) %>% - bind_rows() - } + signature = "collunits", + definition = function(object, chronname = NULL) { + map(object@collunits, function(object) samples(object, chronname=chronname)) %>% + bind_rows() + } ) #' @rdname samples #' @export +#' setMethod(f = "samples", - signature = "collunit", - definition = function(x) { - precedence <- c("Calendar years BP", - "Calibrated radiocarbon years BP", - "Radiocarbon years BP", "Varve years BP") - ids <- getids(x) - # Check the chronologies to make sure everything is okay: - if (length(chronologies(x)) > 0) { - # This pulls the chronology IDs, then applies the Neotoma - # age model precedence (see get_table('agetypes')). - # It returns a value that is larger when your age reporting is - # better. - defaultchron <- map(chronologies(x)@chronologies, - function(y) { - data.frame(chronologyid = y@chronologyid, - isdefault = y@isdefault, - modelagetype = y@modelagetype, - chronologyname = y@chronologyname, - dateprepared = y@dateprepared) - }) %>% - bind_rows() %>% - mutate(modelrank = match(.data$modelagetype, rev(precedence)), - order = .data$isdefault * match(.data$modelagetype, - rev(precedence))) - # Validation of default chrons, we want to check whether there - # exists either multiple default chronologies for the same - # time-frame or, alternately, no default chronology. - all_na <- all(is.na(defaultchron$order)) - max_order <- max(defaultchron$order, na.rm = TRUE) - if (sum(defaultchron$order == max_order, na.rm = TRUE) > 1) { - if (any(is.na(defaultchron$dateprepared))) { - high_chron <- defaultchron$order == max_order - newmax_order <- which.max(defaultchron$chronologyid[high_chron]) - defaultchron$order[high_chron][newmax_order] <- max_order + 1 - } else { - newmax_order <- which.max(defaultchron$dateprepared[ - defaultchron$order == max_order]) - defaultchron$order[defaultchron$order == max_order][ - newmax_order] <- max_order + 1 - } - } - if (all_na == TRUE) { - warnsite <- sprintf("The dataset %s has no default chronologies.", - ids$datasetid[1]) - warning(warnsite) - } else if (sum(defaultchron$order == max_order, na.rm = TRUE) > 1) { - warnsite <- sprintf("The dataset %s has multiple default chronologies. 
- Chronology %s has been used.", ids$datasetid[1], - defaultchron$chronologyid[ - which.max(defaultchron$order)]) - warning(warnsite) - defaultchron <- defaultchron[which.max(defaultchron$order), ] - } else { - defaultchron <- defaultchron[which.max(defaultchron$order), ] - } - } else { - defaultchron <- data.frame(chronologyid = NULL) - } - sampset <- map(datasets(x)@datasets, - function(y) { - dsid <- y$datasetid - allsamp <- - map(y@samples@samples, - function(z) { - whichage <- - which(z@ages$chronologyid == - defaultchron$chronologyid) - if (length(whichage) == 0) { - whichage <- 1 - } - if (dim(z@datum)[1] > 0) { - df <- - data.frame(z@ages[whichage,], - z@datum, - analysisunitid = z@analysisunitid, - sampleanalyst = - toString(unique(unlist( - z@sampleanalyst, - use.names = FALSE))), - sampleid = z@sampleid, - depth = z@depth, - thickness = z@thickness, - samplename = z@samplename, - row.names = NULL) - } else { - df <- data.frame() - } - return(df) + signature = "collunit", + definition = function(object, chronname) { + chron_exists <- FALSE + precedence <- c("Calendar years BP", + "Calibrated radiocarbon years BP", + "Radiocarbon years BP", "Varve years BP") + ids <- getids(object) + # Check the chronologies to make sure everything is okay: + if (length(chronologies(object)) > 0) { + # This pulls the chronology IDs, then applies the Neotoma + # age model precedence (see get_table('agetypes')). + # It returns a value that is larger when your age reporting is + # better. + defaultchron <- map(chronologies(object)@chronologies, + function(y) { + data.frame(chronologyid = y@chronologyid, + isdefault = y@isdefault, + modelagetype = y@modelagetype, + chronologyname = y@chronologyname, + dateprepared = y@dateprepared) + }) %>% + bind_rows() %>% + mutate(modelrank = match(.data$modelagetype, rev(precedence)), + order = .data$isdefault * match(.data$modelagetype, + rev(precedence))) + #print(defaultchron) + #print(unique(defaultchron$modelagetype)) + #print(unique(defaultchron$isdefault)) + # Validation of default chrons, we want to check whether there + # exists either multiple default chronologies for the same + # time-frame or, alternately, no default chronology. 
+ all_na <- all(is.na(defaultchron$order)) + #print(all_na) + #print(defaultchron$order) + max_order <- max(defaultchron$order, na.rm = TRUE) + if (sum(defaultchron$order == max_order, na.rm = TRUE) > 1) { + if (any(is.na(defaultchron$dateprepared))) { + high_chron <- defaultchron$order == max_order + newmax_order <- which.max(defaultchron$chronologyid[high_chron]) + defaultchron$order[high_chron][newmax_order] <- max_order + 1 + } else { + newmax_order <- which.max(defaultchron$dateprepared[ + defaultchron$order == max_order]) + defaultchron$order[defaultchron$order == max_order][ + newmax_order] <- max_order + 1 + } + } + if (all_na == TRUE) { + warnsite <- sprintf("The dataset %s has no default chronologies.", + ids$datasetid[1]) + warning(warnsite) + + } + + chron_exists <- !is.null(chronname) && + chronname %in% defaultchron$chronologyname + + if (all_na == TRUE) { + + warnsite <- sprintf("The dataset %s has no default chronologies.", + ids$datasetid[1]) + warning(warnsite) + + } else if (chron_exists) { + + # chronology explicitly requested and exists + matches <- defaultchron$chronologyname == chronname + + if (sum(matches, na.rm = TRUE) == 1) { + defaultchron <- defaultchron[matches, ] + } + + if (sum(matches, na.rm = TRUE) > 1) { + defaultchron <- defaultchron[matches, ][1,] + warnsite <- sprintf( + "The dataset %s has multiple chronologies matching chronname %s. Chronology %s has been used.", + ids$datasetid[1], chronname, defaultchron$chronologyid) + warning(warnsite) + } + + } else { + + # either no chronname OR chronname invalid + defaultchron <- defaultchron[which.max(defaultchron$order), ] + + if (!is.null(chronname)) { + warnsite <- sprintf( + "The dataset %s has no chronology matching chronname %s. Default chronology %s has been used.", + ids$datasetid[1], chronname, defaultchron$chronologyid) + warning(warnsite) + } + + } + } else { + defaultchron <- data.frame(chronologyid = NULL) + } + #print(paste0("default chron: ", defaultchron)) + sampset <- map(datasets(object)@datasets, + function(y) { + dsid <- y$datasetid + allsamp <- + map(y@samples@samples, + function(z) { + whichage <- + which(z@ages$chronologyid == + defaultchron$chronologyid) + if (length(whichage) == 0) { + if (chron_exists) { + whichage <- NA + } else { + whichage <- 1 + } + } + if (dim(z@datum)[1] > 0) { + df <- + data.frame(z@ages[whichage,], + z@datum, + analysisunitid = z@analysisunitid, + sampleanalyst = + toString(unique(unlist( + z@sampleanalyst, + use.names = FALSE))), + sampleid = z@sampleid, + depth = z@depth, + thickness = z@thickness, + samplename = z@samplename, + row.names = NULL) + #print(z$ages) + } else { + df <- data.frame() + } + return(df) + }) %>% + bind_rows() %>% + mutate(datasetid = dsid) + return(allsamp) }) %>% - bind_rows() %>% - mutate(datasetid = dsid) - return(allsamp) - }) %>% - bind_rows() %>% - left_join(as.data.frame(datasets(x)), by = "datasetid") %>% - rename(datasetnotes = .data$notes) - return(sampset) - } + bind_rows() %>% + left_join(as.data.frame(datasets(object)), by = "datasetid") %>% + rename(datasetnotes = .data$notes) + if (!is.null(chronname) && sum(defaultchron$chronologyname == chronname, na.rm = TRUE) != 0) { + sampset = sampset %>% dplyr::filter(!is.na(chronologyid))} + return(sampset) + } ) \ No newline at end of file diff --git a/tests/testthat/test_samples.R b/tests/testthat/test_samples.R index e8fd9911..d469b3f5 100644 --- a/tests/testthat/test_samples.R +++ b/tests/testthat/test_samples.R @@ -86,4 +86,5 @@ test_that("Samples of all sites has the 
same nrow as samples of each site combin all_df <- nrow(samples(dl)) testthat::expect_equal(all_df, df1 + df2 + df3) -}) \ No newline at end of file +}) + From 34dd0ad787bb2dfb52e2fe3f93e6904eaf032b71 Mon Sep 17 00:00:00 2001 From: hoffmanick <134728053+hoffmanick@users.noreply.github.com> Date: Thu, 2 Apr 2026 13:43:21 -0500 Subject: [PATCH 4/4] gpids added in a statement to automatically paste a vector of GPIDs as a string when that's in the argument --- R/get_sites.R | 5 ++++- tests/testthat/Rplots.pdf | Bin 5643 -> 5643 bytes 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/R/get_sites.R b/R/get_sites.R index bee10f6a..62323168 100644 --- a/R/get_sites.R +++ b/R/get_sites.R @@ -139,6 +139,9 @@ get_sites.default <- function(...) { cl <- as.list(match.call()) cl[[1]] <- NULL cl <- lapply(cl, eval, envir = parent.frame()) + if ("gpid" %in% names(cl)) { + cl$gpid = paste(cl$gpid,collapse=",") + } if ("siteid" %in% names(cl)) { # redirect to numeric method if ("all_data" %in% names(cl)) { @@ -159,7 +162,7 @@ get_sites.default <- function(...) { on.exit(options(oo)) baseURL <- paste0("data/sites") result <- tryCatch( - parseURL(baseURL, ...), + do.call(parseURL, c(list(baseURL), cl)), error = function(e) { stop("API call failed: ", e$message) NULL diff --git a/tests/testthat/Rplots.pdf b/tests/testthat/Rplots.pdf index a5ebbe97169d5a00049435fccbdc8d5068c69da0..c2fb1a90afa07be830248d9362bdd463696b89c2 100644 GIT binary patch delta 27 dcmeCy>DHNG$!2J5Y-(gU(OwM7+8Dh-6aZ$B2U!3B delta 27 dcmeCy>DHNG$!1__XlQCa(OwM7+8Dh-6aZ&H2WbER