Compare commits

8 Commits

20 changed files with 2853 additions and 130 deletions

.Rbuildignore

@@ -1,3 +1,5 @@
+^renv$
+^renv\.lock$
 ^.*\.Rproj$
 ^\.Rproj\.user$
 ^\.github$

.Rprofile (new file, 10 lines)

@@ -0,0 +1,10 @@
options(
renv.settings.snapshot.type = "explicit",
renv.config.auto.snapshot = TRUE,
renv.config.pak.enabled = TRUE
)
source("renv/activate.R")
if (interactive()) {
suppressMessages(require(usethis))
}

DESCRIPTION

@@ -1,6 +1,6 @@
 Package: stRoke
 Title: Clinical Stroke Research
-Version: 23.9.1
+Version: 24.2.1
 Authors@R:
     person("Andreas Gammelgaard", "Damsbo", , "agdamsbo@clin.au.dk", role = c("aut", "cre"),
            comment = c(ORCID = "0000-0002-7559-1154"))
@@ -17,7 +17,7 @@ BugReports: https://github.com/agdamsbo/stRoke/issues
 License: GPL-3
 Encoding: UTF-8
 Roxygen: list(markdown = TRUE)
-RoxygenNote: 7.2.3
+RoxygenNote: 7.3.1
 LazyData: true
 Suggests:
     covr,
@@ -25,13 +25,18 @@ Suggests:
     knitr,
     rmarkdown,
     spelling,
-    testthat (>= 3.0.0)
-Language: en-US
+    usethis,
+    testthat,
+    git2r,
+    pak,
+    here,
+    rhub
 Config/testthat/edition: 3
 Imports:
     calendar,
     dplyr,
     ggplot2,
+    glue,
     grDevices,
     gtsummary,
     lubridate,
@@ -39,7 +44,10 @@ Imports:
     rankinPlot,
     stats,
     tidyr,
-    utils
+    utils,
+    tibble,
+    tidyselect
 Depends:
     R (>= 2.10)
 VignetteBuilder: knitr
+Language: en-US

NAMESPACE

@@ -11,10 +11,12 @@ export(cpr_check)
 export(cpr_dob)
 export(cpr_female)
 export(ds2dd)
+export(ds2ical)
 export(files_filter)
 export(generic_stroke)
 export(index_plot)
 export(label_select)
+export(mfi_domains)
 export(n_chunks)
 export(pase_calc)
 export(quantile_cut)

NEWS.md (12 lines added)

@@ -1,3 +1,15 @@
+# stRoke 24.2.1
+
+### Functions:
+
+* NEW: `ds2ical()` converts data set to ical format with easy glue string for summary and description. Export .ics file with `calendar::ic_write()`.
+* UPDATE: `pase_calc()` updated for uniform column naming in output as well as streamlining the function a bit.
+* UPDATE: `add_padding()` updated to include option to add leading and/or tailing string with `lead` or `tail`.
+* NEW: `mfi_calc()` calculates domain scores from the MFI questionnaire. Takes data frame of 20 ordered as the questionnaire. Default is to reverse questions with reverse scoring.
+
 # stRoke 23.9.1
 ### Functions:
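
To make the `ds2ical()` entry above concrete, here is a minimal sketch of the intended workflow (illustrative data; the output file name is an assumption):

```r
# Illustrative only: build an ical object from a small data set and export it.
library(stRoke)

events <- dplyr::tibble(
  id = c("1", "2"), assessor = "A",
  start = Sys.time() + lubridate::days(1:2)
) |>
  dplyr::mutate(end = start + lubridate::hours(1))

cal <- events |> ds2ical(start = start, end = end)
# calendar::ic_write(cal, file = "appointments.ics")  # export as .ics
```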

R/add_padding.R

@@ -4,13 +4,15 @@
 #' @param length final string length
 #' @param after if padding should be added after as opposed to default before
 #' @param pad padding string of length 1
+#' @param lead leading string for all. Number or character vector. Cycled.
+#' @param tail tailing string for all. Number or character vector. Cycled.
 #'
 #' @return vector or character strings of same length.
 #' @export
 #'
 #' @examples
-#' add_padding(sample(1:200,5))
-add_padding <- function(d,length=NULL,after=FALSE,pad="0"){
+#' add_padding(sample(1:200,5),tail="AA",lead=c(2,3,"e"))
+add_padding <- function(d,length=NULL,after=FALSE,pad="0",lead=NULL,tail=NULL){
   if (!is.vector(d)) {
     stop("Please supply vector")
   }
@@ -31,8 +33,11 @@ add_padding <- function(d,length=NULL,after=FALSE,pad="0"){
     paste(rep(pad,i),collapse="")}))
   if (after) {
-    paste0(d,ps)
+    out <- paste0(d,ps)
   } else {
-    paste0(ps,d)
+    out <- paste0(ps,d)
   }
+  paste0(lead,out,tail)
 }
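
A short usage sketch of the new `lead`/`tail` arguments (output shown is indicative, assuming the default zero-padding to the widest element):

```r
# lead and tail wrap each padded string; both are recycled across the vector.
add_padding(c(4, 85, 123), lead = "ID-", tail = "-X")
#> "ID-004-X" "ID-085-X" "ID-123-X"
```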

R/ds2ical.R (new file, 65 lines)

@@ -0,0 +1,65 @@
#' Convert data set to ical file
#'
#' @param data data set
#' @param start dplyr style event start datetime column name. Data or datetime
#' object.
#' @param end dplyr style event end datetime column name. Data or datetime
#' object.
#' @param location dplyr style event location column name
#' @param summary.glue.string character string to pass to glue::glue() for event
#' name (summary). Can take any column from data set.
#' @param description.glue.string character string to pass to glue::glue() for
#' event description. Can take any column from data set.
#'
#' @return tibble of class "ical"
#' @export
#'
#' @examples
#' df <- dplyr::tibble(
#' start = c(Sys.time(), Sys.time() + lubridate::days(2)),
#' id = c("1", 3), assessor = "A", location = "111", note = c(NA, "OBS")
#' ) |>
#' dplyr::mutate(end = start + lubridate::hours(2))
#' df |> ds2ical()
#' df |> ds2ical(summary.glue.string = "ID {id} [{assessor}] {note}")
#' # Export .ics file: (not run)
#' ical <- df |> ds2ical(start, end, location,
#' description.glue.string = "{note}")
#' # ical |> calendar::ic_write(file=here::here("calendar.ics"))
ds2ical <- function(data,
start = start,
end = end,
location = NULL,
summary.glue.string = "ID {id} [{assessor}]",
description.glue.string = NULL) {
ds <- data |>
dplyr::mutate(
SUMMARY = glue::glue(summary.glue.string, .na = ""),
DTSTART = {{ start }},
DTEND = {{ end }},
LOCATION = {{ location }}
)
if (!is.null(description.glue.string)) {
ds <- dplyr::mutate(ds,
DESCRIPTION = glue::glue(
description.glue.string,
.na = ""
)
)
}
ds |>
dplyr::select(tidyselect::any_of(c("SUMMARY",
"DTSTART",
"DTEND",
"LOCATION",
"DESCRIPTION"))) |>
(\(x){
x |>
dplyr::mutate(UID = replicate(nrow(x), calendar::ic_guid()))
})() |>
dplyr::filter(!is.na(DTSTART)) |>
# dplyr::filter(dplyr::if_any(DTSTART, Negate(is.na))) |>
calendar::ical()
}
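
For reference, a small sketch of how the glue strings above are resolved: inside `dplyr::mutate()`, `glue::glue()` interpolates the braced names from the columns of the data, row by row (hypothetical columns `id` and `assessor`):

```r
# Each row gets its own SUMMARY built from that row's column values.
dplyr::tibble(id = c("1", "3"), assessor = c("A", "B")) |>
  dplyr::mutate(SUMMARY = glue::glue("ID {id} [{assessor}]", .na = ""))
```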

R/mfi_calc.R (new file, 79 lines)

@@ -0,0 +1,79 @@
#' Title
#'
#' @param d data frame or tibble
#' @param var numeric vector of indices of columns to reverse
#'
#' @return data.frame or tibble depending on input
#'
#' @examples
#' # rep_len(sample(1:5),length.out = 100) |> matrix(ncol=10) |> multi_rev(2:4)
multi_rev <- function(d, var){
# Forcing and coercing to numeric
dm <- d |> as.matrix() |>
as.numeric()|>
matrix(ncol=ncol(d)) |>
data.frame()
# Reversing everything (fast enough not to subset)
dr <- range(dm,na.rm=TRUE) |> sum()-dm
# Inserting reversed scores in correct places
for (i in var){
dm[i] <- dr[i]
}
if (tibble::is_tibble(d)){
tibble::tibble(dm)
} else {
dm
}
}
#' MFI domain score calculator
#'
#' @param ds data set of MFI scores, 20 columns
#' @param reverse.vars variables/columns to reverse
#' @param reverse reverse scoring
#'
#' @return tibble of domain scores
#' @export
#'
#' @examples
#' mfi_mess <- data.frame(matrix(
#' sample(c(" 1. ", "2. -A", "3.", " 4 ", "5.", NA),200,replace=TRUE),ncol=20))
#' mfi_mess |> mfi_domains()
mfi_domains <-
function(ds,
reverse = TRUE,
reverse.vars = c(2, 5, 9, 10, 13, 14, 16, 17, 18, 19)) {
if(ncol(ds)!=20){
stop("The supplied dataset should only contain the 20 MFI subscores")}
# Subscore indexes
indexes <- list(
data.frame(grp="gen", ndx=c(1, 5, 12, 16)),
data.frame(grp="phy", ndx=c(2, 8, 14, 20)),
data.frame(grp="act", ndx=c(3, 6, 10, 17)),
data.frame(grp="mot", ndx=c(4, 9, 15, 18)),
data.frame(grp="men", ndx=c(7, 11, 13, 19))
) |> dplyr::bind_rows() |> dplyr::arrange(ndx)
# Removes padding and converts to numeric
ds_n <- ds |> dplyr::mutate_if(is.factor, as.character) |>
dplyr::mutate(dplyr::across(tidyselect::everything(),
# Removes everything but the leading alphanumeric character
# Data should be cleaned accordingly
~str_extract(d=.,pattern="[[:alnum:]]")))
# Assumes reverse scores are not correctly reversed
if (reverse){ds_n <- ds_n |> multi_rev(var=reverse.vars)}
# Domain wise summations
split.default(ds_n, factor(indexes$grp)) |>
lapply(function(x){
apply(x, MARGIN = 1, sum, na.ignore=FALSE)
}) |> dplyr::bind_cols()
}
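
To make the domain mapping above concrete, a hedged sketch of the summation `mfi_domains()` performs once responses are numeric (already cleaned and reversed data assumed):

```r
# Each MFI domain is the sum of four fixed items, per the `indexes` table above.
mfi <- as.data.frame(matrix(sample(1:5, 200, replace = TRUE), ncol = 20))
gen <- rowSums(mfi[, c(1, 5, 12, 16)])  # "gen": general fatigue items
phy <- rowSums(mfi[, c(2, 8, 14, 20)])  # "phy": physical fatigue items
head(data.frame(gen, phy))
```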

R/pase_calc.R

@@ -3,6 +3,7 @@
 #' Calculates PASE score from raw questionnaire data.
 #' @param ds data set
 #' @param adjust_work flag to set whether to include 10b type 1.
+#' @param consider.missing character vector of values considered missing.
 #' Default is TRUE.
 #'
 #' @return data.frame
@@ -27,10 +28,16 @@
 #'
 #' @examples
 #' summary(pase_calc(stRoke::pase)[,13])
+#' str(pase_calc(stRoke::pase))
 #'
-pase_calc <- function(ds, adjust_work = FALSE) {
+pase_calc <- function(ds,
+                      adjust_work = FALSE,
+                      consider.missing = c("Not available")) {
-  if (ncol(ds) != 21) stop("supplied data set has to contain exactly 21 columns")
+  if (ncol(ds) != 21) {
+    stop("supplied data set has to contain exactly 21 columns.
+         Formatting should follow the stRoke::pase data set.")
+  }
 
   pase <- ds
@@ -41,10 +48,20 @@ pase_calc <- function(ds, adjust_work = FALSE) {
   pase <- do.call(data.frame, lapply(pase, as.character))
 
   ## Missings and incompletes
+  # Cosidered missing if all data is missing
   missings <- apply(apply(ds, 2, is.na), 1, all)
+  # Considered incomplete if any entry in main answers is missing
+  mains <- grep("([0-9]{2}|(09[a-d]))$",colnames(pase))
+  if (length(mains)!=13){
+    stop("The supplied dataset does not contain expected variable names.
+         Please run str(stRoke::pase) and format your data accordingly.")
+  }
   incompletes <-
-    apply(sapply(ds[, c(1, 3, 5, 7, 9, 11, 13:20)], function(x) {
-      x == "Not available" | is.na(x)
+    apply(sapply(ds[, mains], function(x) {
+      x %in% consider.missing | is.na(x)
     }), 1, any)
 
   names(pase) <- c(
@@ -78,7 +95,7 @@ pase_calc <- function(ds, adjust_work = FALSE) {
 
   ## PASE 2-6
-  pase_weigths <- list(
+  pase_weights <- list(
     "1" = c(
       "1" = 0.11,
       "2" = 0.32,
@@ -104,37 +121,45 @@ pase_calc <- function(ds, adjust_work = FALSE) {
   pase_score_26 <- lapply(seq_along(pase_list[2:6]), function(x) {
     df <- pase_list[2:6][[x]]
-    score <- c()
+    # score <- c()
 
     ## =====================
     ## Checking labelling
-    if (!all(range(suppressWarnings(as.numeric(substr(
-      df[, 1], 1, 1
-    ))), na.rm = TRUE) == c(0, 3))) {
-      stop("Labelling of 02-06 should start with a number ranging 1-4")
+    if (!all(stRoke::str_extract(df[, 1], "^[0-3]") |>
+             as.numeric() |>
+             range(na.rm = TRUE) == c(0, 3))) {
+      stop("Labelling of 02-06 should start with a number ranging 0-3")
     }
+
+    if (!all(stRoke::str_extract(df[, 2], "^[1-4]") |>
+             as.numeric() |>
+             range(na.rm = TRUE) == c(1, 4))) {
+      stop("Labelling of 02-06 subscores should start with a number ranging 1-4")
+    }
     ## =====================
 
-    for (i in seq_len(nrow(df))) {
-      # Setting categories from numbers
-      n1 <- suppressWarnings(as.numeric(substr(df[, 1][i], 1, 1)))
-
-      # Using if statement to calculate row wise
-      if (n1 %in% c(1:3)) {
-        # Second category
-        n2 <- suppressWarnings(as.numeric(substr(df[, 2][i], 1, 1)))
-        score[i] <- pase_weigths[[n1]][n2] * pase_multip_26[x]
-      } else if (n1 %in% 0) {
-        score[i] <- 0
+    ## Extracting the first string element in main entry
+    n1 <- stRoke::str_extract(df[, 1],"^[0-3]") |> as.numeric()
+    ## Extracting the first string element in subentry
+    n2 <- stRoke::str_extract(df[, 2],"^[1-4]") |> as.numeric()
+
+    score <- c()
+    for (i in seq_along(n1)) {
+
+      ind1 <- match(n1[i],seq_along(pase_weights))
+
+      if (is.na(ind1)){
+        score[i] <- n1[i]
       } else {
-        score[i] <- NA
+        score[i] <- pase_weights[[ind1]][n2[i]] * pase_multip_26[x]
      }
    }
    score
  })
 
-  names(pase_score_26) <- paste0("score_", names(pase_list[2:6]))
+  names(pase_score_26) <- paste0("pase_score_", names(pase_list[2:6]))
 
   ## PASE 7-9d
   pase_multip_79 <- c(25, 25, 30, 36, 20, 35)
@@ -147,7 +172,7 @@ pase_calc <- function(ds, adjust_work = FALSE) {
   ) * pase_multip_79))
 
   names(pase_score_79) <-
-    paste0("score_", sub("pase", "", names(pase_score_79)))
+    paste0("pase_score_", sub("pase","",names(pase_score_79)))
 
   ## PASE 10
   ## Completely ignores if 10b is not completed
@@ -157,15 +182,15 @@ pase_calc <- function(ds, adjust_work = FALSE) {
     # Only includes work time if 10b is != 1
     pase_score_10[substr(pase_list[[10]][[3]],1,1) == "1"] <- 0
     # Consequently consider "Not available" in 10b as incomplete
-    incompletes[ds[,21] == "Not available" & !incompletes & !is.na(incompletes)] <- TRUE
+    incompletes[ds[,21] %in% consider.missing & !incompletes & !is.na(incompletes)] <- TRUE
   }
 
   pase_score <- cbind(pase_score_26, pase_score_79, pase_score_10)
 
   data.frame(
     pase_score,
-    score_sum = rowSums(pase_score, na.rm = TRUE),
-    score_missings = missings,
-    score_incompletes = incompletes
+    pase_score_sum = rowSums(pase_score, na.rm = TRUE),
+    pase_score_missings = missings,
+    pase_score_incompletes = incompletes
   )
 }
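
A hedged usage sketch of the revised interface: the extra "Missing" label passed to `consider.missing` is a hypothetical example value, and the column names follow the renaming in this diff:

```r
# consider.missing controls which labels mark an answer as incomplete.
library(stRoke)
res <- pase_calc(stRoke::pase, consider.missing = c("Not available", "Missing"))
str(res[c("pase_score_sum", "pase_score_missings", "pase_score_incompletes")])
```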

inst/WORDLIST

@@ -1,15 +1,14 @@
 Andreas
-CMD
 Changelog
 Codecov
 DDMMYY
 DOI
-DataDictionary
 Gammelgaard
 Github
 Kraglund
 Labelling
 METACRAN
+MFI
 NA's
 OLR
 ORCID
@@ -43,11 +42,13 @@ colouring
 cpr
 cprs
 daDoctoR
+datetime
 ddmmyy
 ddmmyyxxxx
 difftime
 dk
 doi
+dplyr
 ds
 eeptools
 eg
@@ -82,6 +83,7 @@ sapply
 stackoverflow
 strsplit
 teppo
+tibble
 vapply
 vec
 winP

man/add_padding.Rd

@@ -4,7 +4,14 @@
 \alias{add_padding}
 \title{Add padding to string}
 \usage{
-add_padding(d, length = NULL, after = FALSE, pad = "0")
+add_padding(
+  d,
+  length = NULL,
+  after = FALSE,
+  pad = "0",
+  lead = NULL,
+  tail = NULL
+)
 }
 \arguments{
 \item{d}{vector of strings or numbers}
@@ -14,6 +21,10 @@ add_padding(d, length = NULL, after = FALSE, pad = "0")
 \item{after}{if padding should be added after as opposed to default before}
 
 \item{pad}{padding string of length 1}
+
+\item{lead}{leading string for all. Number or character vector. Cycled.}
+
+\item{tail}{tailing string for all. Number or character vector. Cycled.}
 }
 \value{
 vector or character strings of same length.
@@ -22,5 +33,5 @@ vector or character strings of same length.
 Add padding to string
 }
 \examples{
-add_padding(sample(1:200,5))
+add_padding(sample(1:200,5),tail="AA",lead=c(2,3,"e"))
 }

man/ds2ical.Rd (new file, 51 lines)

@@ -0,0 +1,51 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds2ical.R
\name{ds2ical}
\alias{ds2ical}
\title{Convert data set to ical file}
\usage{
ds2ical(
data,
start = start,
end = end,
location = NULL,
summary.glue.string = "ID {id} [{assessor}]",
description.glue.string = NULL
)
}
\arguments{
\item{data}{data set}
\item{start}{dplyr style event start datetime column name. Data or datetime
object.}
\item{end}{dplyr style event end datetime column name. Data or datetime
object.}
\item{location}{dplyr style event location column name}
\item{summary.glue.string}{character string to pass to glue::glue() for event
name (summary). Can take any column from data set.}
\item{description.glue.string}{character string to pass to glue::glue() for
event description. Can take any column from data set.}
}
\value{
tibble of class "ical"
}
\description{
Convert data set to ical file
}
\examples{
df <- dplyr::tibble(
start = c(Sys.time(), Sys.time() + lubridate::days(2)),
id = c("1", 3), assessor = "A", location = "111", note = c(NA, "OBS")
) |>
dplyr::mutate(end = start + lubridate::hours(2))
df |> ds2ical()
df |> ds2ical(summary.glue.string = "ID {id} [{assessor}] {note}")
# Export .ics file: (not run)
ical <- df |> ds2ical(start, end, location,
description.glue.string = "{note}")
# ical |> calendar::ic_write(file=here::here("calendar.ics"))
}

man/mfi_domains.Rd (new file, 30 lines)

@@ -0,0 +1,30 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfi_calc.R
\name{mfi_domains}
\alias{mfi_domains}
\title{MFI domain score calculator}
\usage{
mfi_domains(
ds,
reverse = TRUE,
reverse.vars = c(2, 5, 9, 10, 13, 14, 16, 17, 18, 19)
)
}
\arguments{
\item{ds}{data set of MFI scores, 20 columns}
\item{reverse}{reverse scoring}
\item{reverse.vars}{variables/columns to reverse}
}
\value{
tibble of domain scores
}
\description{
MFI domain score calculator
}
\examples{
mfi_mess <- data.frame(matrix(
sample(c(" 1. ", "2. -A", "3.", " 4 ", "5.", NA),200,replace=TRUE),ncol=20))
mfi_mess |> mfi_domains()
}

man/multi_rev.Rd (new file, 22 lines)

@@ -0,0 +1,22 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfi_calc.R
\name{multi_rev}
\alias{multi_rev}
\title{Title}
\usage{
multi_rev(d, var)
}
\arguments{
\item{d}{data frame or tibble}
\item{var}{numeric vector of indices of columns to reverse}
}
\value{
data.frame or tibble depending on input
}
\description{
Title
}
\examples{
# rep_len(sample(1:5),length.out = 100) |> matrix(ncol=10) |> multi_rev(2:4)
}

man/pase_calc.Rd

@@ -4,12 +4,14 @@
 \alias{pase_calc}
 \title{PASE score calculator}
 \usage{
-pase_calc(ds, adjust_work = FALSE)
+pase_calc(ds, adjust_work = FALSE, consider.missing = c("Not available"))
 }
 \arguments{
 \item{ds}{data set}
 
-\item{adjust_work}{flag to set whether to include 10b type 1.
+\item{adjust_work}{flag to set whether to include 10b type 1.}
+
+\item{consider.missing}{character vector of values considered missing.
 Default is TRUE.}
 }
 \value{
@@ -41,5 +43,6 @@ set \code{TRUE}.
 }
 \examples{
 summary(pase_calc(stRoke::pase)[,13])
+str(pase_calc(stRoke::pase))
 }

renv.lock (new file, 1277 lines; diff suppressed because it is too large)

renv/.gitignore (vendored new file, 7 lines)

@@ -0,0 +1,7 @@
library/
local/
cellar/
lock/
python/
sandbox/
staging/

renv/activate.R (new file, 1180 lines; diff suppressed because it is too large)

renv/settings.json (new file, 19 lines)

@@ -0,0 +1,19 @@
{
"bioconductor.version": null,
"external.libraries": [],
"ignored.packages": [],
"package.dependency.fields": [
"Imports",
"Depends",
"LinkingTo"
],
"ppm.enabled": null,
"ppm.ignored.urls": [],
"r.version": null,
"snapshot.type": "explicit",
"use.cache": true,
"vcs.ignore.cellar": true,
"vcs.ignore.library": true,
"vcs.ignore.local": true,
"vcs.manage.ignores": true
}
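
For context, a brief sketch of how these settings are typically exercised from the project (standard renv calls; the workflow itself is an assumption, not part of this change set):

```r
# "snapshot.type": "explicit" records only packages declared in DESCRIPTION.
renv::snapshot(type = "explicit")  # or rely on renv.config.auto.snapshot = TRUE
renv::restore()                    # reinstall the locked versions on another machine
```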

vignettes/ds2dd.Rmd (deleted, 87 lines)

@@ -1,87 +0,0 @@
---
title: "ds2dd"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{ds2dd}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(stRoke)
```
# Easy data set to data base workflow
This function can be used as a simple tool for creating at data base metadata file for REDCap (called a DataDictionary) based on a given data set file.
## Step 1 - Load your data set
Here we'll use the sample TALOS dataset included with the package.
```{r}
data("talos")
ds <- talos
# As the data set lacks an ID column, one is added
ds$id <- seq_len(nrow(ds))
```
## Step 2 - Create the DataDictionary
```{r}
datadictionary <- ds2dd(ds,record.id = "id",include.column.names = TRUE)
```
Now additional specifications to the DataDictionary can be made manually, or it can be uploaded and modified manually in the graphical user interface on the web page.
The function will transform column names to lower case and substitute spaces for underscores. The output is a list with the DataDictionary and a vector of new column names for the dataset to fit the meta data.
## Step 3 - Meta data upload
Now the DataDictionary can be exported as a spreadsheet and uploaded or it can be uploaded using the `REDCapR` package (only projects with "Development" status).
Use one of the two approaches below:
### Manual upload
```{r eval=FALSE}
write.csv(datadictionary$DataDictionary,"datadictionary.csv")
```
### Upload with `REDCapR`
```{r eval=FALSE}
REDCapR::redcap_metadata_write(
datadictionary$DataDictionary,
redcap_uri = keyring::key_get("DB_URI"),
token = keyring::key_get("DB_TOKEN")
)
```
In the ["REDCap R Handbook"](https://agdamsbo.github.io/redcap-r-handbook/) more is written on interfacing with REDCap in R using the `library(keyring)`to store credentials in [chapter 1.1](https://agdamsbo.github.io/redcap-r-handbook/access.html#sec-getting-access).
## Step 4 - Data upload
The same two options are available for data upload as meta data upload: manual or through `REDCapR`.
Only the latter is shown here.
```{r eval=FALSE}
# new column names are applied
colnames(ds) <- datadictionary$`Column names`
REDCapR::redcap_write(
ds,
redcap_uri = keyring::key_get("DB_URI"),
token = keyring::key_get("DB_TOKEN")
)
```