Mirror of https://github.com/agdamsbo/stRoke.git (synced 2024-10-30 03:11:52 +01:00)

Compare commits
No commits in common. "cb81f715d1c986e3d86170653710a146f80b666c" and "7554f91e34b690237f471ee84a1ece0e60e75a23" have entirely different histories.
Comparing cb81f715d1 ... 7554f91e34
@@ -1,5 +1,3 @@
^renv$
^renv\.lock$
^.*\.Rproj$
^\.Rproj\.user$
^\.github$
.Rprofile (10 lines changed)
@@ -1,10 +0,0 @@
options(
  renv.settings.snapshot.type = "explicit",
  renv.config.auto.snapshot = TRUE,
  renv.config.pak.enabled = TRUE
)
source("renv/activate.R")

if (interactive()) {
  suppressMessages(require(usethis))
}
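A note on the removed .Rprofile above: it configures renv before activating it. A minimal sketch of what the "explicit" snapshot type implies, assuming renv is installed and initialised in the project (the sketch itself is not part of the diff):

# With snapshot.type = "explicit", renv::snapshot() records only packages
# declared in the project's DESCRIPTION (the dependency fields listed in the
# renv settings file further down this diff), not everything installed.
renv::settings$snapshot.type()   # should report "explicit" under this setup
renv::snapshot()                 # writes renv.lock from the declared dependencies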
DESCRIPTION (18 lines changed)
@@ -1,6 +1,6 @@
Package: stRoke
Title: Clinical Stroke Research
Version: 24.2.1
Version: 23.9.1
Authors@R:
    person("Andreas Gammelgaard", "Damsbo", , "agdamsbo@clin.au.dk", role = c("aut", "cre"),
    comment = c(ORCID = "0000-0002-7559-1154"))
@@ -17,7 +17,7 @@ BugReports: https://github.com/agdamsbo/stRoke/issues
License: GPL-3
Encoding: UTF-8
Roxygen: list(markdown = TRUE)
RoxygenNote: 7.3.1
RoxygenNote: 7.2.3
LazyData: true
Suggests:
    covr,
@@ -25,18 +25,13 @@ Suggests:
    knitr,
    rmarkdown,
    spelling,
    usethis,
    testthat,
    git2r,
    pak,
    here,
    rhub
    testthat (>= 3.0.0)
Language: en-US
Config/testthat/edition: 3
Imports:
    calendar,
    dplyr,
    ggplot2,
    glue,
    grDevices,
    gtsummary,
    lubridate,
@@ -44,10 +39,7 @@ Imports:
    rankinPlot,
    stats,
    tidyr,
    utils,
    tibble,
    tidyselect
    utils
Depends:
    R (>= 2.10)
VignetteBuilder: knitr
Language: en-US
@@ -11,12 +11,10 @@ export(cpr_check)
export(cpr_dob)
export(cpr_female)
export(ds2dd)
export(ds2ical)
export(files_filter)
export(generic_stroke)
export(index_plot)
export(label_select)
export(mfi_domains)
export(n_chunks)
export(pase_calc)
export(quantile_cut)
NEWS.md (12 lines changed)
@@ -1,15 +1,3 @@
# stRoke 24.2.1

### Functions:

* NEW: `ds2ical()` converts data set to ical format with easy glue string for summary and description. Export .ics file with `calendar::ic_write()`.

* UPDATE: `pase_calc()` updated for uniform column naming in output as well as streamlining the function a bit.

* UPDATE: `add_padding()` updated to include option to add leading and/or tailing string with `lead` or `tail`.

* NEW: `mfi_calc()` calculates domain scores from the MFI questionnaire. Takes data frame of 20 ordered as the questionnaire. Default is to reverse questions with reverse scoring.

# stRoke 23.9.1

### Functions:
@@ -4,15 +4,13 @@
#' @param length final string length
#' @param after if padding should be added after as opposed to default before
#' @param pad padding string of length 1
#' @param lead leading string for all. Number or character vector. Cycled.
#' @param tail tailing string for all. Number or character vector. Cycled.
#'
#' @return vector or character strings of same length.
#' @export
#'
#' @examples
#' add_padding(sample(1:200,5),tail="AA",lead=c(2,3,"e"))
add_padding <- function(d,length=NULL,after=FALSE,pad="0",lead=NULL,tail=NULL){
#' add_padding(sample(1:200,5))
add_padding <- function(d,length=NULL,after=FALSE,pad="0"){
  if (!is.vector(d)) {
    stop("Please supply vector")
  }
@@ -33,11 +31,8 @@ add_padding <- function(d,length=NULL,after=FALSE,pad="0",lead=NULL,tail=NULL){
    paste(rep(pad,i),collapse="")}))

  if (after) {
    out <- paste0(d,ps)
    paste0(d,ps)
  } else {
    out <- paste0(ps,d)
    paste0(ps,d)
  }

  paste0(lead,out,tail)

}
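For orientation, a hedged sketch of how the two add_padding() variants shown above differ in use; the exact output depends on the unchanged middle of the function body (not fully shown in the hunks), which is assumed to pad each element with `pad` up to a common width:

# 23.9.1 signature: zero-padding only (assumed default: pad to the width of the
# longest element)
add_padding(c(1, 23, 456))
# expected: "001" "023" "456"

# Removed 24.2.1 signature: the padded string is additionally wrapped via
# paste0(lead, out, tail), with lead/tail values cycled over the vector
add_padding(c(1, 23, 456), lead = "ID-", tail = "-A")
# expected: "ID-001-A" "ID-023-A" "ID-456-A"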
R/ds2ical.R (65 lines changed)
@@ -1,65 +0,0 @@
#' Convert data set to ical file
#'
#' @param data data set
#' @param start dplyr style event start datetime column name. Data or datetime
#' object.
#' @param end dplyr style event end datetime column name. Data or datetime
#' object.
#' @param location dplyr style event location column name
#' @param summary.glue.string character string to pass to glue::glue() for event
#' name (summary). Can take any column from data set.
#' @param description.glue.string character string to pass to glue::glue() for
#' event description. Can take any column from data set.
#'
#' @return tibble of class "ical"
#' @export
#'
#' @examples
#' df <- dplyr::tibble(
#'   start = c(Sys.time(), Sys.time() + lubridate::days(2)),
#'   id = c("1", 3), assessor = "A", location = "111", note = c(NA, "OBS")
#' ) |>
#'   dplyr::mutate(end = start + lubridate::hours(2))
#' df |> ds2ical()
#' df |> ds2ical(summary.glue.string = "ID {id} [{assessor}] {note}")
#' # Export .ics file: (not run)
#' ical <- df |> ds2ical(start, end, location,
#'   description.glue.string = "{note}")
#' # ical |> calendar::ic_write(file=here::here("calendar.ics"))
ds2ical <- function(data,
                    start = start,
                    end = end,
                    location = NULL,
                    summary.glue.string = "ID {id} [{assessor}]",
                    description.glue.string = NULL) {
  ds <- data |>
    dplyr::mutate(
      SUMMARY = glue::glue(summary.glue.string, .na = ""),
      DTSTART = {{ start }},
      DTEND = {{ end }},
      LOCATION = {{ location }}
    )

  if (!is.null(description.glue.string)) {
    ds <- dplyr::mutate(ds,
      DESCRIPTION = glue::glue(
        description.glue.string,
        .na = ""
      )
    )
  }

  ds |>
    dplyr::select(tidyselect::any_of(c("SUMMARY",
                                       "DTSTART",
                                       "DTEND",
                                       "LOCATION",
                                       "DESCRIPTION"))) |>
    (\(x){
      x |>
        dplyr::mutate(UID = replicate(nrow(x), calendar::ic_guid()))
    })() |>
    dplyr::filter(!is.na(DTSTART)) |>
    # dplyr::filter(dplyr::if_any(DTSTART, Negate(is.na))) |>
    calendar::ical()
}
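One detail of the removed ds2ical() worth noting: both glue strings are evaluated with `.na = ""`, so missing fields render as empty text rather than the literal "NA". A standalone illustration (the values are made up for demonstration):

glue::glue("ID {id} [{assessor}] {note}", id = "1", assessor = "A", note = NA, .na = "")
# expected: "ID 1 [A] "  (the missing note renders as empty text)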
R/mfi_calc.R (79 lines changed)
@@ -1,79 +0,0 @@
#' Title
#'
#' @param d data frame or tibble
#' @param var numeric vector of indices of columns to reverse
#'
#' @return data.frame or tibble depending on input
#'
#' @examples
#' # rep_len(sample(1:5),length.out = 100) |> matrix(ncol=10) |> multi_rev(2:4)
multi_rev <- function(d, var){
  # Forcing and coercing to numeric
  dm <- d |> as.matrix() |>
    as.numeric()|>
    matrix(ncol=ncol(d)) |>
    data.frame()

  # Reversing everything (fast enough not to subset)
  dr <- range(dm,na.rm=TRUE) |> sum()-dm

  # Inserting reversed scores in correct places
  for (i in var){
    dm[i] <- dr[i]
  }

  if (tibble::is_tibble(d)){
    tibble::tibble(dm)
  } else {
    dm
  }

}

#' MFI domain score calculator
#'
#' @param ds data set of MFI scores, 20 columns
#' @param reverse.vars variables/columns to reverse
#' @param reverse reverse scoring
#'
#' @return tibble of domain scores
#' @export
#'
#' @examples
#' mfi_mess <- data.frame(matrix(
#'   sample(c(" 1. ", "2. -A", "3.", " 4 ", "5.", NA),200,replace=TRUE),ncol=20))
#' mfi_mess |> mfi_domains()
mfi_domains <-
  function(ds,
           reverse = TRUE,
           reverse.vars = c(2, 5, 9, 10, 13, 14, 16, 17, 18, 19)) {

    if(ncol(ds)!=20){
      stop("The supplied dataset should only contain the 20 MFI subscores")}

    # Subscore indexes
    indexes <- list(
      data.frame(grp="gen", ndx=c(1, 5, 12, 16)),
      data.frame(grp="phy", ndx=c(2, 8, 14, 20)),
      data.frame(grp="act", ndx=c(3, 6, 10, 17)),
      data.frame(grp="mot", ndx=c(4, 9, 15, 18)),
      data.frame(grp="men", ndx=c(7, 11, 13, 19))
    ) |> dplyr::bind_rows() |> dplyr::arrange(ndx)

    # Removes padding and converts to numeric
    ds_n <- ds |> dplyr::mutate_if(is.factor, as.character) |>
      dplyr::mutate(dplyr::across(tidyselect::everything(),
        # Removes everything but the leading alphanumeric character
        # Data should be cleaned accordingly
        ~str_extract(d=.,pattern="[[:alnum:]]")))

    # Assumes reverse scores are not correctly reversed
    if (reverse){ds_n <- ds_n |> multi_rev(var=reverse.vars)}

    # Domain wise summations
    split.default(ds_n, factor(indexes$grp)) |>
      lapply(function(x){
        apply(x, MARGIN = 1, sum, na.ignore=FALSE)
      }) |> dplyr::bind_cols()

  }
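The reversal in multi_rev() above is plain arithmetic: each value becomes sum(range(x)) - x, i.e. the minimum plus the maximum minus the observed value. A small worked example on a 1-5 scale (illustrative, not taken from the package tests):

x <- c(1, 2, 5, NA)
sum(range(x, na.rm = TRUE)) - x
# expected: 5 4 1 NA   (1 + 5 = 6, so 2 becomes 4 and 5 becomes 1; NA stays NA)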
@@ -3,7 +3,6 @@
#' Calculates PASE score from raw questionnaire data.
#' @param ds data set
#' @param adjust_work flag to set whether to include 10b type 1.
#' @param consider.missing character vector of values considered missing.
#' Default is TRUE.
#'
#' @return data.frame
@@ -28,16 +27,10 @@
#'
#' @examples
#' summary(pase_calc(stRoke::pase)[,13])
#' str(pase_calc(stRoke::pase))
#'
pase_calc <- function(ds,
                      adjust_work = FALSE,
                      consider.missing = c("Not available")) {
pase_calc <- function(ds, adjust_work = FALSE) {

  if (ncol(ds) != 21) {
    stop("supplied data set has to contain exactly 21 columns.
         Formatting should follow the stRoke::pase data set.")
  }
  if (ncol(ds) != 21) stop("supplied data set has to contain exactly 21 columns")

  pase <- ds

@@ -48,20 +41,10 @@ pase_calc <- function(ds,
  pase <- do.call(data.frame, lapply(pase, as.character))

  ## Missings and incompletes
  # Cosidered missing if all data is missing
  missings <- apply(apply(ds, 2, is.na), 1, all)

  # Considered incomplete if any entry in main answers is missing
  mains <- grep("([0-9]{2}|(09[a-d]))$",colnames(pase))

  if (length(mains)!=13){
    stop("The supplied dataset does not contain expected variable names.
         Please run str(stRoke::pase) and format your data accordingly.")
  }

  incompletes <-
    apply(sapply(ds[, mains], function(x) {
      x %in% consider.missing | is.na(x)
    apply(sapply(ds[, c(1, 3, 5, 7, 9, 11, 13:20)], function(x) {
      x == "Not available" | is.na(x)
    }), 1, any)

  names(pase) <- c(
@@ -95,7 +78,7 @@ pase_calc <- function(ds,

  ## PASE 2-6

  pase_weights <- list(
  pase_weigths <- list(
    "1" = c(
      "1" = 0.11,
      "2" = 0.32,
@@ -121,45 +104,37 @@
  pase_score_26 <- lapply(seq_along(pase_list[2:6]), function(x) {
    df <- pase_list[2:6][[x]]
    # score <- c()
    score <- c()

    ## =====================
    ## Checking labelling
    if (!all(stRoke::str_extract(df[, 1], "^[0-3]") |>
             as.numeric() |>
             range(na.rm = TRUE) == c(0, 3))) {
      stop("Labelling of 02-06 should start with a number ranging 0-3")
    if (!all(range(suppressWarnings(as.numeric(substr(
      df[, 1], 1, 1
    ))), na.rm = TRUE) == c(0, 3))) {
      stop("Labelling of 02-06 should start with a number ranging 1-4")
    }

    if (!all(stRoke::str_extract(df[, 2], "^[1-4]") |>
             as.numeric() |>
             range(na.rm = TRUE) == c(1, 4))) {
      stop("Labelling of 02-06 subscores should start with a number ranging 1-4")
    }

    ## =====================

    ## Extracting the first string element in main entry
    n1 <- stRoke::str_extract(df[, 1],"^[0-3]") |> as.numeric()
    ## Extracting the first string element in subentry
    n2 <- stRoke::str_extract(df[, 2],"^[1-4]") |> as.numeric()

    score <- c()
    for (i in seq_along(n1)) {
    for (i in seq_len(nrow(df))) {
      # Setting categories from numbers
      n1 <- suppressWarnings(as.numeric(substr(df[, 1][i], 1, 1)))

      ind1 <- match(n1[i],seq_along(pase_weights))

      if (is.na(ind1)){
        score[i] <- n1[i]
      # Using if statement to calculate row wise
      if (n1 %in% c(1:3)) {
        # Second category
        n2 <- suppressWarnings(as.numeric(substr(df[, 2][i], 1, 1)))
        score[i] <- pase_weigths[[n1]][n2] * pase_multip_26[x]

      } else if (n1 %in% 0) {
        score[i] <- 0
      } else {
        score[i] <- pase_weights[[ind1]][n2[i]] * pase_multip_26[x]
        score[i] <- NA
      }

    }
    score
  })

  names(pase_score_26) <- paste0("pase_score_", names(pase_list[2:6]))
  names(pase_score_26) <- paste0("score_", names(pase_list[2:6]))

  ## PASE 7-9d
  pase_multip_79 <- c(25, 25, 30, 36, 20, 35)
@@ -172,7 +147,7 @@ pase_calc <- function(ds,
  ) * pase_multip_79))

  names(pase_score_79) <-
    paste0("pase_score_", sub("pase","",names(pase_score_79)))
    paste0("score_", sub("pase", "", names(pase_score_79)))

  ## PASE 10
  ## Completely ignores if 10b is not completed
@@ -182,15 +157,15 @@
    # Only includes work time if 10b is != 1
    pase_score_10[substr(pase_list[[10]][[3]],1,1) == "1"] <- 0
    # Consequently consider "Not available" in 10b as incomplete
    incompletes[ds[,21] %in% consider.missing & !incompletes & !is.na(incompletes)] <- TRUE
    incompletes[ds[,21] == "Not available" & !incompletes & !is.na(incompletes)] <- TRUE
  }

  pase_score <- cbind(pase_score_26, pase_score_79, pase_score_10)

  data.frame(
    pase_score,
    pase_score_sum = rowSums(pase_score, na.rm = TRUE),
    pase_score_missings = missings,
    pase_score_incompletes = incompletes
    score_sum = rowSums(pase_score, na.rm = TRUE),
    score_missings = missings,
    score_incompletes = incompletes
  )
}
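In the 24.2.1 variant above, the "main" answer columns are located by name rather than by fixed positions, and the check requires exactly 13 matches. A sketch of that regex in isolation; the column names below are hypothetical but follow the pattern the check appears to expect from stRoke::pase:

nms <- c("pase01", "pase02", "pase02a", "pase09a", "pase09b", "pase10", "pase10b")
grep("([0-9]{2}|(09[a-d]))$", nms, value = TRUE)
# expected: "pase01" "pase02" "pase09a" "pase09b" "pase10"
# i.e. names ending in two digits or in 09a-09d; sub-questions such as "pase02a"
# and the work item "pase10b" are not counted as main answers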
@@ -1,14 +1,15 @@
Andreas
CMD
Changelog
Codecov
DDMMYY
DOI
DataDictionary
Gammelgaard
Github
Kraglund
Labelling
METACRAN
MFI
NA's
OLR
ORCID
@@ -42,13 +43,11 @@ colouring
cpr
cprs
daDoctoR
datetime
ddmmyy
ddmmyyxxxx
difftime
dk
doi
dplyr
ds
eeptools
eg
@@ -83,7 +82,6 @@ sapply
stackoverflow
strsplit
teppo
tibble
vapply
vec
winP
@@ -4,14 +4,7 @@
\alias{add_padding}
\title{Add padding to string}
\usage{
add_padding(
  d,
  length = NULL,
  after = FALSE,
  pad = "0",
  lead = NULL,
  tail = NULL
)
add_padding(d, length = NULL, after = FALSE, pad = "0")
}
\arguments{
\item{d}{vector of strings or numbers}

@@ -21,10 +14,6 @@ add_padding(
\item{after}{if padding should be added after as opposed to default before}

\item{pad}{padding string of length 1}

\item{lead}{leading string for all. Number or character vector. Cycled.}

\item{tail}{tailing string for all. Number or character vector. Cycled.}
}
\value{
vector or character strings of same length.

@@ -33,5 +22,5 @@ vector or character strings of same length.
Add padding to string
}
\examples{
add_padding(sample(1:200,5),tail="AA",lead=c(2,3,"e"))
add_padding(sample(1:200,5))
}
@@ -1,51 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds2ical.R
\name{ds2ical}
\alias{ds2ical}
\title{Convert data set to ical file}
\usage{
ds2ical(
  data,
  start = start,
  end = end,
  location = NULL,
  summary.glue.string = "ID {id} [{assessor}]",
  description.glue.string = NULL
)
}
\arguments{
\item{data}{data set}

\item{start}{dplyr style event start datetime column name. Data or datetime
object.}

\item{end}{dplyr style event end datetime column name. Data or datetime
object.}

\item{location}{dplyr style event location column name}

\item{summary.glue.string}{character string to pass to glue::glue() for event
name (summary). Can take any column from data set.}

\item{description.glue.string}{character string to pass to glue::glue() for
event description. Can take any column from data set.}
}
\value{
tibble of class "ical"
}
\description{
Convert data set to ical file
}
\examples{
df <- dplyr::tibble(
  start = c(Sys.time(), Sys.time() + lubridate::days(2)),
  id = c("1", 3), assessor = "A", location = "111", note = c(NA, "OBS")
) |>
  dplyr::mutate(end = start + lubridate::hours(2))
df |> ds2ical()
df |> ds2ical(summary.glue.string = "ID {id} [{assessor}] {note}")
# Export .ics file: (not run)
ical <- df |> ds2ical(start, end, location,
  description.glue.string = "{note}")
# ical |> calendar::ic_write(file=here::here("calendar.ics"))
}
@@ -1,30 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfi_calc.R
\name{mfi_domains}
\alias{mfi_domains}
\title{MFI domain score calculator}
\usage{
mfi_domains(
  ds,
  reverse = TRUE,
  reverse.vars = c(2, 5, 9, 10, 13, 14, 16, 17, 18, 19)
)
}
\arguments{
\item{ds}{data set of MFI scores, 20 columns}

\item{reverse}{reverse scoring}

\item{reverse.vars}{variables/columns to reverse}
}
\value{
tibble of domain scores
}
\description{
MFI domain score calculator
}
\examples{
mfi_mess <- data.frame(matrix(
  sample(c(" 1. ", "2. -A", "3.", " 4 ", "5.", NA),200,replace=TRUE),ncol=20))
mfi_mess |> mfi_domains()
}
@@ -1,22 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfi_calc.R
\name{multi_rev}
\alias{multi_rev}
\title{Title}
\usage{
multi_rev(d, var)
}
\arguments{
\item{d}{data frame or tibble}

\item{var}{numeric vector of indices of columns to reverse}
}
\value{
data.frame or tibble depending on input
}
\description{
Title
}
\examples{
# rep_len(sample(1:5),length.out = 100) |> matrix(ncol=10) |> multi_rev(2:4)
}
@@ -4,14 +4,12 @@
\alias{pase_calc}
\title{PASE score calculator}
\usage{
pase_calc(ds, adjust_work = FALSE, consider.missing = c("Not available"))
pase_calc(ds, adjust_work = FALSE)
}
\arguments{
\item{ds}{data set}

\item{adjust_work}{flag to set whether to include 10b type 1.}

\item{consider.missing}{character vector of values considered missing.
\item{adjust_work}{flag to set whether to include 10b type 1.
Default is TRUE.}
}
\value{

@@ -43,6 +41,5 @@ set \code{TRUE}.
}
\examples{
summary(pase_calc(stRoke::pase)[,13])
str(pase_calc(stRoke::pase))

}
renv/.gitignore (vendored, 7 lines changed)
@@ -1,7 +0,0 @@
library/
local/
cellar/
lock/
python/
sandbox/
staging/
renv/activate.R (1180 lines changed)
File diff suppressed because it is too large.
@@ -1,19 +0,0 @@
{
  "bioconductor.version": null,
  "external.libraries": [],
  "ignored.packages": [],
  "package.dependency.fields": [
    "Imports",
    "Depends",
    "LinkingTo"
  ],
  "ppm.enabled": null,
  "ppm.ignored.urls": [],
  "r.version": null,
  "snapshot.type": "explicit",
  "use.cache": true,
  "vcs.ignore.cellar": true,
  "vcs.ignore.library": true,
  "vcs.ignore.local": true,
  "vcs.manage.ignores": true
}
vignettes/ds2dd.Rmd (new file, 87 lines)
@@ -0,0 +1,87 @@
---
title: "ds2dd"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{ds2dd}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

```{r setup}
library(stRoke)
```

# Easy data set to data base workflow

This function can be used as a simple tool for creating a data base metadata file for REDCap (called a DataDictionary) based on a given data set file.

## Step 1 - Load your data set

Here we'll use the sample TALOS dataset included with the package.

```{r}
data("talos")
ds <- talos
# As the data set lacks an ID column, one is added
ds$id <- seq_len(nrow(ds))
```

## Step 2 - Create the DataDictionary

```{r}
datadictionary <- ds2dd(ds,record.id = "id",include.column.names = TRUE)
```

Now additional specifications to the DataDictionary can be made manually, or it can be uploaded and modified manually in the graphical user interface on the web page.

The function will transform column names to lower case and substitute spaces for underscores. The output is a list with the DataDictionary and a vector of new column names for the dataset to fit the meta data.

## Step 3 - Meta data upload

Now the DataDictionary can be exported as a spreadsheet and uploaded, or it can be uploaded using the `REDCapR` package (only projects with "Development" status).

Use one of the two approaches below:

### Manual upload

```{r eval=FALSE}
write.csv(datadictionary$DataDictionary,"datadictionary.csv")
```

### Upload with `REDCapR`

```{r eval=FALSE}
REDCapR::redcap_metadata_write(
  datadictionary$DataDictionary,
  redcap_uri = keyring::key_get("DB_URI"),
  token = keyring::key_get("DB_TOKEN")
)
```

In the ["REDCap R Handbook"](https://agdamsbo.github.io/redcap-r-handbook/) more is written on interfacing with REDCap in R using `library(keyring)` to store credentials, in [chapter 1.1](https://agdamsbo.github.io/redcap-r-handbook/access.html#sec-getting-access).

## Step 4 - Data upload

The same two options are available for data upload as for meta data upload: manual or through `REDCapR`.

Only the latter is shown here.

```{r eval=FALSE}
# new column names are applied
colnames(ds) <- datadictionary$`Column names`

REDCapR::redcap_write(
  ds,
  redcap_uri = keyring::key_get("DB_URI"),
  token = keyring::key_get("DB_TOKEN")
)
```
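As a footnote to the vignette's Step 2: the column-name normalisation it describes (lower case, spaces replaced by underscores) can be previewed outside ds2dd() with base R; the names below are invented for illustration and are not the package's internal code:

vars <- c("Event Date", "mRS 90 days")
gsub(" ", "_", tolower(vars))
# expected: "event_date" "mrs_90_days"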