# Load required libraries ----------------------------------------------------------------------------------------- init <- function(){ library(devtools) library(rio) library(plyr) library(tidyverse) library(nonlinearTseries) library(zoo) library(xts) library(Matrix) library(flexclust) #library(NetComp) library(igraph) library(lattice) library(latticeExtra) library(grid) library(gridExtra) } ts.I <-function(y){ #require(zoo) if(!all(is.numeric(y),is.null(dim(y)))){ warning("Need a 1D numeric vector! Trying to convert...") y <- as.numeric(as.vector(y)) } idOK <- !is.na(y) t <- index(y) t[!idOK] <- NA yy <- c(y[idOK][1],y[idOK]) tt <- c(t[idOK][1],t[idOK]) yc <- y yc[t[idOK]] <- cumsum(yy[-length(yy)]*diff(tt)) # recover initial diff value yc <-yc + y[1] return(yc) } find_peaks <- function (x, m = 3, wells = FALSE){ shape <- diff(sign(diff(ifelse(wells,-1,1)*x, na.pad = FALSE))) pks <- sapply(which(shape < 0), FUN = function(i){ z <- i - m + 1 z <- ifelse(z > 0, z, 1) w <- i + m + 1 w <- ifelse(w < length(x), w, length(x)) if(all(x[c(z : i, (i + 2) : w)] <= x[i + 1])) return(i + 1) else return(numeric(0)) }) pks <- unlist(pks) pks } Msize <- function(mat, auto=NULL){ if(is.null(auto)){ auto <- isSymmetric(unname(mat)) } return(cumprod(dim(mat) + ifelse(auto, c(0,-1), c(0,0)))[2] / ifelse(auto,2,1)) } repmat <- function(X,m,n){ # % REPMAT R equivalent of repmat (matlab) # % FORMAT # % DESC # % description not available. if (is.matrix(X)) { mx = dim(X)[1] nx = dim(X)[2] out <- matrix(t(matrix(X,mx,nx*n)),mx*m,nx*n,byrow=T) } else if (is.vector(X)) { mx = 1 nx = length(X) out <- matrix(t(matrix(X,mx,nx*n)),mx*m,nx*n,byrow=T) } else if (length(X) == 1) { out <- matrix(X,m, n) } return (out) } # spdiags <- function(B, d, m, n){ # # % SPDIAGS description not available. 
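#
# Usage sketch for the helpers above (illustrative values, assumed inputs):
# find_peaks() returns indices of local maxima that dominate a window of
# +/- m samples; wells = TRUE searches for local minima instead.
#   x <- c(1, 3, 1, 4, 1, 5, 1)
#   find_peaks(x, m = 1)   # -> 2 4 6
# repmat() tiles a matrix m-by-n times, like MATLAB's repmat():
#   repmat(diag(2), 2, 2)  # -> a 4 x 4 matrix of tiled 2 x 2 identities
#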
# # % FORMAT # # % DESC # # % description not available # # # if (is.matrix(d)) # d <- matrix(d,dim(d)[1]*dim(d)[2],1) # else # d <- arg2 # # p <- length(d) # # A <- sparseMatrix(i = 1:arg3, j = 1:arg3, x = 0, dims= c(arg3,arg4)) # cat("in spdiags.R ") # print(A) # cat(" arg3 ") # print(arg3) # cat(" arg4 ") # pring(arg4) # # m <- dim(A)[1] # n <- dim(A)[2] # # len<-matrix(0,p+1,1) # for (k in 1:p) # len[k+1] <- len[k]+length(max(1,1-d[k]):min(m,n-d[k])) # # a <- matrix(0, len[p+1],3) # for (k in 1:p) # { # # % Append new d(k)-th diagonal to compact form # i <- t(max(1,1-d[k]):min(m,n-d[k])) # a[(len[k]+1):len[k+1],] <- c(i, i+d[k], B[(i+(m>=n)*d[k]),k]) # } # # res1 <- sparseMatrix(i = a[,1], j = a[,2], x = a[,3], dims = c(m,n)) # # return (res1) # } # (C)RQA ---------------------------------------------------------------------------------------------------------- clRQA <- function(y1, y2, eDim, eLag, eRad, DLmin, VLmin, theiler, JRP, returnMeasures, returnRP, returnDist, path_to_rp, saveOut, path_out, file_ID = 0){ # -i data filename (input) # -j filename of additional data for CRP/JRP (input) # -J compute JRP instead of default CRP (only if 2nd data is given) # -r filename recurrence plot (output) # -o filename RQA measures (output) # -p filename histogramme diagonal line lengths (output) # -c cummulative histogramme # -f format for recurrence plot file (ASCII, TIF), default=ASCII # -n distance norm (EUCLIDEAN, MAX, MIN, OP), default=EUCLIDEAN # -m embedding dimension, default=1 # -t embedding delay, default=1 # -e threshold, default=1 (if negative, a distance plot will be created) # -l minimal diagonal line, default=2 # -v minimal vertical line, default=2 # -w Theiler corrector, default=1 # -D delimiter in data file, default=TAB # -d delimiter for RQA file, default=TAB # -s silent (no messages displayed) # -V print version information # -h print this help text # set.seed(123456) # rndID <- round(runif(1)*10000) # # rndID=1 #if(is.null(path_out)){path_out <- path_to_rp} path_out <- path_to_rp if(is.null(file_ID)){ fnames <- list.files(path=path_out) rqaID <- grepl("(RQAplot|RQAhist|RQAmeasures)",fnames) if(any(rqaID)){ file_ID <- max(as.numeric(gsub("(RQAplot|RQAhist|RQAmeasures|txt|[[:punct:]])+","",fnames[rqaID]),"[_]"), na.rm = TRUE) } } if(any(is.infinite(file_ID),is.null(file_ID))){file_ID <-0} file_ID <- file_ID + 1 plotOUT <- paste0("'",path_out,"RQAplot_",file_ID, ".txt'") measOUT <- paste0("'",path_out,"RQAmeasures_",file_ID, ".txt'") histOUT <- paste0("'",path_out,"RQAhist_",file_ID, ".txt'") tmpd <- tempdir() tmpf1 <- tempfile(tmpdir = tmpd, fileext = ".csv") write.table(as.data.frame(y1), tmpf1, col.names = FALSE, row.names = FALSE) if(is.null(y2)){ opts <- paste("-i", tmpf1, "-r ", plotOUT, "-o ", measOUT, "-p ", histOUT, "-m", eDim, "-t", eLag, "-e", eRad, "-l", DLmin, "-v", VLmin, "-w", theiler, "-s") } else { tmpf2 <- tempfile(tmpdir = tmpd, fileext = ".csv") write.table(as.data.frame(y2), tmpf2, col.names = FALSE, row.names = FALSE) opts <- paste("-i", tmpf1, "-j", tmpf2, "-r ", plotOUT, "-o ", measOUT, "-p ", histOUT, "-m", eDim, "-t", eLag, "-e", eRad, "-l", DLmin, "-v", VLmin, "-w", theiler, "-s") } closeAllConnections() RCMD("./rp", options = opts, path = path_to_rp, quiet = FALSE) measures = import(gsub("[']+","",measOUT)) rpMAT = import(gsub("[']+","",plotOUT)) disthist = import(gsub("[']+","",histOUT)) cat(paste0("[ID ",file_ID,"] Analysis completed... 
")) if(!saveOut){ RCMD("rm", options = paste("-f",measOUT), path = path_out, quiet = TRUE) RCMD("rm", options = paste("-f",plotOUT), path = path_out, quiet = TRUE) RCMD("rm", options = paste("-f",histOUT), path = path_out, quiet = TRUE) cat("temporary files removed ...\n") } else { cat("files saved ...\n") } return(list(measures = measures, rpMAT, disthist)) } fastRQA <- function(y1, y2 = NULL, eDim = 1, eLag = 1, eRad = 1, DLmin = 2, VLmin = 2, theiler = 1, win = min(length(y1),length(y2), na.rm = TRUE), step = 1, JRP = FALSE, returnMeasures = TRUE, returnRP = TRUE, returnDist = FALSE, path_to_rp = "~/Library/Mobile Documents/com~apple~CloudDocs/Coding/Python/rqa/", saveOut = FALSE, path_out = NULL, file_ID = NULL, ...){ # pat_to_rp = location of rp binary # Catch parameters # params <- list(eDim = eDim, # eLag = eLag, # eRad = eRad, # DLmin = DLmin, # VLmin = VLmin, # theiler = theiler, # JRP = JRP, # returnMeasures = returnMeasures, # returnRP = returnRP, # returnDist = returnDist, # path_to_rp = path_to_rp, # saveOut = saveOut, # path_out = path_out, # file_ID = file_ID) # Begin iput checks ----------------------------------------------------------------------------------------------- if(!is.null(y2)){ y2 <- as.zoo(y2) N1 <- NROW(y1) N2 <- NROW(y2) if(N1>N2){ params$y2[N2:(N1+(N1-N2))] <- 0 } if(N1% mutate(win = win, step = step, index = attr(wlist, "index")) rqa_rpvector <- ldply(wlist[,2]) %>% mutate(win = win, step = step, index = attr(wlist, "index")) rqa_diagdist <- ldply(wlist[,3]) %>% mutate(win = win, step = step, index = attr(wlist, "index")) out <- list(rqa_measures = rqa_measures, rqa_rpvector = rqa_rpvector, rqa_diagdist = rqa_diagdist) return(out[c(returnMeasures,returnRP,returnDist)]) } di2bi <- function(BImat, radius, convMat = FALSE){ # require(NetComp) if(grepl("Matrix",class(BImat))){ BImat <- as.matrix(BImat) convMat <- TRUE } # RP <- NetComp::matrix_threshold(BImat,threshold = radius, minval = 1, maxval = 0) RP <- matrix(0,dim(BImat)[1],dim(BImat)[2]) RP[BImat <= radius] <- 1 if(!all(as.vector(RP)==0|as.vector(RP)==1)){warning("Matrix did not convert to a binary (0,1) matrix!!")} if(convMat){RP <- Matrix(RP)} return(RP) } #' tau #' #' @param y Time series #' @param selection.methods Selecting an optimal embedding lag (default: Return "first.e.decay", "first.zero", "first.minimum", "first.value", where value is 1/exp(1)) #' @param value Threshold value for selection method first.value (default: 1/exp(1)) #' @param maxLag Maximal lag to consider (default: 1/5 of timeseries length) #' @param nbins Number of bins for average mutual information function (default: 1/3 of number of unique values in timeseries) #' @param ... 
#' #' @description A wrapper for nonlinearTseries::timeLag #' #' @return #' @export #' #' @examples tau <- function(y, selection.methods = c("first.minimum"), maxLag = round(length(y)/(maxDim+1)), ...){ y <- y[!is.na(y)] tmi <- mutualInformation(y, lag.max = maxLag, do.plot = FALSE) lags <- laply(selection.methods, function(s.m){ nonlinearTseries::timeLag(y, technique = "ami", selection.method = s.m, lag.max = maxLag, do.plot = FALSE)} ) #id.peaks <- find_peaks(tmi$mutual.information, m = 3, wells = TRUE) id.min <- tmi$time.lag[which.min(tmi$mutual.information)] #as.numeric(tmi$mutual.information[id.peaks[id.min]]) out <- cbind.data.frame(selection.method = c(selection.methods, "global.minimum", "maximum.lag"), opLag = c(lags, id.min, maxLag) ) tmi$mutual.information[tmi$time.lag%in%out$opLag] out$ami <- sapply(out$opLag, function(r) tmi$mutual.information[r]) #tout <- summarise(group_by(out, opLag, ami), selection.method = paste0(unique(selection.method), collapse="|") return(out) } #' eDim #' #' @param y Time series #' @param delay Embeding lag #' @param maxDim Maximum number of embedding dimensions #' @param ... #' #' @description A wrapper for nonlinearTseries::estimateEmbeddingDim #' #' @return #' @export #' #' @examples est.eDim <- function(y, delay = tau(y), maxDim = 20, threshold = .95, max.relative.change = .1, ...){ cbind.data.frame(EmbeddingLag = delay, EmbeddingDim = estimateEmbeddingDim(y, time.lag = delay, threshold = threshold, max.relative.change = max.relative.change, max.embedding.dim = maxDim) ) } #' nzdiags #' #' @description Get all nonzero diagonals of a binary matrix, or, diagonals specified as a vector by argument \code{d}. Output mimics Matlab's \code{[B,d] = spdiags(A)} function. #' #' @param A A binary (0,1) matrix. #' @param d An optional vecotor of diagonals to extract. #' #' @author Fred Hasselman #' #' @return #' @export #' #' @examples nzdiags <- function(A=NULL, d=NULL){ # Loosely based on MATLAB function spdiags() by Rob Schreiber - Copyright 1984-2014 The MathWorks, Inc. #require(Matrix) if(grepl("matrix",class(A),ignore.case = TRUE)){ if(all(A>0)){stop("All matrix elements are nonzero.")} # create an indicator for all diagonals in the matrix ind <- col(A)-row(A) # Split the matrix! spd <- split(A, ind) if(is.null(d)){ # Get diagonals which have nonzero elements keepID <- ldply(spd, function(di) any(di>0)) nzdiag <- spd[keepID$V1] # Indices of nonzero diagonals dvec <- as.numeric(keepID$.id[keepID$V1]) } else { # Diagonals are specified d <- sort(as.vector(d)) keepID <- names(spd)%in%d nzdiag <- spd[keepID] } # Deal with uneven rows and cols m <- NROW(A) n <- NCOL(A) p <- length(d) if(is.logical(A)){ B <- matrix(FALSE,nrow = min(c(m,n), na.rm = TRUE), ncol = p) } else { B <- matrix(0, nrow = min(c(m,n), na.rm = TRUE), ncol = p) } for(i in seq_along(d)){ B[index(nzdiag[[i]]),i] <- nzdiag[[i]] } colnames(B) <- paste(d) return(list(B = B, d = dvec)) } } nzdiags.boot <- function(RP,d=NULL){ # Loosely based on MATLAB function spdiags() by Rob Schreiber - Copyright 1984-2014 The MathWorks, Inc. #require(Matrix) if(grepl("matrix",class(RP),ignore.case = TRUE)){ A <- RP if(all(A>0)){stop("All matrix elements are nonzero.")} # create an indicator for all diagonals in the matrix ind <- col(A)-row(A) # Split the matrix! 
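# A quick illustration of the diagonal-indexing trick used below (assumes a
# small test matrix; the same logic applies to the recurrence matrix A):
#   m <- matrix(1:9, 3, 3)
#   col(m) - row(m)           # 0 on the main diagonal, +1/-1 on super-/sub-diagonals
#   split(m, col(m) - row(m)) # named list of diagonals, names = diagonal index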
spd <- split(A, ind) if(is.null(d)){ # Get diagonals which have nonzero elements keepID <- ldply(spd, function(di) any(di>0)) nzdiag <- spd[keepID$V1] # Indices of nonzero diagonals d <- as.numeric(keepID$.id[keepID$V1]) } else { # Diagonals are specified d <- sort(as.vector(d)) keepID <- names(spd)%in%d nzdiag <- spd[keepID] } # Deal with uneven rows and cols m <- NROW(A) n <- NCOL(A) p <- length(d) if(is.logical(A)){ B <- matrix(FALSE,nrow = min(c(m,n), na.rm = TRUE), ncol = p) } else { B <- matrix(0, nrow = min(c(m,n), na.rm = TRUE), ncol = p) } for(i in seq_along(d)){ B[index(nzdiag[[i]]),i] <- nzdiag[[i]] } colnames(B) <- paste(d) return(B) } } nzdiags.chroma <- function(RP, d=NULL){ # Loosely based on MATLAB function spdiags() by Rob Schreiber - Copyright 1984-2014 The MathWorks, Inc. #require(Matrix) if(grepl("matrix",class(RP),ignore.case = TRUE)){ A <- RP if(all(A>0)){stop("All matrix elements are nonzero.")} # create an indicator for all diagonals in the matrix ind <- col(A)-row(A) # Split the matrix! spd <- split(A, ind) if(is.null(d)){ # Get diagonals which have nonzero elements keepID <- ldply(spd, function(di) any(di>0)) nzdiag <- spd[keepID$V1] # Indices of nonzero diagonals d <- as.numeric(keepID$.id[keepID$V1]) } else { # Diagonals are specified d <- sort(as.vector(d)) keepID <- names(spd)%in%d nzdiag <- spd[keepID] } # Deal with uneven rows and cols m <- NROW(A) n <- NCOL(A) p <- length(d) if(is.logical(A)){ B <- matrix(FALSE,nrow = min(c(m,n), na.rm = TRUE), ncol = p) } else { B <- matrix(0, nrow = min(c(m,n), na.rm = TRUE), ncol = p) } for(i in seq_along(d)){ B[index(nzdiag[[i]]),i] <- nzdiag[[i]] } colnames(B) <- paste(d) return(B) } } #' lineDists #' #' @param RP A thresholded recurrence matrix (binary: 0 - 1) #' @param d Vector of diagonals to be extracted from matrix \code{RP} before line length distributions are calculated. A one element vector will be interpreted as a windowsize, e.g., \code{d = 50} will extract the diagonal band \code{-50:50}. A two element vector will be interpreted as a band, e.g. \code{d = c(-50,100)} will extract diagonals \code{-50:100}. If \code{length(d) > 2}, the numbers will be interpreted to refer to individual diagonals, \code{d = c(-50,50,100)} will extract diagonals \code{-50,50,100}. #' @param theiler Size of the theiler window, e.g. (\code{theiler = 1} removes diagonal bands -1,0,1 from the matrix. If \code{length(d)} is \code{NULL}, 1 or 2, the theiler window is applied before diagonals are extracted. The theiler window is ignored if \code{length(d)>2, or if it is larger than the matrix or band indicated by parameter \code{d}.) #' @param invert Relevant for Recurrence Time analysis: Return the distribution of 0 valued segments in nonzero diagonals/verticals/horizontals. This indicates the time between subsequent line structures. #' #' @description Extract lengths of diagonal and vertical line segments from a recurrence matrix. #' @details Based on the Matlab function \code{lineDists} by Stefan Schinkel, Copyright (C) 2009 Stefan Schinkel, University of Potsdam, http://www.agnld.uni-potsdam.de #' #'% References: #'% S. Schinkel, N. Marwan, O. Dimigen & J. Kurths (2009): #'% "Confidence Bounds of recurrence-based complexity measures #'% Physics Letters A, 373(26), pp. 2245-2250 #'% #'% Copyright (C) 2009 Stefan Schinkel, University of Potsdam #'% http://www.agnld.uni-potsdam.de #' #' @author Fred Hasselman #' @return A list object with distribution of line lengths. 
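#'
#' A minimal usage sketch (illustrative, using a random binary matrix):
#' \preformatted{
#' RP <- matrix(rbinom(100, 1, .3), 10, 10)
#' ld <- lineDists(RP)
#' table(ld$diagonals.dist)  # distribution of diagonal line lengths
#' }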
#' @export #' #' @examples lineDists <- function(RP, d = NULL, theiler = NULL, Chromatic = FALSE, invert = FALSE, AUTO = NULL, chromatic = FALSE, matrices = FALSE, doHalf = FALSE){ require(plyr) require(dplyr) require(tidyr) # For boot() # RP <- RP[indices,] if(!all(as.vector(RP)==0|as.vector(RP)==1)){stop("Matrix should be a binary (0,1) matrix!!")} if(!is.null(d)){ if(length(d)==1){d <- -d:d} if(length(d)==2){d <- d:d} } if(!is.null(theiler)){ if(length(d)=maxIter))){ exitIter <- TRUE } if(round(RR,digits = 2)>round(targetRR,digits = 2)){ tryRadius <- tryRadius*0.8 } else { tryRadius <- tryRadius*2.2 } } iterList$Converged[iter] <- TRUE if(iter>=maxIter){ warning("Max. iterations reached.") iterList$Converged[iter] <- FALSE } if(!dplyr::between(RR,(targetRR*(1-tol)),(targetRR*(tol+1)))){ warning("Target RR not found, try increasing tolerance, or change starting value.") iterList$Converged[iter] <- FALSE } ifelse(histIter,id<-c(1:iter),id<-iter) return(iterList[id,]) } recMat <- function(y1, y2, emDim = 1, emLag = 1, to.ts = NULL, to.sparse = FALSE, order.by = NULL, method = "Manhattan", ...){ require(proxy) require(scales) if(!all(is.data.frame(y1),is.data.frame(y2))){ y1 <- as.data.frame(y1) y2 <- as.data.frame(y2) } et1 <- lagEmbed(y1, emDim, emLag) et2 <- lagEmbed(y2, emDim, emLag) # colnames(et1) <- colnames(y1) # colnames(et2) <- colnames(y2) dmat <- proxy::dist(x = et1, y = et2, method = method, diag = ifelse(identical(et1,et2),FALSE,TRUE)) #dmat <- as.data.frame(dmat) dmat <- unclass(dmat) #rmat <- scales::rescale(rmat) if(all(!is.null(to.ts),!is.null(order.by))){ dmat <- switch(to.ts, "xts" = xts(dmat, order.by = as_datetime(order.by)), "zoo" = zoo(dmat, order.by = as_datetime(order.by)) ) } if(!is.null(order.by)){ colnames(dmat) <- paste(order.by) rownames(dmat) <- paste(order.by) } # if(is.null(to.ts)){ # dmat <- Matrix(dmat) # } if(to.sparse){ dmat <- Matrix(dmat) } attr(dmat,"eDims1") <- et1 attr(dmat,"eDims2") <- et2 attr(dmat,"AUTO") <- ifelse(identical(et1,et2),TRUE,FALSE) # RM <- structure(.Data = dmat, # emDims1.name = colnames(y1), # emDims2.name = colnames(y2), # eDims1 = et1, # eDims2 = et2, # AUTO = ifelse(identical(et1,et2),TRUE,FALSE)) # #class(RM) <- "recmat" return(dmat) } empty.crp <- function(){ data.frame( Radius = NA, RT = NA, RR = NA, DET = NA, MEAN_dl = NA, MAX_dl = NA, ENT_dl = NA, ENTrel_dl= NA, REP_tot = NA, CoV_dl = NA, DIV_dl = NA, SING_dl = NA, N_dl = NA, ANI = NA, LAM_vl = NA, TT_vl = NA, MAX_vl = NA, ENT_vl = NA, ENTrel_vl= NA, CoV_vl = NA, REP_vl = NA, DIV_vl = NA, SING_vl = NA, N_vl = NA, LAM_hl = NA, TT_hl = NA, MAX_hl = NA, ENT_hl = NA, ENTrel_hl= NA, CoV_hl = NA, REP_hl = NA, DIV_hl = NA, SING_hl = NA, N_hl = NA) } calc.crp <- function(RM, radius= NULL, DLmin = 2, VLmin = 2, HLmin = 2, DLmax = length(diag(rmat))-1, VLmax = length(diag(rmat))-1, HLmax = length(diag(rmat))-1, AUTO = FALSE, chromatic = FALSE, matrices = FALSE){ RMsize <- Msize(RM, auto=AUTO) #Total nr. 
# recurrent points
  RT <- Matrix::nnzero(RM, na.counted = FALSE)

  #Proportion recurrence / Recurrence Rate
  RR <- RT/RMsize

  #Get line segments
  lineSegments <- lineDists(RM)
  dlines <- lineSegments$diagonals.dist
  vlines <- lineSegments$verticals.dist
  hlines <- lineSegments$horizontals.dist

  #Frequency tables of line lengths
  freq_dl <- table(dlines)
  freq_vl <- table(vlines)
  freq_hl <- table(hlines)

  freqvec_dl <- as.numeric(names(freq_dl))
  freqvec_vl <- as.numeric(names(freq_vl))
  freqvec_hl <- as.numeric(names(freq_hl))

  #Number of diagonal, vertical and horizontal lines of admissible length
  N_dl <- sum(freq_dl[dplyr::between(freqvec_dl, DLmin, DLmax)], na.rm = TRUE)
  N_vl <- sum(freq_vl[dplyr::between(freqvec_vl, VLmin, VLmax)], na.rm = TRUE)
  N_hl <- sum(freq_hl[dplyr::between(freqvec_hl, HLmin, HLmax)], na.rm = TRUE)

  #Determinism / Horizontal and Vertical Laminarity
  DET    <- N_dl/RT
  LAM_vl <- N_vl/RT
  LAM_hl <- N_hl/RT

  #Anisotropy ratio
  ANI <- (N_vl - N_hl)/N_dl

  #Singularities
  SING_dl <- sum(freq_dl[dplyr::between(as.numeric(names(freq_dl)), 1, (DLmin-1))], na.rm = TRUE)/RT
  SING_vl <- sum(freq_vl[dplyr::between(as.numeric(names(freq_vl)), 1, (VLmin-1))], na.rm = TRUE)/RT
  SING_hl <- sum(freq_hl[dplyr::between(as.numeric(names(freq_hl)), 1, (HLmin-1))], na.rm = TRUE)/RT

  #Array of probabilities that a certain line length will occur (lengths within the min/max bounds)
  P_dl <- freq_dl[dplyr::between(as.numeric(names(freq_dl)), DLmin, DLmax)]/N_dl
  P_vl <- freq_vl[dplyr::between(as.numeric(names(freq_vl)), VLmin, VLmax)]/N_vl
  P_hl <- freq_hl[dplyr::between(as.numeric(names(freq_hl)), HLmin, HLmax)]/N_hl

  #Entropy of line length distributions
  ENT_dl <- -1 * sum(P_dl * log(P_dl))
  ENT_vl <- -1 * sum(P_vl * log(P_vl))
  ENT_hl <- -1 * sum(P_hl * log(P_hl))

  #Relative Entropy (Entropy / Max entropy)
  ENTrel_dl = ENT_dl/(-1 * log(1/DLmax))
  ENTrel_vl = ENT_vl/(-1 * log(1/VLmax))
  ENTrel_hl = ENT_hl/(-1 * log(1/HLmax))

  #Meanline: average length of the admissible line segments
  MEAN_dl = mean(dlines[dplyr::between(dlines, DLmin, DLmax)], na.rm = TRUE)
  MEAN_vl = mean(vlines[dplyr::between(vlines, VLmin, VLmax)], na.rm = TRUE)
  MEAN_hl = mean(hlines[dplyr::between(hlines, HLmin, HLmax)], na.rm = TRUE)

  #Maxline: longest observed line segment
  MAX_dl = max(freqvec_dl, na.rm = TRUE)
  MAX_vl = max(freqvec_vl, na.rm = TRUE)
  MAX_hl = max(freqvec_hl, na.rm = TRUE)

  #REPetitiveness
  REP_tot <- (N_hl + N_vl - N_dl)/N_dl
  REP_hl  <- N_hl/N_dl
  REP_vl  <- N_vl/N_dl

  #Coefficient of variation of line lengths
  CoV_dl = sd(dlines)/mean(dlines)
  CoV_vl = sd(vlines)/mean(vlines)
  CoV_hl = sd(hlines)/mean(hlines)

  #Divergence
  DIV_dl = 1/MAX_dl
  DIV_vl = 1/MAX_vl
  DIV_hl = 1/MAX_hl

  crqaMatrices <- list()

  #Output
  out <- data.frame(
    Radius    = radius,
    RT        = RT,
    RR        = RR,
    DET       = DET,
    MEAN_dl   = MEAN_dl,
    MAX_dl    = MAX_dl,
    ENT_dl    = ENT_dl,
    ENTrel_dl = ENTrel_dl,
    REP_tot   = REP_tot,
    CoV_dl    = CoV_dl,
    DIV_dl    = DIV_dl,
    SING_dl   = SING_dl,
    N_dl      = N_dl,
    ANI       = ANI,
    LAM_vl    = LAM_vl,
    TT_vl     = MEAN_vl,
    MAX_vl    = MAX_vl,
    ENT_vl    = ENT_vl,
    ENTrel_vl = ENTrel_vl,
    CoV_vl    = CoV_vl,
    REP_vl    = REP_vl,
    DIV_vl    = DIV_vl,
    SING_vl   = SING_vl,
    N_vl      = N_vl,
    LAM_hl    = LAM_hl,
    TT_hl     = MEAN_hl,
    MAX_hl    = MAX_hl,
    ENT_hl    = ENT_hl,
    ENTrel_hl = ENTrel_hl,
    CoV_hl    = CoV_hl,
    REP_hl    = REP_hl,
    DIV_hl    = DIV_hl,
    SING_hl   = SING_hl,
    N_hl      = N_hl)

  if(matrices){
    return(list(
      crqaMeasures = out,
      crqaMatrices = list(RM = RM,
                          dlines = dlines, vlines = vlines, hlines = hlines,
                          freq_dl = freq_dl, freq_vl = freq_vl, freq_hl = freq_hl)
    ))
  } else {
    return(out)
  }
}

prep.crp <- function(RP,
                     radius = NULL,
                     DLmin = 2, VLmin = 2, HLmin = 2,
                     DLmax = length(diag(RP))-1,
                     VLmax = length(diag(RP))-1,
                     HLmax = length(diag(RP))-1,
                     AUTO = FALSE,
                     chromatic = FALSE,
                     matrices = FALSE,
                     doHalf = FALSE){

  out <- calc.crp(RP,
                  radius = radius,
                  DLmin = DLmin, VLmin = VLmin, HLmin = HLmin,
                  DLmax = DLmax, VLmax = VLmax, HLmax = HLmax,
                  AUTO = AUTO,
                  chromatic =
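                  # Worked example of the core proportions in calc.crp() above
                  # (illustrative numbers, not taken from real data): with
                  # RT = 500 recurrent points in a 100 x 100 matrix,
                  # RR = 500/10000 = 0.05; if 400 of those points lie on
                  # diagonal lines of admissible length, DET = 400/500 = 0.8.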
chromatic, matrices = matrices) if(doHalf){ if(!AUTO){ outLo <- calc.crp(Matrix::tril(RP,-1), radius= radius, DLmin = DLmin, VLmin = VLmin, HLmin = HLmin, DLmax = DLmax, VLmax = VLmax, HLmax = HLmax, AUTO = AUTO, chromatic = chromatic, matrices = matrices) outUp <- calc.crp(Matrix::triu(RP,-1), radius= radius, DLmin = DLmin, VLmin = VLmin, HLmin = HLmin, DLmax = DLmax, VLmax = VLmax, HLmax = HLmax, AUTO = AUTO, chromatic = chromatic, matrices = matrices) out <- cbind.data.frame(full = out, lower = outLo, upper = outUp) } else { out<- cbind.data.frame(full = out, lower = empty.crp(), upper = empty.crp()) } } return(out) } crqa_mat_measures <- function(RM, DLmin = 2, VLmin = 2, HLmin = 2, DLmax = length(diag(rmat))-1, VLmax = length(diag(rmat))-1, HLmax = length(diag(rmat))-1, AUTO = NULL, chromatic = FALSE, matrices = FALSE, doHalf = FALSE, Nboot = NULL, CL = .95){ # DLmin = 2 # VLmin = 2 # HLmin = 2 # DLmax = length(diag(rmat))-1 # VLmax = length(diag(rmat))-1 # HLmax = length(diag(rmat))-1 # chromatic = FALSE # matrices = FALSE # CL = .95 require(dplyr) if(is.null(Nboot)){Nboot = 1} NRows <- NROW(RM) NCols <- NCOL(RM) mc.cores <- detectCores() if(Nboot1){ cat(paste0("Bootstrapping Recurrence Matrix... ",Nboot," iterations...\n")) cat(paste0("Estimated duration: ", round((difftime(tend,tstart, unit = "mins")*Nboot)/max((round(mc.cores/2)-1),1), digits=1)," min.\n")) tstart <-Sys.time() bootOut <- mclapply(1:Nboot, function(i){ replicate <- as.data.frame(prep.crp(matrix(RM[ceiling(NCols*NRows*runif(NCols*NRows))], ncol=NCols, nrow = NRows), radius= radius, DLmin = DLmin, VLmin = VLmin, HLmin = HLmin, DLmax = DLmax, VLmax = VLmax, HLmax = HLmax, AUTO = AUTO, chromatic = chromatic, matrices = matrices, doHalf = doHalf)) replicate$replicate = i return(replicate) }, mc.cores = mc.cores ) tend <- Sys.time() cat(paste0("Actual duration: ", round(difftime(tend,tstart, unit = "mins"), digits=1)," min.\n")) dfrepl <- ldply(bootOut) dfrepl <- gather(dfrepl, key = measure, value = value, -replicate) if(length(CL)==1){ ci.lo <- (1-CL)/2 ci.hi <- CL + ci.lo } else { ci.lo <- CL[1] ci.hi <- CL[2] } rqout <- dfrepl %>% group_by(measure) %>% summarize( val = NA, ci.lower = quantile(value, ci.lo, na.rm = TRUE), ci.upper = quantile(value, ci.hi, na.rm = TRUE), mean = mean(value, na.rm = TRUE), sd = sd(value, na.rm = TRUE), var = var(value, na.rm = TRUE), N = n(), se = sd(value, na.rm = TRUE)/sqrt(n()), median = mean(value, na.rm = TRUE), mad = mad(value, na.rm = TRUE) ) for(m in 1:nrow(rqout)){ rqout$val[rqout$measure%in%dfori$measure[m]] <- dfori$value[m] } } else { rqout <- dfori } return(rqout) } # sizeIn <- dim(RM) # mc.cores <- detectCores() # if(Nboot% # bootstrap(Nboot) %>% # do(as.data.frame( ) # ) #' crqa_mat #' #' @param rp Recurrence Matrix #' @param radius #' @param DLmin #' @param VLmin #' @param HLmin #' @param DLmax #' @param VLmax #' @param HLmax #' @param AUTO Auto-recurrence? 
(default: FALSE) #' @param matrices #' #' @return #' @export #' #' @examples crqa_mat <- function(rmat, radius= NULL, DLmin = 2, VLmin = 2, HLmin = 2, DLmax = length(diag(rmat))-1, VLmax = length(diag(rmat))-1, HLmax = length(diag(rmat))-1, AUTO = FALSE, doHalf = FALSE, chromatic = FALSE, matrices = FALSE, Nboot = NULL, CL = .95){ # require(boot) require(dplyr) require(broom) # Input should be a distance matrix, or a matrix of zeroes and ones with radius = NULL, output is a list # Fred Hasselman (me@fredhasselman.com) - August 2013 if(is.null(AUTO)){ AUTO <- ifelse(identical(as.vector(tril(rmat,-1)),as.vector(tril(t(rmat),-1))),TRUE,FALSE) } #uval <- unique(as.vector(rmat)) if(all(as.vector(rmat)==0|as.vector(rmat)==1)){ RM <- rmat } else { if(!is.null(radius)){ RM <- di2bi(rmat,radius) } else{ if(!chromatic){ stop("Expecting a binary (0,1) matrix.\nUse 'get.radius()', or set 'chromatic = TRUE'") } else { stop("Chromatic RQA not implemented yet.") } } } rm(rmat) out <- crqa_mat_measures(RM, DLmin = DLmin, VLmin = VLmin, HLmin = HLmin, DLmax = DLmax, VLmax = VLmax, HLmax = HLmax, AUTO = AUTO, chromatic = chromatic, matrices = matrices, doHalf = doHalf, Nboot = Nboot, CL = CL) # # if(is.null(Nboot)){Nboot = 1} # # out <- crqa_mat_measures(RM, # radius= radius, # DLmin = DLmin, # VLmin = VLmin, # HLmin = HLmin, # DLmax = DLmax, # VLmax = VLmax, # HLmax = HLmax, # AUTO = AUTO, # chromatic = chromatic, # matrices = matrices, # doHalf = doHalf, # Nboot = Nboot, # CL = CL) # # dfori <- gather(out, key = measure, value = value) # # col.ind <- tbl_df(index(RM)) # row.ind <- tbl_df(sample(index(RM),size=nrow(RM))) # # if(Nboot>1){cat(paste0("Bootstrapping Recurrence Matrix... ",Nboot," iterations.\n")) # bootout <- col.ind %>% # bootstrap(Nboot) %>% # do(crqa_mat_measures(RM[row.ind,unlist(.)], # radius= radius, # DLmin = DLmin, # VLmin = VLmin, # HLmin = HLmin, # DLmax = DLmax, # VLmax = VLmax, # HLmax = HLmax, # AUTO = AUTO, # chromatic = chromatic, # matrices = matrices, # doHalf = doHalf)) # # dfrepl <- gather(bootout, key = measure, value = value, -replicate) # # if(length(CL)==1){ # ci.lo <- (1-CL)/2 # ci.hi <- CL + ci.lo # } else { # ci.lo <- CL[1] # ci.hi <- CL[2] # } # # rqout <- dfrepl %>% group_by(measure) %>% # summarize( # val = NA, # ci.low = quantile(value, ci.lo, na.rm = TRUE), # ci.high = quantile(value, ci.hi, na.rm = TRUE), # mean = mean(value, na.rm = TRUE), # sd = sd(value, na.rm = TRUE), # var = var(value, na.rm = TRUE), # N = n(), # se = sd(value, na.rm = TRUE)/sqrt(n()), # median = mean(value, na.rm = TRUE), # mad = mad(value, na.rm = TRUE) # ) # # for(m in 1:nrow(rqout)){ # rqout$val[rqout$measure%in%dfori$measure[m]] <- dfori$value[m] # } # } else { # rqout <- dfori # } return(out) } plotRecMat <- function(rmat, PhaseSpaceScale= TRUE){ require(grid) require(ggplot2) require(gtable) AUTO <-ifelse(identical(as.vector(tril(rmat,-1)),as.vector(tril(t(rmat),-1))),TRUE,FALSE) meltRP <- reshape2::melt(rmat) if(!all(as.vector(meltRP$value[!is.na(meltRP$value)])==0|as.vector(meltRP$value[!is.na(meltRP$value)])==1)){ unthresholded = TRUE # For presentation purpose only if(PhaseSpaceScale){ meltRP$value <- scales::rescale(meltRP$value) } } else { unthresholded = FALSE meltRP$value <- factor(meltRP$value) } gRP <- ggplot(aes(x=Var1, y=Var2, fill = value), data= meltRP) + geom_raster() if(unthresholded){ gRP <- gRP + scale_fill_gradient2(low = "red3", high = "steelblue", mid = "white", na.value = scales::muted("slategray4"), midpoint = .5, limit = c(min(meltRP$value),max(meltRP$value)), 
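                               # Usage sketch for plotRecMat() (assumed call
                               # pattern, building the matrix with recMat() above):
                               #   y  <- sin(seq(0, 10, by = .1))
                               #   rm <- recMat(y1 = y, y2 = y, emDim = 2, emLag = 4)
                               #   plotRecMat(rm)  # unthresholded values are rescaled to [0,1]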
space = "Lab", name = "") rptheme <- theme(panel.background = element_rect("grey99"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.background = element_blank(), legend.key = element_blank(), panel.border = element_blank(), axis.ticks = element_blank(), axis.text = element_blank(), axis.title.x =element_blank(), axis.title.y =element_blank(), plot.margin = margin(0,0,0,0)) } else { gRP <- gRP + scale_fill_manual(values = c("white","black"), na.value = scales::muted("slategray4"), name = "", guide = "none") rptheme <- theme( panel.background = element_rect("grey50"), panel.border = element_rect("grey50",fill=NA), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.background = element_blank(), legend.key = element_blank(), axis.ticks = element_blank(), axis.text = element_blank(), axis.title.x =element_blank(), axis.title.y =element_blank(), plot.margin = margin(3,3,3,3)) } gRP <- gRP + geom_abline(slope = 1,colour = "grey50", size = 1) + ggtitle(label=ifelse(AUTO,"Auto-recurrence plot","Cross-recurrence plot")) + rptheme + coord_fixed(expand = FALSE) if(!is.null(attr(rmat,"eDims1"))){ y1 <- data.frame(t1=attr(rmat,"eDims1")) y2 <- data.frame(t2=attr(rmat,"eDims2")) y1[,1] <- rescale(y1[,1]) gy1 <- ggplot(y1, aes(y=y1[,1], x= index(y1))) + geom_line() + xlab(colnames(y1)) + ylab("") + geom_vline(xintercept = index(y1)[is.na(y1[,1])], colour = scales::muted("slategray4"),alpha=.1, size=.5) + theme(panel.background = element_rect("grey99"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.background = element_blank(), legend.key = element_blank(), panel.border = element_blank(), axis.text = element_blank(), axis.ticks = element_blank(), axis.title.x =element_text(colour = "black",angle = 0, vjust = +3), axis.title.y =element_blank(), plot.margin = margin(0,0,0,0) ) + coord_cartesian(expand = FALSE) # + coord_fixed(1/10) y2[,1] <- rescale(y2[,1]) gy2 <- ggplot(y2, aes(y=y2[,1], x= index(y2))) + geom_line() + xlab(colnames(y2)) + ylab("") + geom_vline(xintercept = index(y2)[is.na(y2[,1])], colour = scales::muted("slategray4"),alpha=.1, size=.5) + theme(panel.background = element_rect("grey99"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.background = element_blank(), legend.key = element_blank(), panel.border = element_blank(), axis.text = element_blank(), axis.ticks = element_blank(), axis.title.x =element_blank(), axis.title.y =element_text(colour = "black",angle = 90, vjust = -2), plot.margin = margin(0,0,0,0)) + coord_flip(expand = FALSE) + scale_y_reverse() #coord_fixed(1/2) + } else { gy1 <- gg.plotHolder() gy2 <- gg.plotHolder() } gr <- ggplotGrob(gRP) gindex <- subset(gr$layout, name == "panel") g <- gtable_add_cols(gr, unit(2, "cm"),0) g <- gtable_add_rows(g, unit(2,"cm")) g <- gtable_add_grob(g, ggplotGrob(gy2), t=gindex$t, l=1, b=gindex$b, r=gindex$l) g <- gtable_add_grob(g, ggplotGrob(gy1), t=9, l=2, b=11, r=6) grid.newpage() grid.draw(g) } # plotRM.crqa <- function(RM, # useLattice = FALSE, # plotVars = NULL, # plotTime = NULL, # plotMatrix = TRUE){ # require(scales) # # # RP dimensions # nr <- dim(RM)[1] # nc <- dim(RM)[2] # # # Do some checks # plotVarsOK <- FALSE # plotTimeOK <- FALSE # # if(any(class(RM)=="xts"|class(RM)=="zoo")){ # if(!is.null(attr(RM,"eDims1"))){ # plotVarsOK <- TRUE # plotTimeOK <- TRUE # plotVars <- list(attributes(RM)$eDims1,attributes(RM)$eDims2) # matNAmes <- c(attr(RM,"emDims1.name"),attr(RM,"emDims1.name")) # AUTO <- 
attr(RM,"AUTO") # RM <- unclass(RM) # } # } else { # # AUTO <- ifelse(isSymmetric(RM),TRUE,FALSE) # # if(is.list(plotVars)){ # if(all(lengths(plotVars)==c(nr,nc))){ # yr <- plotVars[[1]] # yc <- plotVars[[2]] # plotVarsOK <- TRUE # if(any(ncol(yr)>10,ncol(yc)>10)){ # yr <- yr[,1:10] # yc <- yc[,1:10] # warning("Detected more than 10 embedding dims, will plot 1-10.")} # } else {warning("Expecting: length(plotVars[[1]]) == dims(RM)[1] & length(plotVars[[2]]) == dims(RM)[2]\nFound: nrows = ",paste(lengths(plotVars),collapse ="; cols = "))} # } else {warning("plotVars not a list.\n Not plotting dimensions.")} # # # Plot Time? # if(!is.null(plotTime)){ # if(is.POSIXct(as_datetime(dimnames(RM)[[2]]))){ # plotTimeOK <- TRUE # } # } # } # # # if(!is.list(plotTime)){ # # if(nc!=nr){ # # warning("Unequal rows and columns, expecting a list with 2 time variables.") # # } else { # # if(NROW(plotTime==nr)){ # # plotTimeRow <- plotTimeCol <- plotTime # # plotTimeOK <- TRUE # # } else { # # warning("Time variable length should equal nrows(RM) & ncols(RM).") # # } # if rows are not equal to length # # } #if row and columns equal # # } else { # # if(all(lengths(plotTime)==c(nr,nc))){ # # plotTimeOK <- TRUE # # plotTimeRow <- plotTime[[1]] # # plotTimeCol <- plotTime[[2]] # # } else { # # warning("Expecting: length(plotTime[[1]]) == dims(RM)[1] & length(plotTime[[2]]) == dims(RM)[2]\nFound: nrows = ",paste(lengths(plotTime),collapse ="; cols = ")) # # } # # } # # if(plotTimeOK){ # # if(is.POSIXt(plotTimeRow)&is.POSIXt(plotTimeCol)){ # # plyr::amv_dimnames(RM) <-list(plotTimeRow, plotTimeCol) # # } else { # # warning("plotTime is not a POSIXct or POSIXlt object.") # # } # # } # # } # # # AUTO or CROSS? # if(AUTO){ # RM <- Matrix::t(RM) # } # # if(nc*nr>10^6){ # message(paste("\nLarge RM with",nc*nr,"elements... \n >> This could take some time to plot!\n")) # } # # if(useLattice){ # require(lattice) # require(latticeExtra) # # ifelse(AUTO, # Titles <- list(main="Auto Recurrence Plot", X = expression(Y[t]), Y = expression(Y[t])), # Titles <- list(main="Cross Recurrence Plot", X = expression(X[t]), Y = expression(Y[t])) # ) # # #binned <- ftable(as.vector(RM)) # dimnames(RM) <- list(NULL,NULL) # # #Thresholded or Distance matrix? 
# ifelse((all(as.vector(RM)==0|as.vector(RM)==1)), # distPallette <- colorRampPalette(c("white", "black"))(2), # distPallette <- colorRampPalette(colors = c("red3", "snow", "steelblue"), # space = "Lab", # interpolate = "spline") # ) # # lp <- levelplot(as.matrix(RM), # colorkey = !AUTO, # region = TRUE, # col.regions = distPallette, # useRaster = TRUE, # aspect = nc/nr, # main = Titles$main, # xlab = Titles$X, # ylab = Titles$Y) # # # if(plotVarsOK){ # lp # } # # if(plotMatrix){ # lp # #grid.arrange(lp, txt, ncol=2, nrow=1, widths=c(4, 1), heights=c(4)) # } # # return(lp) # # } else { # # require(gridExtra) # require(ggplot2) # require(reshape2) # # meltRP <- reshape2::melt((RM)) # # if(!all(dplyr::between(meltRP$value[!is.na(meltRP$value)],0,1))){ # meltRP$value <- scales::rescale(meltRP$value) # } # # # # if(plotTimeOK){ # # #if( (year(last(paste0(meltRP$Var1)))- year(meltRP$Var1[1]))>=1){ # # meltRP$Var1 <- parse_date_time(as.character(meltRP$Var1), c("ymd", "ymd HM", "ymd HMS")) # # meltRP$Var2 <- parse_date_time(as.character(meltRP$Var2), c("ymd", "ymd HM", "ymd HMS")) # # #} # # # } else { # # meltRP$Var1 <- index(meltRP$value) # # meltRP$Var2 <- index(meltRP$value) # # #} # # ## colnames(meltRP)[1:2] <- matNAmes # # grp <- ggplot(data = meltRP, aes(x = meltRP[,2], # y = meltRP[,1], # fill = value)) + geom_raster() # # if(all(as.vector(RM)==0|as.vector(RM)==1)){ # grp <- grp + scale_fill_grey(name="Manhattan") # } else { # grp <- grp + scale_fill_gradient2(low = "red3", # high = "steelblue", # mid = "white", # na.value = scales::muted("slategray4"), # midpoint = .5, # limit = c(0,1), # space = "Lab", # name = "Manhattan") # } # # if(AUTO){ # grp <- grp + labs(title = "Auto-Recurrence Plot\n") # } else { # grp <- grp + labs("Cross-Recurrence Plot\n") # } # # # if(plotTime){ # grp <- grp + # scale_x_discrete(limits = c(first(meltRP$Var1),last(meltRP$Var2))) + #breaks = meltRP$Var2[seq(1,nrow(meltRP),round(nrow(meltRP)/5))]) + # scale_y_discrete(limits = c(first(meltRP$Var1),last(meltRP$Var2))) #breaks = meltRP$Var1[seq(1,nrow(meltRP),round(nrow(meltRP)/5))]) # # grp <- grp + theme_bw() + coord_fixed() # # # theme(axis.text.x = element_text(angle = 45, vjust = 1, size = 8, hjust = 1), # # axis.text.y = element_text(size = 8)) + # # # # if(plotVarsOK){ # grp # } # # # if(plotMatrix){ # grp # #grid.arrange(lp, txt, ncol=2, nrow=1, widths=c(4, 1), heights=c(4)) # } # # return(grp) # } # # # } else { # # warning("\nInput is not list output from function crqa().\n") # # } # # } #' plotRP #' #' @param crqaOutput List output from \code{\link[crqa]{crqa}} #' #' @return #' @export #' @author Fred Hasselman #' @description Creates a recurrence plot from the sparse matrix output generated by \code{\link[crqa]{crqa}}. #' @examples plotRP.crqa <- function(crqaOutput){ require(Matrix) require(lattice) require(grid) require(gridExtra) AUTO <- FALSE # Is is crqa output? if(all(names(crqaOutput)%in%c("RR","DET","NRLINE","maxL","L","ENTR","rENTR","LAM","TT","RP"))){ # RP dimensions nr <- dim(crqaOutput$RP)[1] nc <- dim(crqaOutput$RP)[2] RP <- crqaOutput$RP # AUTO or CROSS? 
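# Note: class(RP) == "dtCMatrix" below matches only that exact class; a more
# robust check for triangular sparse storage (a sketch, not the original code):
#   if(is(RP, "triangularMatrix")) AUTO <- TRUE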
if(class(RP)=="dtCMatrix"){ message("\nRP in crqa output is a Triangular Sparse Matrix, this implies auto-recurrence...\n") AUTO <- TRUE } if(isSymmetric(RP)){ message("\nRP in crqa output is a Symmetric Sparse Matrix, this implies auto-recurrence: \n >> RP Measures will include the Line of Identity, unless:\n >> crqa() with side = 'upper' or 'lower' was used.\n >> crqa() with Theiler window (tw) of 1 was used.") AUTO <- TRUE} if(nc*nr>10^6){ message(paste("\nLarge RP with",nc*nr,"elements... \n >> This could take some time to plot!\n")) } ifelse(AUTO, Titles <- list(main="Auto Recurrence Plot", X = expression(Y[t]), Y = expression(Y[t])), Titles <- list(main="Cross Recurrence Plot", X = expression(X[t]), Y = expression(Y[t])) ) # Thresholded or Distance matrix? ifelse(is.logical(as.array(RP)), distPallette <- colorRampPalette(c("white", "black"))(2), distPallette <- colorRampPalette(c("red", "white", "blue")) #( length(unique(as.vector(RP)))) ) #distPallette <- colorRampPalette(c("white", "black"))(2) lp <- levelplot(Matrix::as.matrix(RP), #colorkey = TRUE, region = TRUE, col.regions = distPallette, useRaster = TRUE, aspect = nc/nr, main = Titles$main, xlab = Titles$X, ylab = Titles$Y) txt <- grid.text(paste0("\n RR: ", round(crqaOutput$RR, digits = 1), "\n DET: ", round(crqaOutput$DET, digits = 1), "\nmeanL: ", round(crqaOutput$maxL, digits = 1), "\n maxL: ", round(crqaOutput$NRLINE, digits = 1), "\n LAM: ", round(crqaOutput$LAM, digits = 1), "\n TT: ", round(crqaOutput$TT, digits = 1), "\n ENT: ", round(crqaOutput$ENT, digits = 1), "\n rENT: ", round(crqaOutput$rENTR, digits = 1)), .02,.7, just = "left", draw = FALSE, gp = gpar(fontfamily = "mono", cex = .7)) # ,) grid.arrange(lp, txt, ncol=2, nrow=1, widths=c(4, 1), heights=c(4)) } else { warning("\nInput is not list output from function crqa().\n") } } plotRP.fnn <- function(FNNoutput){ plot(FNNoutput["combined",],type="b",pch=16, cex=2, col="grey80", ylim=c(0,100), xaxt="n", xlab = "Embedding Dimension", ylab = "False Nearest Neighbours") lines(FNNoutput["atol",],type="b",pch="a",col="grey30", lty=2) lines(FNNoutput["rtol",],type="b",pch="r", col="grey30",lty=2) Axis(side=1,at=seq_along(FNNoutput[1,]),labels = dimnames(FNNoutput)[[2]]) legend("topright",c("Combined","atol","rtol"), pch = c(16,97,114), lty = c(1,2,2), col = c("grey80","grey30","grey30"), pt.cex=c(2,1,1)) } # Toy models ------------------------------------------------------------------------------------------------------ #' Autocatlytic Growth: Iterating differential equations (maps) #' #' @title Autocatlytic Growth: Iterating differential equations (maps) #' #' @param Y0 Initial value. #' @param r Growth rate parameter. #' @param k Carrying capacity. #' @param N Length of the time series. #' @param type One of: "driving" (default), "damping", "logistic", "vanGeert1991". #' #' @return A timeseries object of length N. #' @export #' #' @author Fred Hasselman #' #' @family autocatalytic growth functions #' #' @examples #' # The logistic map in the chaotic regime #' growth.ac(Y0 = 0.01, r = 4, type = "logistic") growth.ac <- function(Y0 = 0.01, r = 1, k = 1, N = 100, type = c("driving", "damping", "logistic", "vanGeert")[1]){ # Create a vector Y of length N, which has value Y0 at Y[1] if(N>1){ Y <- as.numeric(c(Y0, rep(NA,N-2))) # Conditional on the value of type ... switch(type, # Iterate N steps of the difference function with values passed for Y0, k and r. 
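           # The four maps iterated below, in difference-equation form:
           #   driving:  Y[t+1] = r * Y[t]
           #   damping:  Y[t+1] = -r * Y[t]^2 / k  (k is added to the value returned by sapply, not to the stored series)
           #   logistic: Y[t+1] = r * Y[t] * (k - Y[t]) / k
           #   vanGeert: Y[t+1] = Y[t] * (1 + r - r * Y[t] / k)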
driving = sapply(seq_along(Y), function(t) Y[[t+1]] <<- r * Y[t] ), damping = k + sapply(seq_along(Y), function(t) Y[[t+1]] <<- - r * Y[t]^2 / k), logistic = sapply(seq_along(Y), function(t) Y[[t+1]] <<- r * Y[t] * ((k - Y[t]) / k)), vanGeert = sapply(seq_along(Y), function(t) Y[[t+1]] <<- Y[t] * (1 + r - r * Y[t] / k)) )} return(ts(Y)) } #' Conditional Autocatlytic Growth: Iterating differential equations (maps) #' #' @param Y0 Initial value #' @param r Growth rate parameter #' @param k Carrying capacity #' @param cond Conditional rules passed as a data.frame of the form: cbind.data.frame(Y = ..., par = ..., val = ...) #' @param N Length of the time series #' #' @export #' #' @author Fred Hasselman #' #' @family autocatalytic growth functions #' #' @examples #' # Plot with the default settings #' xyplot(growth.ac.cond()) #' #' # The function such that it can take a set of conditional rules and apply them sequentially during the iterations. #' # The conditional rules are passed as a `data.frame` #' (cond <- cbind.data.frame(Y = c(0.2, 0.6), par = c("r", "r"), val = c(0.5, 0.1))) #' xyplot(growth.ac.cond(cond=cond)) #' #' # Combine a change of `r` and a change of `k` #' (cond <- cbind.data.frame(Y = c(0.2, 1.99), par = c("r", "k"), val = c(0.5, 3))) #' xyplot(growth.ac.cond(cond=cond)) #' #' # A fantasy growth process #' (cond <- cbind.data.frame(Y = c(0.1, 1.99, 1.999, 2.5, 2.9), par = c("r", "k", "r", "r","k"), val = c(0.3, 3, 0.9, 0.1, 1.3))) #' xyplot(growth.ac.cond(cond=cond)) growth.ac.cond <- function(Y0 = 0.01, r = 0.1, k = 2, cond = cbind.data.frame(Y = 0.2, par = "r", val = 2), N = 100){ # Create a vector Y of length N, which has value Y0 at Y[1] Y <- c(Y0, rep(NA, N-1)) # Iterate N steps of the difference equation with values passed for Y0, k and r. 
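  # With the default cond = (Y = 0.2, par = "r", val = 2): iteration starts with
  # r = 0.1 and, once Y[t] first exceeds 0.2, evaluates "r = 2" so all remaining
  # iterations use the new growth rate.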
cnt <- 1 for(t in seq_along(Y)){ # Check if the current value of Y is greater than the threshold for the current conditional rule in cond if(Y[t] > cond$Y[cnt]){ # If the threshold is surpassed, change the parameter settings by evaluating: cond$par = cond$val eval(parse(text = paste(cond$par[cnt], "=", cond$val[cnt]))) # Update the counter if there is another conditional rule in cond if(cnt < nrow(cond)){cnt <- cnt + 1} } # Van Geert growth model Y[[t+1]] <- Y[t] * (1 + r - r * Y[t] / k) } return(ts(Y)) } # Complex Networks--------------------------------------------------------------- graph2svg <- function(TDM,pname){ # Create weighted Term-Term matrix tTM <- as.matrix(TDM) TTM <- tTM %*% t(tTM) TTM <- log1p(TTM) g <- graph.adjacency(TTM,weighted=T,mode="undirected",diag=F) g <- simplify(g) # Remove vertices that were used in the search query Vrem <- which(V(g)$name %in% c("~dev~","~dys~","~sld~","development","children","dyslexia")) g <- (g - V(g)$name[Vrem]) # Set colors and sizes for vertices V(g)$degree <- degree(g) rev <- scaleRange(log1p(V(g)$degree)) rev[rev<=0.3]<-0.3 V(g)$color <- rgb(scaleRange(V(g)$degree), 1-scaleRange(V(g)$degree), 0, rev) V(g)$size <- 10*scaleRange(V(g)$degree) V(g)$frame.color <- NA # set vertex labels and their colors and sizes V(g)$label <- V(g)$name V(g)$label.color <- rgb(0, 0, 0, rev) V(g)$label.cex <- scaleRange(V(g)$degree)+.1 # set edge width and color rew <- scaleRange(E(g)$weight) rew[rew<=0.3]<-0.3 E(g)$width <- 2*scaleRange(E(g)$weight) E(g)$color <- rgb(.5, .5, 0, rew) set.seed(958) svg(paste(pname,sep=""),width=8,height=8) plot(g, layout=layout.fruchterman.reingold(g)) dev.off() return(g) } # Plot vertex neighbourhood hoodGraph2svg <- function(TDM,Vname,pname){ # Create weighted Term-Term matrix tTM <- as.matrix(TDM) TTM <- tTM %*% t(tTM) TTM <- log1p(TTM) ig <- graph.adjacency(TTM,weighted=T,mode="undirected",diag=F) ig <- simplify(ig) # Remove vertices that were used in the search query Vrem <- which(V(ig)$name %in% c("~dev~","~dys~","~sld~","development","children","dyslexia")) ig <- (ig - V(ig)$name[Vrem]) # This is a deletion specific for the Neighbourhood graphs Vrem <- which(V(ig)$name %in% c("~rdsp~","~imp~","~som~","~bod~","~mlt~")) ig <- ig - V(ig)$name[Vrem] idx <- which(V(ig)$name==Vname) sg <- graph.neighborhood(ig, order = 1, nodes=V(ig)[idx], mode = 'all')[[1]] # set colors and sizes for vertices V(sg)$degree <- degree(sg) rev<-scaleRange(log1p(V(sg)$degree)) rev[rev<=0.3]<-0.3 V(sg)$color <- rgb(scaleRange(V(sg)$degree), 1-scaleRange(log1p(V(sg)$degree*V(sg)$degree)), 0, rev) V(sg)$size <- 35*scaleRange(V(sg)$degree) V(sg)$frame.color <- NA # set vertex labels and their colors and sizes V(sg)$label <- V(sg)$name V(sg)$label.color <- rgb(0, 0, 0, rev) V(sg)$label.cex <- scaleRange(V(sg)$degree) # set edge width and color rew<-scaleRange(E(sg)$weight) rew[rew<=0.3]<-0.3 E(sg)$width <- 6*scaleRange(E(sg)$weight) E(sg)$color <- rgb(.5, .5, 0, rew) idV <- which(V(sg)$name==Vname) idE <- incident(sg,V(sg)[[idV]]) E(sg)$color[idE] <- rgb(0, 0, 1 ,0.8) set.seed(958) idx <- which(V(sg)$name==Vname) svg(paste(pname,sep=""),width=8,height=8) plot(sg,layout=layout.star(sg,center=V(sg)[idx])) dev.off() return(sg) } lagEmbed <- function (y, emDim, emLag){ require(lubridate) if(any(is.ts(y), is.zoo(y), is.xts(y))){ timeVec <- time(y) emTime <- as_datetime(timeVec[emLag+1])- as_datetime(timeVec[1]) } else { timeVec <- index(y) emTime <- emLag } y <- as.numeric(unlist(y)) N <- NROW(y) if(emDim > 1){ lag.id <- seq(1, (emDim*emLag), emLag) maxN 
<- N - max(lag.id) emY <- matrix(nrow = maxN, ncol = emDim) for(tau in seq_along(lag.id)){ emY[,tau] = y[lag.id[tau]:(N-(rev(lag.id)[tau]))] } colnames(emY) <- paste0("tau.",0:(emDim-1)) } else { emY <- matrix(nrow = N, ncol = 1, byrow = TRUE, dimnames = list(NULL,"tau.0")) emY <- y } id <- deparse(substitute(y)) attr(emY, "embedding.dims") = emDim attr(emY, "embedding.lag") = emLag attr(emY, "embedding.time") = emTime attr(emY, "id") = id return(emY) } sliceTS<-function(TSmat,epochSz=1) { # Slice columns of TSmat in epochs of size = epochSz require(plyr) N<-dim(TSmat) return(llply(seq(1,N[1],epochSz),function(i) TSmat[i:min(i+epochSz-1,N[1]),1:N[2]])) } fltrIT <- function(TS,f){ # Apply filtfilt to TS using f (filter settings) require("signal") return(filtfilt(f=f,x=TS)) } SWtest0 <- function(g){ Nreps <- 10; histr <- vector("integer",Nreps) target<- round(mean(degree(g))) now <- target/2 for(i in 1:Nreps){ gt <- watts.strogatz.game(dim=1, size=length(degree(g)), nei=now, 0) histr[i] <- round(mean(degree(gt))) ifelse(histr[i] %in% histr,break,{ ifelse(histr[i]>target,{now<-now-1},{ ifelse(histr[i]100){stop("Vertices > 100, no need to use PLFsmall, use a binning procedure")} d <- degree(g) y <- hist(d,breaks=0.5:(max(d)+0.5),plot=FALSE)$counts if(length(y)<2){ warning("Less than 2 points in Log-Log regression... alpha=0") alpha <- 0 } else { if(length(y)==2){ warning("Caution... Log-Log slope is a bridge (2 points)") chop <- 0 } else { chop <- 1 } alpha <- coef(lm(rev(log1p(y)[1:(length(y)-chop)]) ~ log1p(1:(length(y)-chop))))[2] } return(alpha) } plotSW <- function(n,k,p){ g <- watts.strogatz.game(1, n, k, p) V(g)$degree <- degree(g) # set colors and sizes for vertices rev<-scale.R(log1p(V(g)$degree)) rev[rev<=0.2]<-0.2 rev[rev>=0.9]<-0.9 V(g)$rev <- rev$x V(g)$color <- rgb(V(g)$rev, 1-V(g)$rev, 0, 1) V(g)$size <- 25*V(g)$rev # set vertex labels and their colors and sizes V(g)$label <- "" E(g)$width <- 1 E(g)$color <- rgb(0.5, 0.5, 0.5, 1) return(g) } plotBA <- function(n,pwr,out.dist){ #require("Cairo") g <- barabasi.game(n,pwr,out.dist=out.dist,directed=FALSE) V(g)$degree <- degree(g) # set colors and sizes for vertices rev<-scale.R(log1p(V(g)$degree)) rev[rev<=0.2] <- 0.2 rev[rev>=0.9] <- 0.9 V(g)$rev <- rev$x V(g)$color <- rgb(V(g)$rev, 1-V(g)$rev, 0, 1) V(g)$size <- 25*V(g)$rev # V(g)$frame.color <- rgb(.5, .5, 0, .4) # set vertex labels and their colors and sizes V(g)$label <- "" E(g)$width <- 1 E(g)$color <- rgb(0.5, 0.5, 0.5, 1) return(g) } FDrel <- function(g){ d<-degree(g,mode="all") nbreaks <- round(length(V(g))/2)-1 y<-hist(d,breaks=nbreaks,plot=F)$density y<-y[y>0] return(FD <- -sum(y*log2(y))/-(log2(1/length(y)))) } sa2fd <- function(sa, ...) UseMethod("sa2fd") sa2fd.default <- function(sa, ...){ cat("No type specified.") } #' Informed Dimension estimate from Spectral Slope (aplha) #' #' @description Conversion formula: From periodogram based self-affinity parameter estimate (\code{sa}) to an informed estimate of the (fractal) dimension (FD). #' @param sa Self-Affinity parameter estimate based on PSD slope (e.g., \code{\link{fd.psd}})). #' #' @return An informed estimate of the Fractal Dimension, see Hasselman(2013) for details. #' @export #' #' @details The spectral slope will be converted to a dimension estimate using: #' #' \deqn{D_{PSD}\approx\frac{3}{2}+\frac{14}{33}*\tanh\left(Slope * \ln(1+\sqrt{2})\right)} #' #' @author Fred Hasselman #' @references Hasselman, F. (2013). 
When the blind curve is finite: dimension estimation and model inference based on empirical waveforms. Frontiers in Physiology, 4, 75. \url{http://doi.org/10.3389/fphys.2013.00075} #' #' @family SA to FD converters #' #' @examples #' # Informed FD of white noise #' sa2fd.psd(0) #' #' # Informed FD of Brownian noise #' sa2fd.psd(-2) #' #' # Informed FD of blue noise #' sa2fd.psd(2) sa2fd.psd <- function(sa){return(round(3/2 + ((14/33)*tanh(sa*log(1+sqrt(2)))), digits = 2))} #' Informed Dimension estimate from DFA slope (H) #' #' @description Conversion formula: Detrended Fluctuation Analysis (DFA) estimate of the Hurst exponent (a self-affinity parameter \code{sa}) to an informed estimate of the (fractal) dimension (FD). #' #' @param sa Self-Afinity parameter estimate based on DFA slope (e.g., \code{\link{fd.sda}})). #' #' @return An informed estimate of the Fractal Dimension, see Hasselman(2013) for details. #' #' @export #' #' @details The DFA slope (H) will be converted to a dimension estimate using: #' #' \deqn{D_{DFA}\approx 2-(\tanh(\log(3)*sa)) }{D_{DFA} ≈ 2-(tanh(log(3)*sa)) } #' #' @family SA to FD converters #' #' @author Fred Hasselman #' @references Hasselman, F. (2013). When the blind curve is finite: dimension estimation and model inference based on empirical waveforms. Frontiers in Physiology, 4, 75. \url{http://doi.org/10.3389/fphys.2013.00075} #' #' @examples #' # Informed FD of white noise #' sa2fd.dfa(0.5) #' #' # Informed FD of Pink noise #' sa2fd.dfa(1) #' #' # Informed FD of blue noise #' sa2fd.dfa(0.1) sa2fd.dfa <- function(sa){return(round(2-(tanh(log(3)*sa)), digits = 2))} #' Informed Dimension estimate from SDA slope. #' #' @description Conversion formula: Standardised Dispersion Analysis (SDA) estimate of self-affinity parameter (\code{SA}) to an informed estimate of the fractal dimension (FD). #' #' @param sa Self-afinity parameter estimate based on SDA slope (e.g., \code{\link{fd.sda}})). #' #' @details #' #' #' Note that for some signals different PSD slope values project to a single SDA slope. That is, SDA cannot distinguish dplyr::between all variaties of power-law scaling in the frequency domain. #' #' @return An informed estimate of the Fractal Dimension, see Hasselman(2013) for details. #' @export #' #' @author Fred Hasselman #' @references Hasselman, F. (2013). When the blind curve is finite: dimension estimation and model inference based on empirical waveforms. Frontiers in Physiology, 4, 75. \url{http://doi.org/10.3389/fphys.2013.00075} #' #' @family SA to FD converters #' #' @examples #' # Informed FD of white noise #' sa2fd.sda(-0.5) #' #' # Informed FD of Brownian noise #' sa2fd.sda(-1) #' #' # Informed FD of blue noise #' sa2fd.sda(-0.9) sa2fd.sda <- function(sa){return(1-sa)} # fd estimators --------------------------------------------------------------------------------------------------- fd <- function(y, ...) 
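# Worked values of the conversion formulas above:
#   sa2fd.psd(0)   = round(3/2 + (14/33)*tanh(0), 2)                    = 1.5  (white noise)
#   sa2fd.psd(-2)  = round(3/2 + (14/33)*tanh(-2*log(1 + sqrt(2))), 2)  = 1.1  (Brownian noise)
#   sa2fd.dfa(0.5) = round(2 - tanh(log(3)*0.5), 2)                     = 1.5  (white noise)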
UseMethod("fd") fd.default <- function(y, ...){ cat("No type specified.\nReturning exponential growth power law.") r = 1.01 y <- growth.ac(Y0=0.001, r=r, N=2048, type = "driving") tsp(y) <-c(1/500,2048/500,500) bulk <- log1p(hist(y,plot = F, breaks = seq(0,max(y),length.out = 129))$counts) size <- log1p(seq(0,2047,length.out = 128)) id<-bulk==0 lmfit <- lm(bulk[!id] ~ size[!id]) old <- ifultools::splitplot(2,1,1) plot(y, ylab = "Y", main = paste0('Exponential growth sap: ', round(coef(lmfit)[2],digits=2), ' | r:', r)) ifultools::splitplot(2,1,2) plot(size[!id],bulk[!id], xlab="Size = log(bin(Time))", ylab = "Bulk = logbin(Y)", pch=21, bg="grey60", pty="s") lines(size[!id], predict(lmfit),lwd=4,col="darkred") #legend("bottomleft",c(paste0("Range (n = ",sum(powspec$size<=0.25),")"), paste0("Hurvic-Deo estimate (n = ",nr,")")), lwd=c(3,3),col=c("darkred","darkblue"), cex = .8) par(old) } # PSD ------------------------------------------------------------------------------------------------------------- #' @title Power Spectral Density Slope (PSD). #' @description Estimate Alpha, Hurst Exponent and Fractal Dimension through log-log slope. #' #' @param y A numeric vector or time series object. #' @param normalize Normalize the series (default). #' @param detrend Subtract linear trend from the series (default). #' @param plot Return the log-log spectrum with linear fit (default). #' #' @author Fred Hasselman #' @references Hasselman, F. (2013). When the blind curve is finite: dimension estimation and model inference based on empirical waveforms. Frontiers in Physiology, 4, 75. \url{http://doi.org/10.3389/fphys.2013.00075} #' #' @return A list object containing: #' \itemize{ #' \item A data matrix \code{PLAW} with columns \code{freq.norm}, \code{size} and \code{bulk}. #' \item Estimate of scaling exponent \code{alpha} based on a fit over the lowest 25\% frequencies (\code{low25}), or using the HD estimate \code{HD}. #' \item Estimate of the the Fractal Dimension (\code{FD}) using conversion formula's reported in Hasselman(2013). #' \item Information output by various functions. #' } #' #' @family FD estimators #' #' @export #' #' @details Calls function \code{\link[sapa]{SDF}} to estimate the scaling exponent of a timeseries based on the periodogram frequency spectrum. After detrending and normalizing the signal (if requested), \code{SDF} is called using a Tukey window (\code{raised cosine \link[sapa]{taper}}). #' #' A line is fitted on the periodogram in log-log coordinates. Two fit-ranges are used: The 25\% lowest frequencies and the Hurvich-Deo estimate (\code{\link[fractal]{HDEst}}). #' #' @examples #' fd.psd(rnorm(2048), plot = TRUE) fd.psd <- function(y, fs = NULL, normalize = TRUE, dtrend = TRUE, plot = FALSE){ require(pracma) require(fractal) require(sapa) require(ifultools) if(!is.ts(y)){ if(is.null(fs)){fs <- 1} y <- ts(y, frequency = fs) cat("\n\nfd.psd:\tSample rate was set to 1.\n\n") } N <- length(y) # Simple linear detrending. if(dtrend) y <- ts(pracma::detrend(as.vector(y), tt = 'linear'), frequency = fs) # Normalize using N instead of N-1. if(normalize) y <- (y - mean(y, na.rm = TRUE)) / (sd(y, na.rm = TRUE)*sqrt((N-1)/N)) # Number of frequencies estimated cannot be set! (defaults to Nyquist) # Use Tukey window: cosine taper with r = 0.5 # fast = TRUE ensures padding with zeros to optimize FFT to highly composite number. # However, we just pad to nextPow2, except if length already is a power of 2. 
npad <- 1+(stats::nextn(N,factors=2)-N)/N npad <- stats::nextn(N) # if(N==npad) npad = 0 # psd <- stats::spec.pgram(y, fast = FALSE, demean=FALSE, detrend=FALSE, plot=FALSE, pad=npad, taper=0.5) Tukey <- sapa::taper(type="raised cosine", flatness = 0.5, n.sample = npad) psd <- sapa::SDF(y, taper. = Tukey, npad = npad) powspec <- cbind.data.frame(freq.norm = attr(psd, "frequency")[-1], size = attr(psd, "frequency")[-1]*frequency(y), bulk = as.matrix(psd)[-1]) # First check the global slope for anti-persistent noise (GT +0.20) # If so, fit the line starting from the highest frequency nr <- length(powspec[,1]) lsfit <- lm(log(powspec$bulk[1:nr]) ~ log(powspec$size[1:nr])) glob <- coef(lsfit)[2] # General guideline: fit over 25% frequencies # If signal is continuous (sampled) consider Wijnants et al. (2013) log-log fitting procedure nr <- fractal::HDEst(NFT = length(powspec[,1]), sdf = psd) exp1 <- fractal::hurstSpec(y, sdf.method = "direct", freq.max = 0.25, taper. = Tukey ) exp2 <- fractal::hurstSpec(y, sdf.method = "direct", freq.max = powspec$freq.norm[nr], taper. = Tukey) ifelse((glob > 0.2), { lmfit1 <- lm(log(rev(powspec$bulk[powspec$size<=0.25])) ~ log(rev(powspec$size[powspec$size<=0.25]))) lmfit2 <- lm(log(rev(powspec$bulk[1:nr])) ~ log(rev(powspec$size[1:nr]))) },{ lmfit1 <- lm(log(powspec$bulk[powspec$size<=0.25]) ~ log(powspec$size[powspec$size<=0.25])) lmfit2 <- lm(log(powspec$bulk[1:nr]) ~ log(powspec$size[1:nr])) }) if(plot){ old<- ifultools::splitplot(2,1,1) plot(y,ylab = "Y", main = paste0('Lowest 25% sap: ', round(coef(lmfit1)[2],digits=2), ' | H:', round(exp1,digits=2), ' | FD:',round(sa2fd.psd(coef(lmfit1)[2]),digits=2),'\nHurvic-Deo sap: ', round(coef(lmfit2)[2],digits=2), ' | H:', round(exp2,digits=2), ' | FD:',round(sa2fd.psd(coef(lmfit2)[2]),digits=2))) ifultools::splitplot(2,1,2) plot(log(powspec$bulk) ~ log(powspec$size), xlab="log(Frequency)", ylab = "log(Power)") lines(log(powspec$size[powspec$size<=0.25]), predict(lmfit1),lwd=3,col="darkred") lines(log(powspec$size[1:nr]), predict(lmfit2),lwd=3,col="darkblue") legend("bottomleft",c(paste0("lowest 25% (n = ",sum(powspec$size<=0.25),")"), paste0("Hurvic-Deo estimate (n = ",nr,")")), lwd=c(3,3),col=c("darkred","darkblue"), cex = .8) par(old) } return(list( PLAW = powspec, low25 = list(sap = coef(lmfit1)[2], H = exp1, FD = sa2fd.psd(coef(lmfit1)[2]), fitlm1 = lmfit1), HD = list(sap = coef(lmfit2)[2], H = exp2, FD = sa2fd.psd(coef(lmfit2)[2]), fitlm2 = lmfit2), info = psd) ) } # SDA ------------------------------------------------- #' fd.sda #' #' @title Standardised Dispersion Analysis (SDA). #' #' @param y A numeric vector or time series object. #' @param normalize Normalize the series (default). #' @param plot Return the log-log spectrum with linear fit (default). #' #' @author Fred Hasselman #' @references Hasselman, F. (2013). When the blind curve is finite: dimension estimation and model inference based on empirical waveforms. Frontiers in Physiology, 4, 75. \url{http://doi.org/10.3389/fphys.2013.00075} #' #' @return A list object containing: #' \itemize{ #' \item A data matrix \code{PLAW} with columns \code{freq.norm}, \code{size} and \code{bulk}. #' \item Estimate of scaling exponent \code{sap} based on a fit over the standard range (\code{fullRange}), or on a user defined range \code{fitRange}. #' \item Estimate of the the Fractal Dimension (\code{FD}) using conversion formula's reported in Hasselman(2013). #' \item Information output by various functions. 
# SDA -----------------------------------------------------------------------------------------------------------

#' fd.sda
#'
#' @title Standardised Dispersion Analysis (SDA).
#'
#' @param y A numeric vector or time series object.
#' @param fs Sample frequency (defaults to 1 if \code{y} is not a time series object).
#' @param normalize Normalize the series (default).
#' @param dtrend Subtract a linear trend from the series (default: FALSE).
#' @param scales The bin sizes to use, defaults to the scales returned by \code{\link[fractal]{dispersion}}.
#' @param fitRange Range of scales to fit, defaults to all but the two largest scales.
#' @param plot Return the log-log plot with linear fit (default: FALSE).
#'
#' @author Fred Hasselman
#' @references Hasselman, F. (2013). When the blind curve is finite: dimension estimation and model inference based on empirical waveforms. Frontiers in Physiology, 4, 75. \url{http://doi.org/10.3389/fphys.2013.00075}
#'
#' @return A list object containing:
#' \itemize{
#' \item A data matrix \code{PLAW} with columns \code{freq.norm}, \code{size} and \code{bulk}.
#' \item Estimate of scaling exponent \code{sap} based on a fit over the standard range (\code{fullRange}), or on a user defined range \code{fitRange}.
#' \item Estimate of the Fractal Dimension (\code{FD}) using the conversion formulas reported in Hasselman (2013).
#' \item Information output by various functions.
#' }
#'
#' @export
#'
#' @family FD estimators
#' @examples
fd.sda <- function(y, fs = NULL, normalize = TRUE, dtrend = FALSE,
                   scales = dispersion(y)$scale,
                   fitRange = c(scales[1], scales[length(scales)-2]),
                   plot = FALSE){
  require(pracma)
  require(fractal)

  if(!is.ts(y)){
    if(is.null(fs)){fs <- 1}
    y <- ts(y, frequency = fs)
    cat("\n\nfd.sda:\tSample rate was set to 1.\n\n")
  }
  # Make sure fs is defined when a time series object was passed in.
  fs <- frequency(y)

  N <- length(y)
  # Simple linear detrending.
  if(dtrend)    y <- ts(pracma::detrend(as.vector(y), tt = 'linear'), frequency = fs)
  # Normalize using N instead of N-1.
  if(normalize) y <- (y - mean(y, na.rm = TRUE)) / (sd(y, na.rm = TRUE) * sqrt((N-1)/N))

  bins <- which(fitRange[1] == scales):which(fitRange[2] == scales)
  out  <- dispersion(y, front = FALSE)

  lmfit1 <- lm(log(out$sd) ~ log(out$scale))
  lmfit2 <- lm(log(out$sd[bins]) ~ log(out$scale[bins]))

  if(plot){
    old <- ifultools::splitplot(2, 1, 1)
    plot(y, ylab = "Y", main = paste0('Full sap: ', round(coef(lmfit1)[2], digits = 2),
                                      ' | H: ',  round(1 + coef(lmfit1)[2], digits = 2),
                                      ' | FD: ', round(sa2fd.sda(coef(lmfit1)[2]), digits = 2),
                                      '\nRange sap: ', round(coef(lmfit2)[2], digits = 2),
                                      ' | H: ',  round(1 + coef(lmfit2)[2], digits = 2),
                                      ' | FD: ', round(sa2fd.sda(coef(lmfit2)[2]), digits = 2)))
    ifultools::splitplot(2, 1, 2)
    plot(log(out$sd) ~ log(out$scale), xlab = "log(Bin Size)", ylab = "log(SD)")
    lines(log(out$scale), predict(lmfit1), lwd = 3, col = "darkred")
    lines(log(out$scale[bins]), predict(lmfit2), lwd = 3, col = "darkblue")
    legend("bottomleft", c(paste0("Full (n = ", length(out$scale), ")"),
                           paste0("Range (n = ", length(bins), ")")),
           lwd = c(3,3), col = c("darkred","darkblue"), cex = .8)
    par(old)
  }

  return(list(
    PLAW = cbind.data.frame(freq.norm = frequency(y)/scales, size = out$scale, bulk = out$sd),
    fullRange = list(sap = coef(lmfit1)[2], H = 1 + coef(lmfit1)[2], FD = sa2fd.sda(coef(lmfit1)[2]), fitlm1 = lmfit1),
    fitRange  = list(sap = coef(lmfit2)[2], H = 1 + coef(lmfit2)[2], FD = sa2fd.sda(coef(lmfit2)[2]), fitlm2 = lmfit2),
    info = out))
}
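# Usage sketch (not part of the original file): SDA on standardized noise.
# Assumes the fractal package and sa2fd.sda() are available.
#   set.seed(1)
#   sdaOut <- fd.sda(rnorm(1024), plot = TRUE)
#   sdaOut$fitRange$FD   # FD near 1.5 is expected for white noise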
# DFA -------------------------------------------------------------------------------------------------------------

#' fd.dfa
#'
#' @title Detrended Fluctuation Analysis (DFA)
#'
#' @param y A numeric vector or time series object.
#' @param fs Sample frequency (defaults to 1 if \code{y} is not a time series object).
#' @param dtrend Method to use for detrending within bins (default \code{"poly1"}), see \code{\link[fractal]{DFA}}.
#' @param normalize Normalize the series (default: FALSE).
#' @param sum.order Integration order, see \code{\link[fractal]{DFA}}.
#' @param scale.max Maximum bin size (default: N/4).
#' @param scale.min Minimum bin size (default: 4).
#' @param scale.ratio Ratio between successive bin sizes (default: 2^(1/4)).
#' @param overlap Overlap between bins (default: 0).
#' @param plot Return the log-log plot with linear fit (default: FALSE).
#'
#' @return Estimate of the Hurst exponent (slope of \code{log(bin)} vs. \code{log(RMSE)}) and an FD estimate based on Hasselman (2013).
#' A list object containing:
#' \itemize{
#' \item A data matrix \code{PLAW} with columns \code{freq.norm}, \code{size} and \code{bulk}.
#' \item Estimate of scaling exponent \code{sap} based on a fit over the standard range (\code{fullRange}), or on a user defined range \code{fitRange}.
#' \item Estimate of the Fractal Dimension (\code{FD}) using the conversion formulas reported in Hasselman (2013).
#' \item Information output by various functions.
#' }
#'
#' @export
#'
#' @author Fred Hasselman
#' @references Hasselman, F. (2013). When the blind curve is finite: dimension estimation and model inference based on empirical waveforms. Frontiers in Physiology, 4, 75. \url{http://doi.org/10.3389/fphys.2013.00075}
#'
#' @family FD estimators
#' @examples
fd.dfa <- function(y, fs = NULL, dtrend = "poly1", normalize = FALSE, sum.order = 1,
                   scale.max = trunc(length(y)/4), scale.min = 4, scale.ratio = 2^(1/4),
                   overlap = 0, plot = FALSE){
  require(pracma)
  require(fractal)

  # signal::poly masks stats::poly, which is needed here.
  reload <- FALSE
  if("signal" %in% .packages()){
    warning("signal:poly is loaded and stats:poly is needed... will unload package:signal, compute slope, and reload...")
    reload <- TRUE
    detach("package:signal", unload = TRUE)
  }

  if(!is.ts(y)){
    if(is.null(fs)){fs <- 1}
    y <- ts(y, frequency = fs)
    cat("\n\nfd.dfa:\tSample rate was set to 1.\n\n")
  }

  N <- length(y)
  # Normalize using N instead of N-1.
  if(normalize) y <- (y - mean(y, na.rm = TRUE)) / (sd(y, na.rm = TRUE) * sqrt((N-1)/N))

  out1 <- fractal::DFA(y, detrend = dtrend, sum.order = sum.order, scale.max = trunc(length(y)/2),
                       scale.min = 2, scale.ratio = 2, overlap = 0, verbose = FALSE)
  out2 <- fractal::DFA(y, detrend = dtrend, sum.order = sum.order, scale.max = scale.max,
                       scale.min = scale.min, scale.ratio = scale.ratio, overlap = overlap, verbose = FALSE)

  lmfit1 <- lm(log(attributes(out1)$stat) ~ log(attributes(out1)$scale))
  lmfit2 <- lm(log(attributes(out2)$stat) ~ log(attributes(out2)$scale))

  if(plot){
    plot.new()
    old <- ifultools::splitplot(2, 1, 1)
    plot(y, ylab = "Y", main = paste0('Full sap: ', round(coef(lmfit1)[2], digits = 2),
                                      ' | H: ',  round(attributes(out1)$logfit[]$coefficients['x'], digits = 2),
                                      ' | FD: ', round(sa2fd.dfa(coef(lmfit1)[2]), digits = 2),
                                      '\nRange sap: ', round(coef(lmfit2)[2], digits = 2),
                                      ' | H: ',  round(attributes(out2)$logfit[]$coefficients['x'], digits = 2),
                                      ' | FD: ', round(sa2fd.dfa(coef(lmfit2)[2]), digits = 2)))
    ifultools::splitplot(2, 1, 2)
    plot(log(attributes(out1)$stat) ~ log(attributes(out1)$scale), xlab = "log(Bin Size)", ylab = "log(RMSE)")
    lines(log(attributes(out1)$scale), predict(lmfit1), lwd = 3, col = "darkred")
    lines(log(attributes(out2)$scale), predict(lmfit2), lwd = 3, col = "darkblue")
    legend("topleft", c(paste0("Full (n = ", length(attributes(out1)$scale), ")"),
                        paste0("Range (n = ", length(attributes(out2)$scale), ")")),
           lwd = c(3,3), col = c("darkred","darkblue"), cex = .8)
    par(old)
  }

  if(reload){library(signal, verbose = FALSE, quietly = TRUE)}

  return(list(
    PLAW = cbind.data.frame(freq.norm = scale.R(attributes(out1)$scale * frequency(y)),
                            size = attributes(out1)$scale, bulk = attributes(out1)$stat),
    fullRange = list(sap = coef(lmfit1)[2], H = attributes(out1)$logfit[]$coefficients['x'],
                     FD = sa2fd.dfa(coef(lmfit1)[2]), fitlm1 = lmfit1),
    fitRange  = list(sap = coef(lmfit2)[2], H = coef(lmfit2)[2],
                     FD = sa2fd.dfa(coef(lmfit2)[2]), fitlm2 = lmfit2),
    info = list(out1, out2)))
}
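# Usage sketch (not part of the original file): DFA on white noise.
# Assumes the fractal package and sa2fd.dfa() are available.
#   set.seed(1)
#   dfaOut <- fd.dfa(rnorm(1024), plot = TRUE)
#   dfaOut$fitRange$H   # H near 0.5 is expected for white noise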
#' Detrended Fluctuation Analysis
#'
#' @param signal An input signal.
#' @param mins Minimum scale (as a power of 2) to consider.
#' @param maxs Maximum scale (as a power of 2) to consider.
#' @param ressc Resolution: the number of scale steps between \code{mins} and \code{maxs}.
#' @param m Order of the polynomial used for detrending (currently not used; detrending is linear via \code{detRend}).
#'
#' @return The global (monofractal) Hurst exponent: the slope of \code{log2(F2)} vs. \code{log2(scale)}.
#' @export
#'
#' @examples
DFA1 <- function(signal, mins = 4, maxs = 12, ressc = 30, m = 1){
  require(pracma)
  require(plyr)
  require(dplyr)

  scale <- round(2^(seq(mins, maxs, by = ((maxs - mins)/ressc))))

  # Profile: integrate the mean-centered signal.
  Y   <- cumsum(signal - mean(signal))
  TSm <- as.matrix(cbind(t = 1:length(Y), y = Y))

  Hglobal <- monoH(TSm, scale)
  return(Hglobal)
}

# Multi-Fractal DFA -----------------------------------------------------------------------------------------------

#' Multi-fractal Detrended Fluctuation Analysis
#'
#' @param signal An input signal.
#' @param qq A vector containing a range of values for the order of fluctuation \code{q} (must include 0).
#' @param mins Minimum scale (as a power of 2) to consider.
#' @param maxs Maximum scale (as a power of 2) to consider.
#' @param ressc Resolution: the number of scale steps between \code{mins} and \code{maxs}.
#' @param m Order of the polynomial used for detrending (currently not used; detrending is linear via \code{detRend}).
#'
#' @return A list with the q-range (\code{q}), the generalised Hurst exponents (\code{Hq}), the mass exponents (\code{tq}), the singularity strengths (\code{hq}), the multifractal spectrum (\code{Dq}), the global Hurst exponent (\code{Hglobal}) and the adjustment applied to the profile (\code{Hadj}).
#' @export
#'
#' @examples
MFDFA <- function(signal, qq = c(-10, -5:5, 10), mins = 6, maxs = 12, ressc = 30, m = 1){
  require(pracma)
  require(plyr)
  require(dplyr)

  scale     <- round(2^(seq(mins, maxs, by = ((maxs - mins)/ressc))))
  RMS_scale <- vector("list", length(scale))
  qRMS      <- vector("list", length(qq))
  Fq        <- vector("list", length(qq))
  Hq        <- numeric(length(qq))

  # Profile: integrate the mean-centered signal.
  Y   <- cumsum(signal - mean(signal))
  TSm <- as.matrix(cbind(t = 1:length(Y), y = Y))

  # Check the global Hurst exponent and adjust the profile
  # when the signal is over- or under-integrated.
  Hglobal <- monoH(TSm, scale)
  Hadj <- 0
  if((Hglobal > 1.2) & (Hglobal < 1.8)){
    Y <- diff(signal)
    Hadj <- 1}
  if(Hglobal > 1.8){
    Y <- diff(diff(signal))
    Hadj <- 2}
  if(Hglobal < 0.2){
    Y <- cumsum(signal - mean(signal))
    Hadj <- -1}
  if(Hadj != 0){TSm <- as.matrix(cbind(t = 1:length(Y), y = cumsum(Y - mean(Y))))}

  for(ns in seq_along(scale)){
    # Root mean square of the detrended residuals within each bin.
    RMS_scale[[ns]] <- ldply(sliceTS(TSm, scale[ns]), function(sv){return(sqrt(mean(detRend(sv[,2])^2)))})
    for(nq in seq_along(qq)){
      qRMS[[nq]][1:length(RMS_scale[[ns]]$V1)] <- RMS_scale[[ns]]$V1^qq[nq]
      Fq[[nq]][ns] <- mean(qRMS[[nq]][1:length(RMS_scale[[ns]]$V1)])^(1/qq[nq])
      if(is.infinite(log2(Fq[[nq]][ns]))){Fq[[nq]][ns] <- NA}
    }
    # q = 0 requires the logarithmic average.
    Fq[[which(qq == 0)]][ns] <- exp(0.5 * mean(log(RMS_scale[[ns]]$V1^2)))
    if(is.infinite(log2(Fq[[which(qq == 0)]][ns]))){Fq[[which(qq == 0)]][ns] <- NA}
  }

  fmin <- 1
  fmax <- which(scale == max(scale))
  Hq <- ldply(Fq, function(Fqs){lm(log2(Fqs[fmin:fmax]) ~ log2(scale[fmin:fmax]), na.action = na.omit)$coefficients[2]})

  tq <- (Hq[,1] * qq) - 1
  hq <- diff(tq)/diff(qq)
  Dq <- (qq[1:(length(qq)-1)] * hq) - (tq[1:(length(qq)-1)])

  return(list(q = qq, Hq = Hq, tq = tq, hq = hq, Dq = Dq, Hglobal = Hglobal, Hadj = Hadj))
}

monoH <- function(TSm, scale){
  dfaRMS_scale <- vector("list", length(scale))
  F2 <- numeric(length(scale))
  for(ns in seq_along(scale)){
    # Root mean square of the detrended residuals within each bin.
    dfaRMS_scale[[ns]] <- ldply(sliceTS(TSm, scale[ns]), function(sv){return(sqrt(mean(detRend(sv[,2])^2)))})
    F2[ns] <- mean(dfaRMS_scale[[ns]]$V1^2)^(1/2)
    if(is.infinite(log2(F2[ns]))){F2[ns] <- NA}
  }
  return(lm(log2(F2) ~ log2(scale), na.action = na.omit)$coefficients[2])
}

detRend <- function(TS, Order = 1){
  detR <- lm(TS ~ stats::poly(1:length(TS), degree = Order))$residuals
  return(detR)
}
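# Usage sketch (not part of the original file): multifractal spectrum of noise.
# Assumes sliceTS(), the bin-slicing helper used above, is defined elsewhere in
# this file, and that plyr is loaded.
#   set.seed(1)
#   mf <- MFDFA(rnorm(4096), qq = c(-10, -5:5, 10), mins = 4, maxs = 10)
#   plot(mf$hq, mf$Dq, type = "b")           # a narrow spectrum is expected for white noise
#   DFA1(rnorm(4096), mins = 4, maxs = 10)   # global H near 0.5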
# Earlier loop-based implementation of the MFDFA core, kept for reference:
#
# for(ns in seq_along(scale)){
#   segv[ns] <- floor(length(X)/scale[ns])
#   for(v in 1:segv[ns]){
#     Index <- seq((((v-1)*scale[ns])+1), (v*scale[ns]), length = scale[ns])
#     Cslope <- polyfit(Index, X[Index], m)
#     fit    <- polyval(Cslope, Index)
#     RMS_scale[[ns]][v] <- sqrt(mean((X[Index]-fit)^2))
#     rm(Cslope, fit, Index)
#   }
#   for(nq in seq_along(qq)){
#     qRMS[[nq]][1:segv[ns]] <- RMS_scale[[ns]]^qq[nq]
#     Fq[[nq]][ns] <- mean(qRMS[[nq]][1:segv[ns]])^(1/qq[nq])
#   }
#   Fq[[which(qq==0)]][ns] <- exp(0.5*mean(log(RMS_scale[[ns]]^2)))
# }
#
# for(nq in seq_along(qq)){
#   Cslope <- polyfit(log2(scale), log2(Fq[[nq]]), 1)
#   Hq[nq] <- Cslope[1]
#   qRegLine[[nq]] <- polyval(Cslope, log2(scale))
#   rm(Cslope)
# }
#
# tq <- (Hq*qq)-1
# hq <- diff(tq)/diff(qq)
# Dq <- (qq[1:(length(qq)-1)]*hq) - (tq[1:(length(qq)-1)])
#
# plot(hq, Dq, type="l")
# qq <- c(-10, -5, seq(-2,2,.1), 5, 10)

# PLOTS -------------------------------------------------------------------

#' gg.theme
#'
#' @param type One of \code{"clean"}, or \code{"noax"}
#' @param useArial Use the Arial font (requires \code{.afm} font files in the \code{afmPATH})
#' @param afmPATH Path to Arial \code{.afm} font files.
#'
#' @details Will generate a \code{"clean"} ggplot theme, or a theme without any axes (\code{"noax"}).
#'
#' Some scientific journals explicitly request that the Arial font be used in figures. This can be achieved by using the \code{.afm} font format (see, e.g., http://www.pure-mac.com/font.html).
#'
#' @return A theme for \code{ggplot2}.
#' @export
#'
#' @examples
#' library(ggplot2)
#' g <- ggplot(data.frame(x = rnorm(n = 100), y = rnorm(n = 100)), aes(x = x, y = y)) + geom_point()
#' g + gg.theme()
#' g + gg.theme("noax")
gg.theme <- function(type = c("clean","noax"), useArial = FALSE, afmPATH = "~/Dropbox"){
  require(ggplot2)
  if(length(type) > 1){type <- type[1]}

  if(useArial){
    set.Arial(afmPATH)
    bf_font <- "Arial"
  } else {bf_font <- "Helvetica"}

  switch(type,
         clean = theme_bw(base_size = 16, base_family = bf_font) +
           theme(axis.text.x       = element_text(size = 14),
                 axis.title.y      = element_text(vjust = +1.5),
                 panel.grid.major  = element_blank(),
                 panel.grid.minor  = element_blank(),
                 legend.background = element_blank(),
                 legend.key        = element_blank(),
                 panel.border      = element_blank(),
                 panel.background  = element_blank(),
                 axis.line         = element_line(colour = "black")),
         noax = theme(line             = element_blank(),
                      text             = element_blank(),
                      title            = element_blank(),
                      plot.background  = element_blank(),
                      panel.border     = element_blank(),
                      panel.background = element_blank())
  )
}
#' gg.plotHolder
#'
#' @param useArial Use the Arial font (requires \code{.afm} font files in the \code{afmPATH})
#' @param afmPATH Path to Arial \code{.afm} font files.
#'
#' @return A blank \code{ggplot2} object that can be used in concordance with \code{grid.arrange}.
#' @export
#'
#' @examples
#' # Create a plot with marginal distributions.
#' library(ggplot2)
#' library(scales)
#'
#' df <- data.frame(x = rnorm(n = 100), y = rnorm(n = 100), group = factor(sample(x = c(0,1), size = 100, replace = TRUE)))
#'
#' scatterP <- ggplot(df, aes(x = x, y = y, colour = group)) + geom_point() + gg.theme()
#' xDense <- ggplot(df, aes(x = x, fill = group)) + geom_density(aes(y = ..count..), trim = FALSE, alpha = .5) + gg.theme("noax") + theme(legend.position = "none")
#' yDense <- ggplot(df, aes(x = y, fill = group)) + geom_density(aes(y = ..count..), trim = FALSE, alpha = .5) + coord_flip() + gg.theme("noax") + theme(legend.position = "none")
#'
#' library(gridExtra)
#' grid.arrange(xDense, gg.plotHolder(), scatterP, yDense, ncol = 2, nrow = 2, widths = c(4, 1.4), heights = c(1.4, 4))
gg.plotHolder <- function(useArial = FALSE, afmPATH = "~/Dropbox"){
  require(ggplot2)
  ggplot() +
    geom_blank(aes(1,1)) +
    theme(line             = element_blank(),
          text             = element_blank(),
          title            = element_blank(),
          plot.background  = element_blank(),
          panel.border     = element_blank(),
          panel.background = element_blank())
}

set.Arial <- function(afmPATH = "~/Dropbox"){
  # Set up the PDF device on Mac OS X to use Arial as a font in graphs.
  if(nchar(afmPATH) > 0){
    if(file.exists(paste0(afmPATH, "/Arial.afm"))){
      Arial <- Type1Font("Arial",
                         c(paste0(afmPATH, "/Arial.afm"),
                           paste0(afmPATH, "/Arial Bold.afm"),
                           paste0(afmPATH, "/Arial Italic.afm"),
                           paste0(afmPATH, "/Arial Bold Italic.afm")))
      if(!"Arial" %in% names(pdfFonts())){pdfFonts(Arial = Arial)}
      if(!"Arial" %in% names(postscriptFonts())){postscriptFonts(Arial = Arial)}
      return(invisible(NULL))
    } else {
      message("useArial=TRUE: The directory did not contain the *.afm version of the Arial font family.")
    }
  } else {
    message("useArial=TRUE: Please provide the path to the *.afm version of the Arial font family.")
  }
}

# Colour the vertices and edges of an igraph object by group membership.
netGroupCol <- function(g, grp){
  library(scales)
  if(length(grp) <= 12){
    groupColours <- brewer_pal(palette = "Set3")(length(grp))
  } else {
    groupColours <- gradient_n_pal(brewer_pal(palette = "Set3")(12))(seq(0, 1, length.out = length(grp)))
  }
  E(g)$alpha <- rescale(E(g)$weight)
  E(g)$color <- "#D9D9D9"
  for(c in seq_along(grp)){
    if(length(grp[[c]]) > 0){
      V(g)[grp[[c]]]$color <- groupColours[c]
      if(length(E(g)[from(V(g)[grp[[c]]])]) > 0){
        id <- E(g)[from(V(g)[grp[[c]]])]$color %in% "#D9D9D9"
        if(any(id)){
          E(g)[from(V(g)[grp[[c]]])[id]]$color <- add.alpha(groupColours[c], alpha = E(g)[from(V(g)[grp[[c]]])[id]]$alpha)
        }
      }
    }
  }
  return(g)
}

# Log-log plot of the PLAW output of the fd.* estimators, with the fitted slope.
plot.loglog <- function(fd.OUT){
  require(ggplot2)
  require(scales)
  g <- ggplot2::ggplot(fd.OUT$PLAW, aes(x = size, y = bulk), na.rm = TRUE) +
    scale_x_log10(breaks = log_breaks(n = abs(diff(range(round(log10(fd.OUT$PLAW$size))) + c(-1,1))), base = 10),
                  labels = trans_format("log10", math_format(10^.x)),
                  limits = range(round(log10(fd.OUT$PLAW$size))) + c(-1,1)) +
    scale_y_log10(breaks = log_breaks(n = abs(diff(range(round(log10(fd.OUT$PLAW$bulk))) + c(-1,1))), base = 10),
                  labels = trans_format("log10", math_format(10^.x)),
                  limits = range(round(log10(fd.OUT$PLAW$bulk))) + c(-1,1)) +
    geom_point() +
    geom_abline(intercept = fd.OUT[[2]]$fitlm1$coefficients[[1]],
                slope     = fd.OUT[[2]]$fitlm1$coefficients[[2]],
                colour = "red", size = 2) +
    ggtitle(paste0("Regression over ", length(fd.OUT[[2]]$fitlm1$fitted.values), " frequencies/bins")) +
    xlab("Frequency (log10)") + ylab("Power (log10)") +
    annotation_logticks() +
    annotate("text", x = 10^-2, y = 10^5, label = paste0("Slope = ", round(fd.OUT[[2]]$sap, digits = 2))) +
    gg.theme("clean")
  return(g)
}

## Add an alpha value to a colour
add.alpha <- function(col, alpha = 1){
  if(missing(col)) stop("Please provide a vector of colours.")
  apply(sapply(col, col2rgb)/255, 2, function(x) rgb(x[1], x[2], x[3], alpha = alpha))
}
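# Usage sketch (not part of the original file):
#   add.alpha(c("red", "steelblue"), alpha = 0.5)   # returns semi-transparent hex colours
#   # plot.loglog(fd.psd(rnorm(1024)))              # log-log plot of an fd.* output list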
# Variability Analyses --------------------------------------------------------------------------------------------

# #' PSDslope
# #'
# #' @param y A time series object, or a vector that can be converted to a time series object.
# #' @param fs Sample frequency (defaults to 1).
# #' @param nfft Number of frequencies to estimate (defaults to next power of 2).
# #' @param fitRange Vector of length 2 with range of frequencies to perform log-log fit.
# #' @param plot Plot the log-log spectrum and slope.
# #'
# #' @return
# #' @export
# #'
# #' @examples
# PSDslope <- function(y = ts(rnorm(n = 1024), frequency = 1),
#                      fs = frequency(y),
#                      nfft = 2^(nextpow2(length(y)/2)),
#                      fitRange = c(1, round(.1*nfft)),
#                      plot = FALSE){
#   require(oce)
#   require(signal)
#   if(!is.ts(y)){y <- ts(y, frequency = fs)}
#
#   win <- signal::hamming(n = nfft)
#
#   perioGram <- oce::pwelch(x = y, window = win, fs = frequency(y), nfft = nfft, plot = FALSE)
#   spec <- data.frame(Frequency = perioGram$freq, Power = perioGram$spec)
#   spec[1, 1:2] <- NA
#   # Note: the original version regressed Power on itself; Power should be regressed on Frequency.
#   fit <- lm(log10(spec$Power[fitRange[1]:fitRange[2]]) ~ log10(spec$Frequency[fitRange[1]:fitRange[2]]))
#   return(list(spec = spec, slope = fit))
# }
#' Scale.R
#'
#' @description Rescale a vector to a user-defined range.
#'
#' @param x Input vector or data frame.
#' @param mn Minimum value of the original range, defaults to \code{min(x, na.rm = TRUE)}.
#' @param mx Maximum value of the original range, defaults to \code{max(x, na.rm = TRUE)}.
#' @param lo Minimum value to rescale to, defaults to \code{0}.
#' @param hi Maximum value to rescale to, defaults to \code{1}.
#'
#' @details Three uses:
#' \enumerate{
#' \item scale.R(x) - Scale x to the data range: min(x.out)==0; max(x.out)==1
#' \item scale.R(x,mn,mx) - Scale x to the argument range: min(x.out)==mn==0; max(x.out)==mx==1
#' \item scale.R(x,mn,mx,lo,hi) - Scale x to the argument range: min(x.out)==mn==lo; max(x.out)==mx==hi
#' }
#'
#' @return The rescaled vector or data frame.
#' @export
#'
#' @examples
#' # Works on numeric objects
#' somenumbers <- cbind(c(-5,100,sqrt(2)), c(exp(1),0,-pi))
#'
#' scale.R(somenumbers)
#' scale.R(somenumbers, mn = -100)
#'
#' # Values < mn will return < lo (default = 0)
#' # Values > mx will return > hi (default = 1)
#' scale.R(somenumbers, mn = -1, mx = 99)
#'
#' scale.R(somenumbers, lo = -1, hi = 1)
#' scale.R(somenumbers, mn = -10, mx = 101, lo = -1, hi = 4)
scale.R <- function(x, mn = min(x, na.rm = TRUE), mx = max(x, na.rm = TRUE), lo = 0, hi = 1){
  x <- as.data.frame(x)
  u <- x
  for(i in 1:ncol(x)){
    # Use the user-supplied range if one was given; otherwise use each column's own range.
    mn.i <- if(missing(mn)){min(x[,i], na.rm = TRUE)} else {mn}
    mx.i <- if(missing(mx)){max(x[,i], na.rm = TRUE)} else {mx}
    if(mn.i > mx.i){warning("Minimum (mn) > maximum (mx).")}
    if(lo > hi){warning("Lowest scale value (lo) > highest scale value (hi).")}
    if(mn.i == mx.i){
      u[,i] <- rep(mx.i, length(x[,i]))
    } else {
      u[,i] <- (((x[,i] - mn.i) * (hi - lo)) / (mx.i - mn.i)) + lo
      id <- complete.cases(u[,i])
      u[!id,i] <- 0
    }
  }
  return(u)
}

# Rmd2htmlWP <- function(infile, outfile, sup = TRUE){
#   require(markdown)
#   require(knitr)
#   mdOpt <- markdownHTMLOptions(default = TRUE)
#   mdOpt <- mdOpt[mdOpt != "mathjax"]
#   mdExt <- markdownExtensions()
#   mdExt <- mdExt[mdExt != "latex_math"]
#   if(sup == TRUE){
#     mdExt <- mdExt[mdExt != "superscript"]
#   }
#   knit2html(input = infile, output = outfile, options = c(mdOpt), extensions = c(mdExt))
# }

# MULTIPLOT FUNCTION ------------------------------------------------------------------------------------------------------------------
# [copied from http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/ ]
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols:   Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom. See the usage sketch below this function.
multi.PLOT <- function(..., plotlist = NULL, file, cols = 1, layout = NULL){
  require(grid)

  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)

  # If layout is NULL, then use 'cols' to determine layout
  if(is.null(layout)){
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from the number of columns
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }

  if(numPlots == 1){
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))

    # Make each plot, in the correct location
    for(i in 1:numPlots){
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
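# Usage sketch (not part of the original file): three ggplots, the third
# spanning the full bottom row.
#   library(ggplot2)
#   p <- ggplot(data.frame(x = 1:10, y = rnorm(10)), aes(x, y)) + geom_point()
#   multi.PLOT(p, p, p, layout = matrix(c(1,2,3,3), nrow = 2, byrow = TRUE))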
#' TRY ... CATCH
#'
#' @details
#' In longer simulations, aka computer experiments, you may want to:
#' 1) catch all errors and warnings (and continue),
#' 2) store the error or warning messages.
#'
#' Here's a solution (see the R-help mailing list, Dec 9, 2010):
#' Catch *and* save both errors and warnings, and in the case of
#' a warning, also keep the computed result.
#'
#' @title tryCatch both warnings (with value) and errors
#' @param expr an \R expression to evaluate
#' @return a list with 'value' and 'warning', where 'value' may be an error caught.
#' @author Martin Maechler; Copyright (C) 2010-2012 The R Core Team
try.CATCH <- function(expr){
  W <- NULL
  w.handler <- function(w){ # warning handler
    W <<- w
    invokeRestart("muffleWarning")
  }
  list(value = withCallingHandlers(tryCatch(expr, error = function(e) e),
                                   warning = w.handler),
       warning = W)
}
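# Usage sketch (not part of the original file): the warning is muffled and
# stored alongside the computed value; errors are returned as the value.
#   try.CATCH(log(-1))    # $value is NaN, $warning holds "NaNs produced"
#   try.CATCH(log("a"))   # $value is the simpleError object, $warning is NULL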