SlideShare a Scribd company logo
1 of 31
Prepared by Volkan OBAN
Advanced Data Visualization in R- Somes Examples.
geomorph package in R....
Example:
Code:
>library(geomorph)
> data(scallopPLY)
> ply <- scallopPLY$ply
> digitdat <- scallopPLY$coords
> plotspec(spec=ply,digitspec=digitdat,fixed=16, centered =TRUE)
Example:
> data(scallops)
> Y.gpa<-gpagen(A=scallops$coorddata, curves=scallops$curvslide,
surfaces=scallops$surfslide)
> ref<-mshape(Y.gpa$coords)
> plotRefToTarget(ref,Y.gpa$coords[,,1],method="TPS", mag=3)
Reference:
http://www.public.iastate.edu/~dcadams/PDFPubs/Quick%20Guide%20to%20Geomorph%20v2.0.pdf
Example:
> boxplot.ej <- function(y, xloc = 1, width.box = 0.25, lwd.box = 2, width
.hor = 0.25,
+ lwd.hor = 2, range.wisk = 1.5, lwd.wisk = 2, pch.
box = 16, cex.boxpoint = 2,
+ plot.outliers = FALSE, pch.out = 1, cex.out = 1,
color = "black") {
+
+ # makes boxplot with dot as median and solid whisker Interquartile r
ange =
+ # (.75 quantile) - (.25 quantile). Note: Wiskers are not always sym
metrical;
+ # top wisker extends up to max(y) constrained by y <= (.75 quantile)
+
+ # range.wisk*Interquartile range bottom whisker is determined by min
(y)
+ # constrained by y >= (.25 quantile) - range.wisk*Interquartile rang
e
+
+ Q <- quantile(y, c(0.25, 0.5, 0.75))
+ names(Q) <- NULL # gets rid of percentages
+ IQ.range <- Q[3] - Q[1]
+ low <- Q[1] - range.wisk * IQ.range
+ high <- Q[3] + range.wisk * IQ.range
+ index <- which((y >= low) & (y <= high))
+ wisk.low <- min(y[index])
+ wisk.high <- max(y[index])
+ outliers <- y[which((y < low) | (y > high))]
+
+ # plot median:
+ points(xloc, Q[2], pch = pch.box, cex = cex.boxpoint, col = color)
+
+ # plot box:
+ xleft <- xloc - width.box/2
+ xright <- xloc + width.box/2
+ ybottom <- Q[1]
+ ytop <- Q[3]
+ rect(xleft, ybottom, xright, ytop, lwd = lwd.box, border = color)
+
+ # plot whiskers:
+ segments(xloc, wisk.low, xloc, Q[1], lwd = lwd.wisk, col = color)
+ segments(xloc, Q[3], xloc, wisk.high, lwd = lwd.wisk, col = color)
+
+ # plot horizontal segments:
+ x0 <- xloc - width.hor/2
+ x1 <- xloc + width.hor/2
+ segments(x0, wisk.low, x1, wisk.low, lwd = lwd.hor, col = color)
+ segments(x0, wisk.high, x1, wisk.high, lwd = lwd.hor, col = color)
+
+ # plot outliers:
+ if (plot.outliers == TRUE) {
+ xloc.p <- rep(xloc, length(outliers))
+ points(xloc.p, outliers, pch = pch.out, cex = cex.out, col = col
or)
+ }
+ }
>
> RT.hf.sp <- rnorm(1000, mean = 0.41, sd = 0.008)
> RT.lf.sp <- rnorm(1000, mean = 0.43, sd = 0.01)
> RT.vlf.sp <- rnorm(1000, mean = 0.425, sd = 0.012)
> RT.hf.ac <- rnorm(1000, mean = 0.46, sd = 0.008)
> RT.lf.ac <- rnorm(1000, mean = 0.51, sd = 0.01)
> RT.vlf.ac <- rnorm(1000, mean = 0.52, sd = 0.012)
>
> ps <- 1 # size of boxpoint
> par(cex.main = 1.5, mar = c(5, 6, 4, 5) + 0.1, mgp = c(3.5, 1, 0), cex.l
ab = 1.5,
+ font.lab = 2, cex.axis = 1.3, bty = "n", las = 1)
> x <- c(1, 2, 3, 4)
> plot(x, c(-10, -10, -10, -10), type = "p", ylab = " ", xlab = " ", cex =
1.5,
+ ylim = c(0.3, 0.6), xlim = c(1, 4), lwd = 2, pch = 5, axes = FALSE,
main = " ")
> axis(1, at = c(1.5, 2.5, 3.5), labels = c("HF", "LF", "VLF"))
> mtext("Word Frequency", side = 1, line = 3, cex = 1.5, font = 2)
> axis(2, pos = 1.1)
> par(las = 0)
> mtext("Group Mean M", side = 2, line = 2.9, cex = 1.5, font = 2)
>
> x <- c(1.5, 2.5, 3.5)
> boxplot.ej(RT.hf.sp, xloc = 1.5, cex.boxpoint = ps)
> boxplot.ej(RT.hf.ac, xloc = 1.5, cex.boxpoint = ps, color = "grey")
> boxplot.ej(RT.lf.sp, xloc = 2.5, cex.boxpoint = ps)
> boxplot.ej(RT.lf.ac, xloc = 2.5, cex.boxpoint = ps, color = "grey")
> boxplot.ej(RT.vlf.sp, xloc = 3.5, cex.boxpoint = ps)
> boxplot.ej(RT.vlf.ac, xloc = 3.5, cex.boxpoint = ps, color = "grey")
>
> text(2.5, 0.35, "Speed", cex = 1.4, font = 1, adj = 0.5)
> text(2.5, 0.57, "Accuracy", cex = 1.4, font = 1, col = "grey", adj = 0.5
)
>
>
"
> RT.hf.sp <- rnorm(1000, mean = 0.41, sd = 0.008)
> RT.lf.sp <- rnorm(1000, mean = 0.43, sd = 0.01)
> RT.vlf.sp <- rnorm(1000, mean = 0.425, sd = 0.012)
> RT.hf.ac <- rnorm(1000, mean = 0.46, sd = 0.008)
> RT.lf.ac <- rnorm(1000, mean = 0.51, sd = 0.01)
> RT.vlf.ac <- rnorm(1000, mean = 0.52, sd = 0.012)
>
> library(sm)
> # by Henrik Singmann customized violinplot function (singmann.org) the
> # original violinplot function stems from the 'vioplot' package Copyrigh
t (c)
> # 2004, Daniel Adler. All rights reserved. Redistribution and use in so
urce
> # and binary forms, with or without modification, are permitted provided
that
> # the following conditions are met: * Redistributions of source code mus
t
> # retain the above copyright notice, this list of conditions and the
> # following disclaimer. * Redistributions in binary form must reproduce
the
> # above copyright notice, this list of conditions and the following
> # disclaimer in the documentation and/or other materials provided with t
he
> # distribution. * Neither the name of the University of Goettingen nor
the
> # names of its contributors may be used to endorse or promote products
> # derived from this software without specific prior written permission.
THIS
> # SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND
> # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PUR
POSE
> # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE
> # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINE
SS
> # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER I
N
> # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE
)
> # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE
> # POSSIBILITY OF SUCH DAMAGE.
>
> vioplot.singmann <- function(x, ..., range = 1.5, h = NULL, ylim = NULL,
names = NULL,
+ horizontal = FALSE, col = NULL, border = "b
lack", lty = 1, lwd = 1, rectCol = "black",
+ colMed = "white", pchMed = 19, at, add = FA
LSE, wex = 1, mark.outlier = TRUE,
+ pch.mean = 4, ids = NULL, drawRect = TRUE,
yaxt = "s") {
+
+ # process multiple datas
+ datas <- list(x, ...)
+ n <- length(datas)
+ if (missing(at))
+ at <- 1:n
+ # pass 1 - calculate base range - estimate density setup parameters
for
+ # density estimation
+ upper <- vector(mode = "numeric", length = n)
+ lower <- vector(mode = "numeric", length = n)
+ q1 <- vector(mode = "numeric", length = n)
+ q3 <- vector(mode = "numeric", length = n)
+ med <- vector(mode = "numeric", length = n)
+ base <- vector(mode = "list", length = n)
+ height <- vector(mode = "list", length = n)
+ outliers <- vector(mode = "list", length = n)
+ baserange <- c(Inf, -Inf)
+
+ # global args for sm.density function-call
+ args <- list(display = "none")
+
+ if (!(is.null(h)))
+ args <- c(args, h = h)
+ for (i in 1:n) {
+ data <- datas[[i]]
+ if (!is.null(ids))
+ names(data) <- ids
+ if (is.null(names(data)))
+ names(data) <- as.character(1:(length(data)))
+
+ # calculate plot parameters 1- and 3-quantile, median, IQR, uppe
r- and
+ # lower-adjacent
+ data.min <- min(data)
+ data.max <- max(data)
+ q1[i] <- quantile(data, 0.25)
+ q3[i] <- quantile(data, 0.75)
+ med[i] <- median(data)
+ iqd <- q3[i] - q1[i]
+ upper[i] <- min(q3[i] + range * iqd, data.max)
+ lower[i] <- max(q1[i] - range * iqd, data.min)
+
+ # strategy: xmin = min(lower, data.min)) ymax = max(upper, data.
max))
+ est.xlim <- c(min(lower[i], data.min), max(upper[i], data.max))
+
+ # estimate density curve
+ smout <- do.call("sm.density", c(list(data, xlim = est.xlim), ar
gs))
+
+ # calculate stretch factor the plots density heights is defined
in range 0.0
+ # ... 0.5 we scale maximum estimated point to 0.4 per data
+ hscale <- 0.4/max(smout$estimate) * wex
+
+ # add density curve x,y pair to lists
+ base[[i]] <- smout$eval.points
+ height[[i]] <- smout$estimate * hscale
+ t <- range(base[[i]])
+ baserange[1] <- min(baserange[1], t[1])
+ baserange[2] <- max(baserange[2], t[2])
+ min.d <- boxplot.stats(data)[["stats"]][1]
+ max.d <- boxplot.stats(data)[["stats"]][5]
+ height[[i]] <- height[[i]][(base[[i]] > min.d) & (base[[i]] < ma
x.d)]
+ height[[i]] <- c(height[[i]][1], height[[i]], height[[i]][length
(height[[i]])])
+ base[[i]] <- base[[i]][(base[[i]] > min.d) & (base[[i]] < max.d)
]
+ base[[i]] <- c(min.d, base[[i]], max.d)
+ outliers[[i]] <- list(data[(data < min.d) | (data > max.d)], nam
es(data[(data <
+
min.d) | (data > max.d)]))
+
+ # calculate min,max base ranges
+ }
+ # pass 2 - plot graphics setup parameters for plot
+ if (!add) {
+ xlim <- if (n == 1)
+ at + c(-0.5, 0.5) else range(at) + min(diff(at))/2 * c(-1, 1
)
+
+ if (is.null(ylim)) {
+ ylim <- baserange
+ }
+ }
+ if (is.null(names)) {
+ label <- 1:n
+ } else {
+ label <- names
+ }
+ boxwidth <- 0.05 * wex
+
+ # setup plot
+ if (!add)
+ plot.new()
+ if (!horizontal) {
+ if (!add) {
+ plot.window(xlim = xlim, ylim = ylim)
+ axis(2)
+ axis(1, at = at, label = label)
+ }
+
+ box()
+ for (i in 1:n) {
+ # plot left/right density curve
+ polygon(c(at[i] - height[[i]], rev(at[i] + height[[i]])), c(
base[[i]],
+
rev(base[[i]])), col = col, border = border, lty = lty, lwd = lwd)
+
+ if (drawRect) {
+ # browser() plot IQR
+ boxplot(datas[[i]], at = at[i], add = TRUE, yaxt = yaxt,
pars = list(boxwex = 0.6 *
+
wex, outpch = if (mark.outlier) "" else 1))
+ if ((length(outliers[[i]][[1]]) > 0) & mark.outlier)
+ text(rep(at[i], length(outliers[[i]][[1]])), outlier
s[[i]][[1]],
+ labels = outliers[[i]][[2]])
+ # lines( at[c( i, i)], c(lower[i], upper[i]) ,lwd=lwd, l
ty=lty) plot 50% KI
+ # box rect( at[i]-boxwidth/2, q1[i], at[i]+boxwidth/2, q
3[i], col=rectCol)
+ # plot median point points( at[i], med[i], pch=pchMed, c
ol=colMed )
+ }
+ points(at[i], mean(datas[[i]]), pch = pch.mean, cex = 1.3)
+ }
+ } else {
+ if (!add) {
+ plot.window(xlim = ylim, ylim = xlim)
+ axis(1)
+ axis(2, at = at, label = label)
+ }
+
+ box()
+ for (i in 1:n) {
+ # plot left/right density curve
+ polygon(c(base[[i]], rev(base[[i]])), c(at[i] - height[[i]],
rev(at[i] +
+
height[[i]])), col = col, border = border, lty = lty, lwd = lwd)
+
+ if (drawRect) {
+ # plot IQR
+ boxplot(datas[[i]], yaxt = yaxt, at = at[i], add = TRUE,
pars = list(boxwex = 0.8 *
+
wex, outpch = if (mark.outlier) "" else 1))
+ if ((length(outliers[[i]][[1]]) > 0) & mark.outlier)
+ text(rep(at[i], length(outliers[[i]][[1]])), outlier
s[[i]][[1]],
+ labels = outliers[[i]][[2]])
+ # lines( at[c( i, i)], c(lower[i], upper[i]) ,lwd=lwd, l
ty=lty)
+ }
+ points(at[i], mean(datas[[i]]), pch = pch.mean, cex = 1.3)
+ }
+ }
+ invisible(list(upper = upper, lower = lower, median = med, q1 = q1,
q3 = q3))
+ }
>
> # plot
> par(cex.main = 1.5, mar = c(5, 6, 4, 5) + 0.1, mgp = c(3.5, 1, 0), cex.l
ab = 1.5,
+ font.lab = 2, cex.axis = 1.3, bty = "n", las = 1)
> x <- c(1, 2, 3, 4)
> plot(x, c(-10, -10, -10, -10), type = "p", ylab = " ", xlab = " ", cex =
1.5,
+ ylim = c(0.3, 0.6), xlim = c(1, 4), lwd = 2, pch = 5, axes = F, mai
n = " ")
> axis(1, at = c(1.5, 2.5, 3.5), labels = c("HF", "LF", "VLF"))
> axis(2, pos = 1.1)
> mtext("Word Frequency", side = 1, line = 3, cex = 1.5, font = 2)
>
> par(las = 0)
> mtext("Group Mean M", side = 2, line = 2.9, cex = 1.5, font = 2)
>
> x <- c(1.5, 2.5, 3.5)
>
> vioplot.singmann(RT.hf.sp, RT.lf.sp, RT.vlf.sp, add = TRUE, mark.outlier
= FALSE,
+ at = c(1.5, 2.5, 3.5), wex = 0.4, yaxt = "n")
> vioplot.singmann(RT.hf.ac, RT.lf.ac, RT.vlf.ac, add = TRUE, mark.outlier
= FALSE,
+ at = c(1.5, 2.5, 3.5), wex = 0.4, col = "grey", border
= "grey", rectCol = "grey",
+ colMed = "grey", yaxt = "n")
>
> text(2.5, 0.35, "Speed", cex = 1.4, font = 1, adj = 0.5)
> text(2.5, 0.58, "Accuracy", cex = 1.4, font = 1, col = "grey", adj = 0.5
)
Example:
plotsebargraph = function(loc, value, sterr, wiskwidth, color = "grey", lin
ewidth = 2) {
w = wiskwidth/2
segments(x0 = loc, x1 = loc, y0 = value, y1 = value + sterr, col = colo
r,
lwd = linewidth)
segments(x0 = loc - w, x1 = loc + w, y0 = value + sterr, y1 = value + s
terr,
col = color, lwd = linewidth) # upper whiskers
}
plotsegraph = function(loc, value, sterr, wiskwidth, color = "grey", linewi
dth = 2) {
w = wiskwidth/2
segments(x0 = loc, x1 = loc, y0 = value - sterr, y1 = value + sterr, co
l = color,
lwd = linewidth)
segments(x0 = loc - w, x1 = loc + w, y0 = value + sterr, y1 = value + s
terr,
col = color, lwd = linewidth) # upper whiskers
segments(x0 = loc - w, x1 = loc + w, y0 = value - sterr, y1 = value - s
terr,
col = color, lwd = linewidth) # lower whiskers
}
# =======================================================
# Data; order = Speed, neutral, accuracy
MRT <- c(429, 515, 555)
MRT.se <- c(25, 25, 30)
Er <- c(0.23, 0.14, 0.13)
Er.se <- c(0.022, 0.021, 0.021)
# ======================================================
par(cex.main = 1.5, mar = c(5, 6, 4, 5) + 0.1, mgp = c(3.5, 1, 0), cex.lab
= 1.5,
font.lab = 2, cex.axis = 1.3, bty = "n", las = 1)
# mpg = c(3, 1, 0) is default. first = axis labels!; middle = tick labels m
ar
# = c(5, 4, 4, 2) + 0.1 is default
digitsize <- 1.2
x <- c(1, 2, 3, 4)
plot(x, c(-10, -10, -10, -10), type = "p", ylab = " Mean Response Time (ms.
)",
xlab = " ", cex = 1.5, ylim = c(200, 800), xlim = c(1, 4), lwd = 2, pch
= 5,
axes = F, main = " ")
axis(1, at = c(1.5, 2.5, 3.5), labels = c("Speed", "Neutral", "Accuracy"))
mtext("Cue", side = 1, line = 3, cex = 1.5, font = 2)
axis(2, at = c(300, 400, 500, 600, 700))
x = c(1.5, 2.5, 3.5)
points(x, MRT, cex = 1.5, lwd = 2, pch = 19)
plot.errbars = plotsegraph(x, MRT, MRT.se, 0.1, color = "black") #0.1 = wi
skwidth
lines(c(1.5, 2.5, 3.5), MRT, lwd = 2, type = "c")
text(1.5, MRT[1] + 60, "429", adj = 0.5, cex = digitsize)
text(2.5, MRT[2] + 60, "515", adj = 0.5, cex = digitsize)
text(3.5, MRT[3] + 60, "555", adj = 0.5, cex = digitsize)
par(new = TRUE)
x <- c(1, 2, 3, 4)
plot(x, c(-10, -10, -10, -10), type = "p", ylab = " ", xlab = " ", cex = 1.
5,
ylim = c(0, 1), xlim = c(1, 4), lwd = 2, axes = FALSE, main = " ")
axis(4, at = c(0, 0.1, 0.2, 0.3, 0.4), las = 1)
grid::grid.text("Mean Proportion of Errors", 0.97, 0.5, rot = 270, gp = gri
d::gpar(cex = 1.5,
font = 2))
width <- 0.25
linewidth <- 2
x0 <- 1.5 - width
x1 <- 1.5 + width
y0 <- 0
y1 <- Er[1]
segments(x0, y0, x0, y1, lwd = linewidth)
segments(x0, y1, x1, y1, lwd = linewidth)
segments(x1, y1, x1, y0, lwd = linewidth)
segments(x1, y0, x0, y0, lwd = linewidth)
x0 <- 2.5 - width
x1 <- 2.5 + width
y0 <- 0
y1 <- Er[2]
segments(x0, y0, x0, y1, lwd = linewidth)
segments(x0, y1, x1, y1, lwd = linewidth)
segments(x1, y1, x1, y0, lwd = linewidth)
segments(x1, y0, x0, y0, lwd = linewidth)
x0 <- 3.5 - width
x1 <- 3.5 + width
y0 <- 0
y1 <- Er[3]
segments(x0, y0, x0, y1, lwd = linewidth)
segments(x0, y1, x1, y1, lwd = linewidth)
segments(x1, y1, x1, y0, lwd = linewidth)
segments(x1, y0, x0, y0, lwd = linewidth)
loc.errbars <- c(1.5, 2.5, 3.5)
plot.errbars <- plotsebargraph(loc.errbars, Er, Er.se, 0.2, color = "black"
) # 0.2 = wiskwidth
text(1.5, 0.9, "Behavioral Data", font = 2, cex = 2, pos = 4)
text(1.5, 0.05, "0.23", adj = 0.5, cex = digitsize)
text(2.5, 0.05, "0.14", adj = 0.5, cex = digitsize)
text(3.5, 0.05, "0.13", adj = 0.5, cex = digitsize)
Example:
xbar.therapy <- 92
s.therapy <- 8.5
xbar.placebo <- 85
s.placebo <- 9.1
n <- 15
xdiff <- xbar.therapy - xbar.placebo
sdiff <- sqrt((s.therapy^2 + s.placebo^2)/2) * sqrt(2/n)
sdiff <- sqrt(s.therapy^2 + s.placebo^2)/sqrt(n)
muH0 <- 0
muH1 <- 8
t0 <- (xdiff - muH0)/sdiff
par(cex.main = 1.5, mar = c(4, 4.5, 4.5, 1), mgp = c(3.5, 1, 0), cex.lab =
1.5,
font.lab = 2, cex.axis = 1.8, bty = "n", las = 1)
par(mar = c(4, 4.5, 4.5, 1)
x <- seq(-15, 30, by = 0.001)
y <- dt(x/sdiff, df = 28)
y3 <- dt((x - 9)/sdiff, df = 28)
plot(x, y, type = "l", axes = FALSE, xlab = NA, ylab = NA, xlim = c(-15, 25
),
lwd = 2)
lines(x, y3, lwd = 2)
axis(side = 1, at = seq(-15, 30, by = 5), pos = 0, lwd = 2, cex.axis = 1.7)
axis(side = 1, at = 7, pos = 0, col = "red4", col.axis = "red4", lwd = 2, p
adj = 0.1)
abline(v = xdiff, col = "red4", lwd = 2)
L0 <- dt((xdiff/sdiff), df = 28)
L2 <- dt(((xdiff - 9)/sdiff), df = 28)
lines(c(6.7, 7.3), y = rep(L0, 2), col = "red4", lwd = 2)
lines(c(6.7, 7.3), y = rep(L2, 2), col = "red4", lwd = 2)
text(8, L0, expression(paste(italic("L"), " = .04")), adj = 0, col = "red4"
,
cex = 1.8)
text(7.5, L2, expression(paste(italic("L"), " = .32")), adj = 0, col = "red
4",
cex = 1.8)
text(-16, 0.35, expression(paste(H[0], " : ", mu[diff], " = 0", sep = "")),
adj = 0,
cex = 1.8)
text(-16, 0.3, expression(paste(H[1], " : ", mu[diff], " = 9", sep = "")),
adj = 0,
cex = 1.8)
mtext(expression(bar(x)[diff]), side = 1, line = 2, at = 6.5, adj = 0, col
= "red4",
cex = 1.8, padj = 0.1)
text(14, 0.2, expression(paste("LR = ", frac(".32", ".04") %~~% 8, sep = ""
)),
adj = 0, col = "red4", cex = 1.8)
Example:
Max.BF10 = function(p) {
# Computes the upper bound on the Bayes factor As in Sellke, Bayarri, &
# Berger, 2001
Max.BF10 <- -1/(exp(1) * p * log(p))
return(Max.BF10)
}
# Plot this function for p in .001 to .1
xlow <- 0.001
xhigh <- 0.1
p1 <- 0.0373
p2 <- 0.00752
p3 <- 0.001968
par(cex.main = 1.5, mar = c(5, 6, 4, 5) + 0.1, mgp = c(3.5, 1, 0), cex.lab
= 1.5,
font.lab = 2, cex.axis = 1.3, bty = "n", las = 1)
plot(function(p) Max.BF10(p), xlow, xhigh, xlim = c(xlow, xhigh), lwd = 2,
xlab = " ",
ylab = " ")
mtext("Two-sided p value", 1, line = 2.5, cex = 1.5, font = 2)
mtext("Maximum Bayes factor for H1", 2, line = 2.8, cex = 1.5, font = 2, la
s = 0)
lines(c(0, p1), c(3, 3), lwd = 2, col = "grey")
lines(c(0, p2), c(10, 10), lwd = 2, col = "grey")
lines(c(0, p3), c(30, 30), lwd = 2, col = "grey")
lines(c(p1, p1), c(0, 3), lwd = 2, col = "grey")
lines(c(p2, p2), c(0, 10), lwd = 2, col = "grey")
lines(c(p3, p3), c(0, 30), lwd = 2, col = "grey")
cexsize <- 1.2
text(0.005, 31, expression(max((BF[10])) == 30 %<->% p %~~% 0.002), cex = c
exsize,
pos = 4)
text(0.01, 11, expression(max((BF[10])) == 10 %<->% p %~~% 0.008), cex = ce
xsize,
pos = 4)
text(p1 - 0.005, 5, expression(max((BF[10])) == 3 %<->% p %~~% 0.037), cex
= cexsize,
pos = 4)
Example:
# rm(list = ls())
IndividualPerformance <- function(choice, lo, show.losses = FALSE) {
# Plots the choice profile Args: choice: A vector containing the choice
s on
# each trial lo: A vector containing the losses on each trial show.loss
es:
# logical: Should the losses be indicated by filled dots?
par(mar = c(4, 4.5, 0.5, 1))
plot(choice, type = "b", axes = FALSE, xlab = "Trial", ylab = "Deck", c
ex.lab = 2)
axis(1, seq(0, 100, length = 6), cex.axis = 1.8)
axis(2, 1:4, labels = c("A", "B", "C", "D"), cex.axis = 1.8, las = 1)
if (show.losses == TRUE) {
index.losses <- which(lo < 0)
points(matrix(c(index.losses, choice[index.losses]), byrow = FALSE,
nrow = length(index.losses)),
pch = 19, lwd = 1.5)
}
}
# Synthetic data
choice <- sample(1:4, 100, replace = TRUE)
lo <- sample(c(-1250, -250, -50, 0), 100, replace = TRUE)
# postscript('DiversePerformance.eps', width = 7, height = 7)
IndividualPerformance(choice, lo, show.losses = TRUE)
# dev.off()
Example:
library(plotrix)
# mix of 2 normal distributions
mixedNorm <- function(x) {
return(0.5 * dnorm(x, 0.25, 0.13) + 0.5 * dnorm(x, 0.7, 0.082))
}
### normalize so that area [0,1] integrates to 1; k = normalizing constant
k <- 1/integrate(mixedNorm, 0, 1)$value
# normalized
pdfmix <- function(x, k) {
return(k * (0.5 * dnorm(x, 0.25, 0.13) + 0.5 * dnorm(x, 0.7, 0.082)))
}
# integrate(pdfmix, 0.0790321,0.4048)$value # 0.4
op <- par(mfrow = c(1, 2), mar = c(5.9, 6, 4, 2) + 0.1)
barplot(height = c(0.2, 0.25, 0.1, 0.05, 0.35, 0.05), names.arg = c(1,
2, 3, 4, 5, 6), axes = FALSE, ylim = c(0, 1), width = 1, cex.names = 1.
5)
arrows(x0 = 0.6, x1 = 0.6, y0 = 0.38, y1 = 0.23, length = c(0.2, 0.2),
lwd = 2)
text(0.6, 0.41, "0.2", cex = 1.3)
ablineclip(v = 1.9, y1 = 0.28, y2 = 0.375, lwd = 2)
ablineclip(v = 4.2, y1 = 0.28, y2 = 0.375, lwd = 2)
ablineclip(h = 0.375, x1 = 1.9, x2 = 4.2, lwd = 2)
arrows(x0 = 3.05, x1 = 3.05, y0 = 0.525, y1 = 0.375, length = c(0.2, 0.2),
lwd = 2)
text(3.05, 0.555, "0.4", cex = 1.3)
ablineclip(v = 5.5, y1 = 0.38, y2 = 0.43, lwd = 2)
arrows(x0 = 6.7, x1 = 6.7, y0 = 0.43, y1 = 0.09, length = c(0.2, 0.2),
lwd = 2)
ablineclip(h = 0.43, x1 = 5.5, x2 = 6.7, lwd = 2)
text(6.1, 0.46, "7 x", cex = 1.3)
par(las = 1)
axis(2, at = seq(0, 1, 0.1), labels = seq(0, 1, 0.1), lwd = 2, cex.axis = 1
.3)
par(las = 0)
mtext("Probability Mass", side = 2, line = 3.7, cex = 2)
mtext("Value", side = 1, line = 3.7, cex = 2)
par(mar = c(4.6, 6, 3.3, 2) + 0.1)
xx <- c(0.0790321, 0.079031, seq(0.08, 0.4, 0.01), 0.4084, 0.4084)
yy <- c(0, pdfmix(0.079031, k = k), pdfmix(seq(0.08, 0.4, 0.01), k = k), pd
fmix(0.4084, k = k),
0)
plot(1, type = "n", axes = FALSE, ylab = "", xlab = "", xlim = c(0, 1),
ylim = c(0, 3))
polygon(xx, yy, col = "grey", border = NA)
curve(pdfmix(x, k = k), from = 0, to = 1, lwd = 2, ylab = "", xlab = "", xl
im = c(0,
1), ylim = c(0, 3), add = TRUE)
text(0.25, 0.7, "0.4", cex = 1.3)
par(las = 1)
axis(2, at = seq(0, 3, 0.5), labels = seq(0, 3, 0.5), lwd = 2, cex.axis = 1
.3)
points(0.539580297, pdfmix(0.539580297, k = k), pch = 21, bg = "white", cex
= 1.4,
lwd = 2.7)
points(uniroot(function(x) pdfmix(x, k = k) - 5 * pdfmix(0.539580297, k = k
), interval = c(0.56,
0.7))$root, pdfmix(uniroot(function(x) pdfmix(x, k = k) - 5 * pdfmix(0.
539580297, k = k),
interval = c(0.56, 0.7))$root, k = k), pch = 21, bg = "white", cex = 1.
4,
lwd = 2.7)
arrows(x0 = 0.539580297, x1 = 0.539580297, y0 = 2.7, y1 = 0.7, length = c(0
.17,
0.17), angle = 19, lwd = 2)
ablineclip(h = 2.7, x1 = 0.539580297, x2 = 0.6994507, lwd = 2)
ablineclip(v = 0.6994507, y1 = 2.55, y2 = 2.7, lwd = 2)
text(0.6194593, 2.79, "5 x", cex = 1.3)
axis(1, at = seq(0, 1, 0.1), labels = c("0", ".1", ".2", ".3", ".4", ".5",
".6", ".7", ".8", ".9", "1"), line = -1.2, lwd = 2, cex.axis = 1.37)
par(las = 0)
mtext("Probability Density", side = 2, line = 3.7, cex = 2)
mtext("Value", side = 1, line = 2.4, cex = 2)
par(op)
Example:
library("psych")
library("qgraph")
# Load BFI data:
data(bfi)
bfi <- bfi[, 1:25]
# Groups and names object (not needed really, but make the plots easier to
# interpret):
Names <- scan("http://sachaepskamp.com/files/BFIitems.txt", what = "charact
er", sep = "n")
# Create groups object:
Groups <- rep(c("A", "C", "E", "N", "O"), each = 5)
# Compute correlations:
cor_bfi <- cor_auto(bfi)
# Plot correlation network:
graph_cor <- qgraph(cor_bfi, layout = "spring", nodeNames = Names, groups =
Groups, legend.cex = 0.6,
DoNotPlot = TRUE)
# Plot partial correlation network:
graph_pcor <- qgraph(cor_bfi, graph = "concentration", layout = "spring", n
odeNames = Names,
groups = Groups, legend.cex = 0.6, DoNotPlot = TRUE)
# Plot glasso network:
graph_glas <- qgraph(cor_bfi, graph = "glasso", sampleSize = nrow(bfi), lay
out = "spring",
nodeNames = Names, legend.cex = 0.6, groups = Groups, legend.cex = 0.7,
GLratio = 2,
DoNotPlot = TRUE)
# centrality plot (all graphs):
centralityPlot(list(r = graph_cor, `Partial r` = graph_pcor, glasso = graph
_glas),
labels = Names) + labs(colour = "") + theme_bw() + theme(legend.positio
n = "bottom")
Example:
### prior & posterior parameters
mean.prior <- 75
sd.prior <- 12
mean.posterior <- 73.33644
sd.posterior <- 4.831067
### plot settings
xlim <- c(40, 115)
ylim <- c(0, 0.117)
lwd <- 2
lwd.points <- 2
lwd.axis <- 1.2
cex.points <- 1.4
cex.axis <- 1.2
cex.text <- 1.1
cex.labels <- 1.3
cexLegend <- 1.2
op <- par(mar = c(5.1, 4.1, 4.1, 2.1))
### create empty canvas
plot(1, xlim = xlim, ylim = ylim, axes = FALSE, xlab = "", ylab = "")
### shade prior area < 70
greycol1 <- rgb(0, 0, 0, alpha = 0.2)
greycol2 <- rgb(0, 0, 0, alpha = 0.4)
polPrior <- seq(xlim[1], 70, length.out = 400)
xx <- c(polPrior, polPrior[length(polPrior)], polPrior[1])
yy <- c(dnorm(polPrior, mean.prior, sd.prior), 0, 0)
polygon(xx, yy, col = greycol1, border = NA)
### shade posterior area < 70
polPosterior <- seq(xlim[1], 70, length.out = 400)
xx <- c(polPosterior, polPosterior[length(polPosterior)], polPosterior[1])
yy <- c(dnorm(polPosterior, mean.posterior, sd.posterior), 0, 0)
polygon(xx, yy, col = greycol2, border = NA)
### shade posterior area on interval (81, 84)
polPosterior2 <- seq(81, 84, length.out = 400)
xx <- c(polPosterior2, polPosterior2[length(polPosterior2)], polPosterior2[
1])
yy <- c(dnorm(polPosterior2, mean.posterior, sd.posterior), 0, 0)
polygon(xx, yy, col = greycol2, border = NA)
### grey dashed lines to prior mean, posterior mean and posterior at 77
lines(rep(mean.prior, 2), c(0, dnorm(mean.prior, mean.prior, sd.prior)), lt
y = 2, col = "grey",
lwd = lwd)
lines(rep(mean.posterior, 2), c(0, dnorm(mean.posterior, mean.posterior, sd
.posterior)),
lty = 2, col = "grey", lwd = lwd)
lines(rep(mean.posterior + (mean.posterior - 70), 2), c(0, dnorm(mean.poste
rior + (mean.posterior -
70), mean.posterior, sd.posterior)), lty = 2, col = "grey", lwd = lwd)
### axes
axis(1, at = seq(xlim[1], xlim[2], 5), cex.axis = cex.axis, lwd = lwd.axis)
axis(2, labels = FALSE, tck = 0, lwd = lwd.axis, line = -0.5)
### axes labels
mtext("IQ Bob", side = 1, cex = 1.6, line = 2.4)
mtext("Density", side = 2, cex = 1.5, line = 0)
### plot prior and posterior
# prior
plot(function(x) dnorm(x, mean.prior, sd.prior), xlim = xlim, ylim = ylim,
xlab = "",
ylab = "", lwd = lwd, lty = 3, add = TRUE)
# posterior
plot(function(x) dnorm(x, mean.posterior, sd.posterior), xlim = xlim, ylim
= ylim, add = TRUE,
lwd = lwd)
### add points
# posterior density at 70
points(70, dnorm(70, mean.posterior, sd.posterior), pch = 21, bg = "white",
cex = cex.points,
lwd = lwd.points)
# posterior density at 76.67
points(mean.posterior + (mean.posterior - 70), dnorm(mean.posterior + (mean
.posterior -
70), mean.posterior, sd.posterior), pch = 21, bg = "white", cex = cex.p
oints, lwd = lwd.points)
# maximum a posteriori value
points(mean.posterior, dnorm(mean.posterior, mean.posterior, sd.posterior),
pch = 21,
bg = "white", cex = cex.points, lwd = lwd.points)
### credible interval
CIlow <- qnorm(0.025, mean.posterior, sd.posterior)
CIhigh <- qnorm(0.975, mean.posterior, sd.posterior)
yCI <- 0.11
arrows(CIlow, yCI, CIhigh, yCI, angle = 90, code = 3, length = 0.1, lwd = l
wd)
text(mean.posterior, yCI + 0.0042, labels = "95%", cex = cex.text)
text(CIlow, yCI, labels = paste(round(CIlow, 2)), cex = cex.text, pos = 2,
offset = 0.3)
text(CIhigh, yCI, labels = paste(round(CIhigh, 2)), cex = cex.text, pos = 4
, offset = 0.3)
### legend
legendPosition <- 115
legend(legendPosition, ylim[2] + 0.002, legend = c("Posterior", "Prior"), l
ty = c(1,
3), bty = "n", lwd = c(lwd, lwd), cex = cexLegend, xjust = 1, yjust = 1
, x.intersp = 0.6,
seg.len = 1.2)
### draw labels
# A
arrows(x0 = 57, x1 = 61, y0 = dnorm(62, mean.prior, sd.prior) + 0.0003, y1
= dnorm(62,
mean.prior, sd.prior) - 0.007, length = c(0.08, 0.08), lwd = lwd, code
= 2)
text(55.94, dnorm(5, mean.prior, sd.prior) + 0.0205, labels = "A", cex = ce
x.labels)
# B
arrows(x0 = 64.5, x1 = 69, y0 = dnorm(68, mean.posterior, sd.posterior) + 0
.003, y1 = dnorm(68,
mean.posterior, sd.posterior) - 0.005, length = c(0.08, 0.08), lwd = lw
d, code = 2)
text(63.5, dnorm(68, mean.posterior, sd.posterior) + 0.0042, labels = "B",
cex = cex.labels)
# C
arrows(x0 = mean.posterior + 1, x1 = mean.posterior + 6, y0 = dnorm(mean.po
sterior, mean.posterior,
sd.posterior) + 0.001, y1 = dnorm(mean.posterior, mean.posterior, sd.po
sterior) +
0.008, length = c(0.08, 0.08), lwd = lwd, code = 1)
text(mean.posterior + 7, dnorm(mean.posterior, mean.posterior, sd.posterior
) + 0.0093,
labels = "C", cex = cex.labels)
# D
arrows(x0 = 70 - 0.25, x1 = 70 - 0.25, y0 = dnorm(70, mean.posterior, sd.po
sterior) +
0.005, y1 = 0.092, length = c(0.08, 0.08), lwd = lwd, code = 1)
lines(c(70 - 0.25, mean.posterior), rep(0.092, 2), lwd = lwd)
arrows(x0 = mean.posterior, x1 = mean.posterior, y0 = 0.092, y1 = dnorm(mea
n.posterior,
mean.posterior, sd.posterior) + 0.003, length = c(0.08, 0.08), lwd = lw
d, code = 2)
ratio <- dnorm(mean.posterior, mean.posterior, sd.posterior)/dnorm(70, mean
.posterior,
sd.posterior)
text(mean(c(70 - 0.255, mean.posterior)), 0.096, labels = paste(round(ratio
, 2), "x"),
cex = cex.text)
text(70 - 1.5, dnorm(70, mean.posterior, sd.posterior) + 0.02, labels = "D"
, cex = cex.labels)
# E
arrows(x0 = 70 + 1, x1 = mean.posterior + (mean.posterior - 70) - 1, y0 = d
norm(70, mean.posterior,
sd.posterior), y1 = dnorm(mean.posterior + (mean.posterior - 70), mean.
posterior,
sd.posterior), length = c(0.08, 0.08), lwd = lwd, code = 3)
text(74.9, dnorm(mean.posterior + (mean.posterior - 70), mean.posterior, sd
.posterior) -
0.005, labels = "E", cex = cex.labels)
# F
arrows(x0 = 82.5, x1 = 87, y0 = dnorm(82, mean.posterior, sd.posterior) - 0
.012, y1 = dnorm(82,
mean.posterior, sd.posterior) - 0.005, length = c(0.08, 0.08), lwd = lw
d, code = 1)
text(88, dnorm(82, mean.posterior, sd.posterior) - 0.0034, labels = "F", ce
x = cex.labels)
# G
arrows(x0 = CIhigh + 6, x1 = CIhigh + 8.2, y0 = yCI, y1 = yCI, length = c(0
.08, 0.08),
lwd = lwd, code = 1)
text(CIhigh + 9.5, yCI, labels = "G", cex = cex.labels)
### additional information
scores <- "Bob's IQ scores: {73, 67, 79}"
priorText1 <- "Prior distribution:"
priorText2 <- expression(paste("IQ Bob ~ N(", 75, ", ", 12^2, ")"))
posteriorText1 <- "Posterior distribution:"
posteriorText2 <- expression(paste("IQ Bob ~ N(", 73.34, ", ", 4.83^2, ")")
)
xx <- 87
yCI2 <- 0.12
text(xx, yCI2 - 0.033, labels = priorText1, cex = cexLegend, pos = 4, offse
t = 0.3)
text(xx, yCI2 - 0.042, labels = priorText2, cex = cexLegend, pos = 4, offse
t = 0.3)
text(xx, yCI2 - 0.059, labels = scores, cex = cexLegend, pos = 4, offset =
0.3)
text(xx, yCI2 - 0.074, labels = posteriorText1, cex = cexLegend, pos = 4, o
ffset = 0.3)
text(xx, yCI2 - 0.083, labels = posteriorText2, cex = cexLegend, pos = 4, o
ffset = 0.3)
par(op)
Example:
> library(metafor)
Zorunlu paket yükleniyor: Matrix
Loading 'metafor' package (version 1.9-9).
> g <- c(-0.35, -0.67, -0.25, -0.22, -0.22, -0.36, -0.67, -0.25, -0.22, -0.
22, -0.36, -0.22,
+ -0.67, -0.25, -0.22, -0.36, -0.67, -0.25, -0.22, -0.22, -0.36)
>
> gSE <- c(0.469041575982343, 0.469041575982343, 0.458257569495584, 0.45825
7569495584,
+ 0.458257569495584, 0.458257569495584, 0.469041575982343, 0.45825
7569495584, 0.458257569495584,
+ 0.458257569495584, 0.458257569495584, 0.458257569495584, 0.46904
1575982343, 0.458257569495584,
+ 0.458257569495584, 0.458257569495584, 0.469041575982343, 0.45825
7569495584, 0.458257569495584,
+ 0.458257569495584, 0.458257569495584)
>
> cMeanSmile <- c("3.48", "3.75", "3.48", "3.67", "3.60", "3.58", "3.75", "
3.48", "3.67",
+ "3.60", "3.58", "3.67", "3.75", "3.48", "3.60", "3.58", "
3.75", "3.48", "3.67", "3.60",
+ "3.58")
> cMeanPout <- c("3.96", "4.81", "3.81", "3.94", "3.88", "4.03", "4.81", "3
.81", "3.94",
+ "3.88", "4.03", "3.94", "4.81", "3.81", "3.88", "4.03", "4
.81", "3.81", "3.94", "3.88", "4.03")
> forest(x = g, sei = gSE, xlab = "Hedges' g", cex.lab = 1.4, ilab = cbind(
cMeanSmile,
+
cMeanPout), ilab.xpos = c(-3.2, -2.5), cex.axis = 1.1, mlab = "Meta-Analyti
c Effect:",
+ lwd = 1.4, rows = 22:2, addfit = FALSE, atransf = FALSE, ylim = c(
-2, 25))
There were 50 or more warnings (use warnings() to see the first 50)
>
> text(-4.05, 24, "Study", cex = 1.3)
> text(-3.2, 24, "Smile", cex = 1.3)
> text(-2.5, 24, "Pout", cex = 1.3)
> text(2.75, 24, "Hedges' g [95% CI]", cex = 1.3)
>
> abline(h = 1, lwd = 1.4)
> addpoly(metaG, atransf = FALSE, row = -1, cex = 1.3, mlab = "Meta-Analyti
c Effect Size:")
Reference: http://shinyapps.org/apps/RGraphCompendium/index.php
Example:
> library(tidyr)
> library(plotly)
> s <- read.csv("https://raw.githubusercontent.com/plotly/datasets/master/s
chool_earnings.csv")
> s <- s[order(s$Men), ]
> gather(s, Sex, value, Women, Men) %>%
+ plot_ly(x = value, y = School, mode = "markers",
+ color = Sex, colors = c("pink", "blue")) %>%
+ add_trace(x = value, y = School, mode = "lines",
+ group = School, showlegend = F, line = list(color = "gray")
) %>%
+ layout(
+ title = "Gender earnings disparity",
+ xaxis = list(title = "Annual Salary (in thousands)"),
+ margin = list(l = 65)
+ )
Example:
> library(lattice)
> library(ggplot2)
> quakes$Magnitude <- equal.count(quakes$mag, 4)
> pl <- cloud(depth ~ lat * long | Magnitude, data = quakes,
+ zlim = rev(range(quakes$depth)), screen = list(z = 105,x = -70), panel.as
pect = 0.75, xlab = "Longitude",ylab = "Latitude", zlab = "Depth")
> print(pl)
> pl
Example:
kx <- function(u, v) cos(u) * (r + cos(u/2) * sin(t *v) - sin(u/2) * sin(2 * t * v))
ky <- function(u, v) sin(u) * (r + cos(u/2) * sin(t *v) - sin(u/2) * sin(2 * t * v))
kz <- function(u, v) sin(u/2) * sin(t * v) + cos(u/2) *sin(t * v)
n <- 50
u <- seq(0.3, 1.25, length = n) * 2 * pi
v <- seq(0, 1, length = n) * 2 * pi
um <- matrix(u, length(u), length(u))
vm <- matrix(v, length(v), length(v), byrow = TRUE)
r <- 2
t <- 1
pl <- wireframe(kz(um, vm) ~ kx(um, vm) + ky(um, vm),shade = TRUE,screen = list(z = 170, x = -
60), alpha = 0.75,panel.aspect = 0.6, aspect = c(1, 0.4))
print(pl)

More Related Content

What's hot

ggtimeseries-->ggplot2 extensions
ggtimeseries-->ggplot2 extensions ggtimeseries-->ggplot2 extensions
ggtimeseries-->ggplot2 extensions Dr. Volkan OBAN
 
Data Visualization with R.ggplot2 and its extensions examples.
Data Visualization with R.ggplot2 and its extensions examples.Data Visualization with R.ggplot2 and its extensions examples.
Data Visualization with R.ggplot2 and its extensions examples.Dr. Volkan OBAN
 
Plot3D Package and Example in R.-Data visualizat,on
Plot3D Package and Example in R.-Data visualizat,onPlot3D Package and Example in R.-Data visualizat,on
Plot3D Package and Example in R.-Data visualizat,onDr. Volkan OBAN
 
A Survey Of R Graphics
A Survey Of R GraphicsA Survey Of R Graphics
A Survey Of R GraphicsDataspora
 
R + Hadoop = Big Data Analytics. How Revolution Analytics' RHadoop Project Al...
R + Hadoop = Big Data Analytics. How Revolution Analytics' RHadoop Project Al...R + Hadoop = Big Data Analytics. How Revolution Analytics' RHadoop Project Al...
R + Hadoop = Big Data Analytics. How Revolution Analytics' RHadoop Project Al...Revolution Analytics
 
Python built in functions
Python built in functionsPython built in functions
Python built in functionsRakshitha S
 
Python Cheat Sheet
Python Cheat SheetPython Cheat Sheet
Python Cheat SheetGlowTouch
 
The secrets of inverse brogramming
The secrets of inverse brogrammingThe secrets of inverse brogramming
The secrets of inverse brogrammingRichie Cotton
 
Day 4a iteration and functions.pptx
Day 4a   iteration and functions.pptxDay 4a   iteration and functions.pptx
Day 4a iteration and functions.pptxAdrien Melquiond
 
Effector: we need to go deeper
Effector: we need to go deeperEffector: we need to go deeper
Effector: we need to go deeperVictor Didenko
 
Day 4b iteration and functions for-loops.pptx
Day 4b   iteration and functions  for-loops.pptxDay 4b   iteration and functions  for-loops.pptx
Day 4b iteration and functions for-loops.pptxAdrien Melquiond
 
Data visualization using the grammar of graphics
Data visualization using the grammar of graphicsData visualization using the grammar of graphics
Data visualization using the grammar of graphicsRupak Roy
 

What's hot (20)

ggtimeseries-->ggplot2 extensions
ggtimeseries-->ggplot2 extensions ggtimeseries-->ggplot2 extensions
ggtimeseries-->ggplot2 extensions
 
RHadoop の紹介
RHadoop の紹介RHadoop の紹介
RHadoop の紹介
 
R meets Hadoop
R meets HadoopR meets Hadoop
R meets Hadoop
 
Data Visualization with R.ggplot2 and its extensions examples.
Data Visualization with R.ggplot2 and its extensions examples.Data Visualization with R.ggplot2 and its extensions examples.
Data Visualization with R.ggplot2 and its extensions examples.
 
Introduction to R
Introduction to RIntroduction to R
Introduction to R
 
Oh Composable World!
Oh Composable World!Oh Composable World!
Oh Composable World!
 
Script jantung copy
Script jantung   copyScript jantung   copy
Script jantung copy
 
Plot3D Package and Example in R.-Data visualizat,on
Plot3D Package and Example in R.-Data visualizat,onPlot3D Package and Example in R.-Data visualizat,on
Plot3D Package and Example in R.-Data visualizat,on
 
Millionways
MillionwaysMillionways
Millionways
 
A Survey Of R Graphics
A Survey Of R GraphicsA Survey Of R Graphics
A Survey Of R Graphics
 
R + Hadoop = Big Data Analytics. How Revolution Analytics' RHadoop Project Al...
R + Hadoop = Big Data Analytics. How Revolution Analytics' RHadoop Project Al...R + Hadoop = Big Data Analytics. How Revolution Analytics' RHadoop Project Al...
R + Hadoop = Big Data Analytics. How Revolution Analytics' RHadoop Project Al...
 
Dplyr and Plyr
Dplyr and PlyrDplyr and Plyr
Dplyr and Plyr
 
Python hmm
Python hmmPython hmm
Python hmm
 
Python built in functions
Python built in functionsPython built in functions
Python built in functions
 
Python Cheat Sheet
Python Cheat SheetPython Cheat Sheet
Python Cheat Sheet
 
The secrets of inverse brogramming
The secrets of inverse brogrammingThe secrets of inverse brogramming
The secrets of inverse brogramming
 
Day 4a iteration and functions.pptx
Day 4a   iteration and functions.pptxDay 4a   iteration and functions.pptx
Day 4a iteration and functions.pptx
 
Effector: we need to go deeper
Effector: we need to go deeperEffector: we need to go deeper
Effector: we need to go deeper
 
Day 4b iteration and functions for-loops.pptx
Day 4b   iteration and functions  for-loops.pptxDay 4b   iteration and functions  for-loops.pptx
Day 4b iteration and functions for-loops.pptx
 
Data visualization using the grammar of graphics
Data visualization using the grammar of graphicsData visualization using the grammar of graphics
Data visualization using the grammar of graphics
 

Similar to Advanced Data Visualization in R- Somes Examples.

Артём Акуляков - F# for Data Analysis
Артём Акуляков - F# for Data AnalysisАртём Акуляков - F# for Data Analysis
Артём Акуляков - F# for Data AnalysisSpbDotNet Community
 
数学カフェ 確率・統計・機械学習回 「速習 確率・統計」
数学カフェ 確率・統計・機械学習回 「速習 確率・統計」数学カフェ 確率・統計・機械学習回 「速習 確率・統計」
数学カフェ 確率・統計・機械学習回 「速習 確率・統計」Ken'ichi Matsui
 
第13回数学カフェ「素数!!」二次会 LT資料「乱数!!」
第13回数学カフェ「素数!!」二次会 LT資料「乱数!!」第13回数学カフェ「素数!!」二次会 LT資料「乱数!!」
第13回数学カフェ「素数!!」二次会 LT資料「乱数!!」Ken'ichi Matsui
 
The Ring programming language version 1.10 book - Part 33 of 212
The Ring programming language version 1.10 book - Part 33 of 212The Ring programming language version 1.10 book - Part 33 of 212
The Ring programming language version 1.10 book - Part 33 of 212Mahmoud Samir Fayed
 
Refactoring to Macros with Clojure
Refactoring to Macros with ClojureRefactoring to Macros with Clojure
Refactoring to Macros with ClojureDmitry Buzdin
 
An overview of Python 2.7
An overview of Python 2.7An overview of Python 2.7
An overview of Python 2.7decoupled
 
R (Shiny Package) - Server Side Code for Decision Support System
R (Shiny Package) - Server Side Code for Decision Support SystemR (Shiny Package) - Server Side Code for Decision Support System
R (Shiny Package) - Server Side Code for Decision Support SystemMaithreya Chakravarthula
 
Map/reduce, geospatial indexing, and other cool features (Kristina Chodorow)
Map/reduce, geospatial indexing, and other cool features (Kristina Chodorow)Map/reduce, geospatial indexing, and other cool features (Kristina Chodorow)
Map/reduce, geospatial indexing, and other cool features (Kristina Chodorow)MongoSF
 
User Defined Aggregation in Apache Spark: A Love Story
User Defined Aggregation in Apache Spark: A Love StoryUser Defined Aggregation in Apache Spark: A Love Story
User Defined Aggregation in Apache Spark: A Love StoryDatabricks
 
User Defined Aggregation in Apache Spark: A Love Story
User Defined Aggregation in Apache Spark: A Love StoryUser Defined Aggregation in Apache Spark: A Love Story
User Defined Aggregation in Apache Spark: A Love StoryDatabricks
 
Assignment on Numerical Method C Code
Assignment on Numerical Method C CodeAssignment on Numerical Method C Code
Assignment on Numerical Method C CodeSyed Ahmed Zaki
 
関数潮流(Function Tendency)
関数潮流(Function Tendency)関数潮流(Function Tendency)
関数潮流(Function Tendency)riue
 
An example of R code for Data visualization
An example of R code for Data visualizationAn example of R code for Data visualization
An example of R code for Data visualizationLiang (Leon) Zhou
 
Using a mobile phone as a therapist - Superweek 2018
Using a mobile phone as a therapist - Superweek 2018Using a mobile phone as a therapist - Superweek 2018
Using a mobile phone as a therapist - Superweek 2018Peter Meyer
 

Similar to Advanced Data Visualization in R- Somes Examples. (20)

Артём Акуляков - F# for Data Analysis
Артём Акуляков - F# for Data AnalysisАртём Акуляков - F# for Data Analysis
Артём Акуляков - F# for Data Analysis
 
Joclad 2010 d
Joclad 2010 dJoclad 2010 d
Joclad 2010 d
 
A Shiny Example-- R
A Shiny Example-- RA Shiny Example-- R
A Shiny Example-- R
 
数学カフェ 確率・統計・機械学習回 「速習 確率・統計」
数学カフェ 確率・統計・機械学習回 「速習 確率・統計」数学カフェ 確率・統計・機械学習回 「速習 確率・統計」
数学カフェ 確率・統計・機械学習回 「速習 確率・統計」
 
第13回数学カフェ「素数!!」二次会 LT資料「乱数!!」
第13回数学カフェ「素数!!」二次会 LT資料「乱数!!」第13回数学カフェ「素数!!」二次会 LT資料「乱数!!」
第13回数学カフェ「素数!!」二次会 LT資料「乱数!!」
 
The Ring programming language version 1.10 book - Part 33 of 212
The Ring programming language version 1.10 book - Part 33 of 212The Ring programming language version 1.10 book - Part 33 of 212
The Ring programming language version 1.10 book - Part 33 of 212
 
Refactoring to Macros with Clojure
Refactoring to Macros with ClojureRefactoring to Macros with Clojure
Refactoring to Macros with Clojure
 
Elm: give it a try
Elm: give it a tryElm: give it a try
Elm: give it a try
 
An overview of Python 2.7
An overview of Python 2.7An overview of Python 2.7
An overview of Python 2.7
 
A tour of Python
A tour of PythonA tour of Python
A tour of Python
 
Slides
SlidesSlides
Slides
 
R (Shiny Package) - Server Side Code for Decision Support System
R (Shiny Package) - Server Side Code for Decision Support SystemR (Shiny Package) - Server Side Code for Decision Support System
R (Shiny Package) - Server Side Code for Decision Support System
 
Seminar PSU 10.10.2014 mme
Seminar PSU 10.10.2014 mmeSeminar PSU 10.10.2014 mme
Seminar PSU 10.10.2014 mme
 
Map/reduce, geospatial indexing, and other cool features (Kristina Chodorow)
Map/reduce, geospatial indexing, and other cool features (Kristina Chodorow)Map/reduce, geospatial indexing, and other cool features (Kristina Chodorow)
Map/reduce, geospatial indexing, and other cool features (Kristina Chodorow)
 
User Defined Aggregation in Apache Spark: A Love Story
User Defined Aggregation in Apache Spark: A Love StoryUser Defined Aggregation in Apache Spark: A Love Story
User Defined Aggregation in Apache Spark: A Love Story
 
User Defined Aggregation in Apache Spark: A Love Story
User Defined Aggregation in Apache Spark: A Love StoryUser Defined Aggregation in Apache Spark: A Love Story
User Defined Aggregation in Apache Spark: A Love Story
 
Assignment on Numerical Method C Code
Assignment on Numerical Method C CodeAssignment on Numerical Method C Code
Assignment on Numerical Method C Code
 
関数潮流(Function Tendency)
関数潮流(Function Tendency)関数潮流(Function Tendency)
関数潮流(Function Tendency)
 
An example of R code for Data visualization
An example of R code for Data visualizationAn example of R code for Data visualization
An example of R code for Data visualization
 
Using a mobile phone as a therapist - Superweek 2018
Using a mobile phone as a therapist - Superweek 2018Using a mobile phone as a therapist - Superweek 2018
Using a mobile phone as a therapist - Superweek 2018
 

More from Dr. Volkan OBAN

Conference Paper:IMAGE PROCESSING AND OBJECT DETECTION APPLICATION: INSURANCE...
Conference Paper:IMAGE PROCESSING AND OBJECT DETECTION APPLICATION: INSURANCE...Conference Paper:IMAGE PROCESSING AND OBJECT DETECTION APPLICATION: INSURANCE...
Conference Paper:IMAGE PROCESSING AND OBJECT DETECTION APPLICATION: INSURANCE...Dr. Volkan OBAN
 
Covid19py Python Package - Example
Covid19py  Python Package - ExampleCovid19py  Python Package - Example
Covid19py Python Package - ExampleDr. Volkan OBAN
 
Object detection with Python
Object detection with Python Object detection with Python
Object detection with Python Dr. Volkan OBAN
 
Python - Rastgele Orman(Random Forest) Parametreleri
Python - Rastgele Orman(Random Forest) ParametreleriPython - Rastgele Orman(Random Forest) Parametreleri
Python - Rastgele Orman(Random Forest) ParametreleriDr. Volkan OBAN
 
Linear Programming wi̇th R - Examples
Linear Programming wi̇th R - ExamplesLinear Programming wi̇th R - Examples
Linear Programming wi̇th R - ExamplesDr. Volkan OBAN
 
"optrees" package in R and examples.(optrees:finds optimal trees in weighted ...
"optrees" package in R and examples.(optrees:finds optimal trees in weighted ..."optrees" package in R and examples.(optrees:finds optimal trees in weighted ...
"optrees" package in R and examples.(optrees:finds optimal trees in weighted ...Dr. Volkan OBAN
 
k-means Clustering in Python
k-means Clustering in Pythonk-means Clustering in Python
k-means Clustering in PythonDr. Volkan OBAN
 
Naive Bayes Example using R
Naive Bayes Example using  R Naive Bayes Example using  R
Naive Bayes Example using R Dr. Volkan OBAN
 
k-means Clustering and Custergram with R
k-means Clustering and Custergram with Rk-means Clustering and Custergram with R
k-means Clustering and Custergram with RDr. Volkan OBAN
 
Data Science and its Relationship to Big Data and Data-Driven Decision Making
Data Science and its Relationship to Big Data and Data-Driven Decision MakingData Science and its Relationship to Big Data and Data-Driven Decision Making
Data Science and its Relationship to Big Data and Data-Driven Decision MakingDr. Volkan OBAN
 
Scikit-learn Cheatsheet-Python
Scikit-learn Cheatsheet-PythonScikit-learn Cheatsheet-Python
Scikit-learn Cheatsheet-PythonDr. Volkan OBAN
 
Python Pandas for Data Science cheatsheet
Python Pandas for Data Science cheatsheet Python Pandas for Data Science cheatsheet
Python Pandas for Data Science cheatsheet Dr. Volkan OBAN
 
Pandas,scipy,numpy cheatsheet
Pandas,scipy,numpy cheatsheetPandas,scipy,numpy cheatsheet
Pandas,scipy,numpy cheatsheetDr. Volkan OBAN
 
ReporteRs package in R. forming powerpoint documents-an example
ReporteRs package in R. forming powerpoint documents-an exampleReporteRs package in R. forming powerpoint documents-an example
ReporteRs package in R. forming powerpoint documents-an exampleDr. Volkan OBAN
 
ReporteRs package in R. forming powerpoint documents-an example
ReporteRs package in R. forming powerpoint documents-an exampleReporteRs package in R. forming powerpoint documents-an example
ReporteRs package in R. forming powerpoint documents-an exampleDr. Volkan OBAN
 
R Machine Learning packages( generally used)
R Machine Learning packages( generally used)R Machine Learning packages( generally used)
R Machine Learning packages( generally used)Dr. Volkan OBAN
 
treemap package in R and examples.
treemap package in R and examples.treemap package in R and examples.
treemap package in R and examples.Dr. Volkan OBAN
 
R-Data table Cheat Sheet
R-Data table Cheat SheetR-Data table Cheat Sheet
R-Data table Cheat SheetDr. Volkan OBAN
 
Rcommands-for those who interested in R.
Rcommands-for those who interested in R.Rcommands-for those who interested in R.
Rcommands-for those who interested in R.Dr. Volkan OBAN
 

More from Dr. Volkan OBAN (20)

Conference Paper:IMAGE PROCESSING AND OBJECT DETECTION APPLICATION: INSURANCE...
Conference Paper:IMAGE PROCESSING AND OBJECT DETECTION APPLICATION: INSURANCE...Conference Paper:IMAGE PROCESSING AND OBJECT DETECTION APPLICATION: INSURANCE...
Conference Paper:IMAGE PROCESSING AND OBJECT DETECTION APPLICATION: INSURANCE...
 
Covid19py Python Package - Example
Covid19py  Python Package - ExampleCovid19py  Python Package - Example
Covid19py Python Package - Example
 
Object detection with Python
Object detection with Python Object detection with Python
Object detection with Python
 
Python - Rastgele Orman(Random Forest) Parametreleri
Python - Rastgele Orman(Random Forest) ParametreleriPython - Rastgele Orman(Random Forest) Parametreleri
Python - Rastgele Orman(Random Forest) Parametreleri
 
Linear Programming wi̇th R - Examples
Linear Programming wi̇th R - ExamplesLinear Programming wi̇th R - Examples
Linear Programming wi̇th R - Examples
 
"optrees" package in R and examples.(optrees:finds optimal trees in weighted ...
"optrees" package in R and examples.(optrees:finds optimal trees in weighted ..."optrees" package in R and examples.(optrees:finds optimal trees in weighted ...
"optrees" package in R and examples.(optrees:finds optimal trees in weighted ...
 
k-means Clustering in Python
k-means Clustering in Pythonk-means Clustering in Python
k-means Clustering in Python
 
Naive Bayes Example using R
Naive Bayes Example using  R Naive Bayes Example using  R
Naive Bayes Example using R
 
R forecasting Example
R forecasting ExampleR forecasting Example
R forecasting Example
 
k-means Clustering and Custergram with R
k-means Clustering and Custergram with Rk-means Clustering and Custergram with R
k-means Clustering and Custergram with R
 
Data Science and its Relationship to Big Data and Data-Driven Decision Making
Data Science and its Relationship to Big Data and Data-Driven Decision MakingData Science and its Relationship to Big Data and Data-Driven Decision Making
Data Science and its Relationship to Big Data and Data-Driven Decision Making
 
Scikit-learn Cheatsheet-Python
Scikit-learn Cheatsheet-PythonScikit-learn Cheatsheet-Python
Scikit-learn Cheatsheet-Python
 
Python Pandas for Data Science cheatsheet
Python Pandas for Data Science cheatsheet Python Pandas for Data Science cheatsheet
Python Pandas for Data Science cheatsheet
 
Pandas,scipy,numpy cheatsheet
Pandas,scipy,numpy cheatsheetPandas,scipy,numpy cheatsheet
Pandas,scipy,numpy cheatsheet
 
ReporteRs package in R. forming powerpoint documents-an example
ReporteRs package in R. forming powerpoint documents-an exampleReporteRs package in R. forming powerpoint documents-an example
ReporteRs package in R. forming powerpoint documents-an example
 
ReporteRs package in R. forming powerpoint documents-an example
ReporteRs package in R. forming powerpoint documents-an exampleReporteRs package in R. forming powerpoint documents-an example
ReporteRs package in R. forming powerpoint documents-an example
 
R Machine Learning packages( generally used)
R Machine Learning packages( generally used)R Machine Learning packages( generally used)
R Machine Learning packages( generally used)
 
treemap package in R and examples.
treemap package in R and examples.treemap package in R and examples.
treemap package in R and examples.
 
R-Data table Cheat Sheet
R-Data table Cheat SheetR-Data table Cheat Sheet
R-Data table Cheat Sheet
 
Rcommands-for those who interested in R.
Rcommands-for those who interested in R.Rcommands-for those who interested in R.
Rcommands-for those who interested in R.
 

Recently uploaded

VIP Call Girls Service Miyapur Hyderabad Call +91-8250192130
VIP Call Girls Service Miyapur Hyderabad Call +91-8250192130VIP Call Girls Service Miyapur Hyderabad Call +91-8250192130
VIP Call Girls Service Miyapur Hyderabad Call +91-8250192130Suhani Kapoor
 
{Pooja: 9892124323 } Call Girl in Mumbai | Jas Kaur Rate 4500 Free Hotel Del...
{Pooja:  9892124323 } Call Girl in Mumbai | Jas Kaur Rate 4500 Free Hotel Del...{Pooja:  9892124323 } Call Girl in Mumbai | Jas Kaur Rate 4500 Free Hotel Del...
{Pooja: 9892124323 } Call Girl in Mumbai | Jas Kaur Rate 4500 Free Hotel Del...Pooja Nehwal
 
FESE Capital Markets Fact Sheet 2024 Q1.pdf
FESE Capital Markets Fact Sheet 2024 Q1.pdfFESE Capital Markets Fact Sheet 2024 Q1.pdf
FESE Capital Markets Fact Sheet 2024 Q1.pdfMarinCaroMartnezBerg
 
100-Concepts-of-AI by Anupama Kate .pptx
100-Concepts-of-AI by Anupama Kate .pptx100-Concepts-of-AI by Anupama Kate .pptx
100-Concepts-of-AI by Anupama Kate .pptxAnupama Kate
 
Industrialised data - the key to AI success.pdf
Industrialised data - the key to AI success.pdfIndustrialised data - the key to AI success.pdf
Industrialised data - the key to AI success.pdfLars Albertsson
 
Invezz.com - Grow your wealth with trading signals
Invezz.com - Grow your wealth with trading signalsInvezz.com - Grow your wealth with trading signals
Invezz.com - Grow your wealth with trading signalsInvezz1
 
Delhi Call Girls Punjabi Bagh 9711199171 ☎✔👌✔ Whatsapp Hard And Sexy Vip Call
Delhi Call Girls Punjabi Bagh 9711199171 ☎✔👌✔ Whatsapp Hard And Sexy Vip CallDelhi Call Girls Punjabi Bagh 9711199171 ☎✔👌✔ Whatsapp Hard And Sexy Vip Call
Delhi Call Girls Punjabi Bagh 9711199171 ☎✔👌✔ Whatsapp Hard And Sexy Vip Callshivangimorya083
 
RA-11058_IRR-COMPRESS Do 198 series of 1998
RA-11058_IRR-COMPRESS Do 198 series of 1998RA-11058_IRR-COMPRESS Do 198 series of 1998
RA-11058_IRR-COMPRESS Do 198 series of 1998YohFuh
 
代办国外大学文凭《原版美国UCLA文凭证书》加州大学洛杉矶分校毕业证制作成绩单修改
代办国外大学文凭《原版美国UCLA文凭证书》加州大学洛杉矶分校毕业证制作成绩单修改代办国外大学文凭《原版美国UCLA文凭证书》加州大学洛杉矶分校毕业证制作成绩单修改
代办国外大学文凭《原版美国UCLA文凭证书》加州大学洛杉矶分校毕业证制作成绩单修改atducpo
 
High Class Call Girls Noida Sector 39 Aarushi 🔝8264348440🔝 Independent Escort...
High Class Call Girls Noida Sector 39 Aarushi 🔝8264348440🔝 Independent Escort...High Class Call Girls Noida Sector 39 Aarushi 🔝8264348440🔝 Independent Escort...
High Class Call Girls Noida Sector 39 Aarushi 🔝8264348440🔝 Independent Escort...soniya singh
 
Customer Service Analytics - Make Sense of All Your Data.pptx
Customer Service Analytics - Make Sense of All Your Data.pptxCustomer Service Analytics - Make Sense of All Your Data.pptx
Customer Service Analytics - Make Sense of All Your Data.pptxEmmanuel Dauda
 
From idea to production in a day – Leveraging Azure ML and Streamlit to build...
From idea to production in a day – Leveraging Azure ML and Streamlit to build...From idea to production in a day – Leveraging Azure ML and Streamlit to build...
From idea to production in a day – Leveraging Azure ML and Streamlit to build...Florian Roscheck
 
PKS-TGC-1084-630 - Stage 1 Proposal.pptx
PKS-TGC-1084-630 - Stage 1 Proposal.pptxPKS-TGC-1084-630 - Stage 1 Proposal.pptx
PKS-TGC-1084-630 - Stage 1 Proposal.pptxPramod Kumar Srivastava
 
VIP High Class Call Girls Bikaner Anushka 8250192130 Independent Escort Servi...
VIP High Class Call Girls Bikaner Anushka 8250192130 Independent Escort Servi...VIP High Class Call Girls Bikaner Anushka 8250192130 Independent Escort Servi...
VIP High Class Call Girls Bikaner Anushka 8250192130 Independent Escort Servi...Suhani Kapoor
 
Best VIP Call Girls Noida Sector 39 Call Me: 8448380779
Best VIP Call Girls Noida Sector 39 Call Me: 8448380779Best VIP Call Girls Noida Sector 39 Call Me: 8448380779
Best VIP Call Girls Noida Sector 39 Call Me: 8448380779Delhi Call girls
 
Aminabad Call Girl Agent 9548273370 , Call Girls Service Lucknow
Aminabad Call Girl Agent 9548273370 , Call Girls Service LucknowAminabad Call Girl Agent 9548273370 , Call Girls Service Lucknow
Aminabad Call Girl Agent 9548273370 , Call Girls Service Lucknowmakika9823
 
Schema on read is obsolete. Welcome metaprogramming..pdf
Schema on read is obsolete. Welcome metaprogramming..pdfSchema on read is obsolete. Welcome metaprogramming..pdf
Schema on read is obsolete. Welcome metaprogramming..pdfLars Albertsson
 
Digi Khata Problem along complete plan.pptx
Digi Khata Problem along complete plan.pptxDigi Khata Problem along complete plan.pptx
Digi Khata Problem along complete plan.pptxTanveerAhmed817946
 

Recently uploaded (20)

VIP Call Girls Service Miyapur Hyderabad Call +91-8250192130
VIP Call Girls Service Miyapur Hyderabad Call +91-8250192130VIP Call Girls Service Miyapur Hyderabad Call +91-8250192130
VIP Call Girls Service Miyapur Hyderabad Call +91-8250192130
 
{Pooja: 9892124323 } Call Girl in Mumbai | Jas Kaur Rate 4500 Free Hotel Del...
{Pooja:  9892124323 } Call Girl in Mumbai | Jas Kaur Rate 4500 Free Hotel Del...{Pooja:  9892124323 } Call Girl in Mumbai | Jas Kaur Rate 4500 Free Hotel Del...
{Pooja: 9892124323 } Call Girl in Mumbai | Jas Kaur Rate 4500 Free Hotel Del...
 
FESE Capital Markets Fact Sheet 2024 Q1.pdf
FESE Capital Markets Fact Sheet 2024 Q1.pdfFESE Capital Markets Fact Sheet 2024 Q1.pdf
FESE Capital Markets Fact Sheet 2024 Q1.pdf
 
VIP Call Girls Service Charbagh { Lucknow Call Girls Service 9548273370 } Boo...
VIP Call Girls Service Charbagh { Lucknow Call Girls Service 9548273370 } Boo...VIP Call Girls Service Charbagh { Lucknow Call Girls Service 9548273370 } Boo...
VIP Call Girls Service Charbagh { Lucknow Call Girls Service 9548273370 } Boo...
 
100-Concepts-of-AI by Anupama Kate .pptx
100-Concepts-of-AI by Anupama Kate .pptx100-Concepts-of-AI by Anupama Kate .pptx
100-Concepts-of-AI by Anupama Kate .pptx
 
Industrialised data - the key to AI success.pdf
Industrialised data - the key to AI success.pdfIndustrialised data - the key to AI success.pdf
Industrialised data - the key to AI success.pdf
 
Invezz.com - Grow your wealth with trading signals
Invezz.com - Grow your wealth with trading signalsInvezz.com - Grow your wealth with trading signals
Invezz.com - Grow your wealth with trading signals
 
Delhi Call Girls Punjabi Bagh 9711199171 ☎✔👌✔ Whatsapp Hard And Sexy Vip Call
Delhi Call Girls Punjabi Bagh 9711199171 ☎✔👌✔ Whatsapp Hard And Sexy Vip CallDelhi Call Girls Punjabi Bagh 9711199171 ☎✔👌✔ Whatsapp Hard And Sexy Vip Call
Delhi Call Girls Punjabi Bagh 9711199171 ☎✔👌✔ Whatsapp Hard And Sexy Vip Call
 
RA-11058_IRR-COMPRESS Do 198 series of 1998
RA-11058_IRR-COMPRESS Do 198 series of 1998RA-11058_IRR-COMPRESS Do 198 series of 1998
RA-11058_IRR-COMPRESS Do 198 series of 1998
 
꧁❤ Aerocity Call Girls Service Aerocity Delhi ❤꧂ 9999965857 ☎️ Hard And Sexy ...
꧁❤ Aerocity Call Girls Service Aerocity Delhi ❤꧂ 9999965857 ☎️ Hard And Sexy ...꧁❤ Aerocity Call Girls Service Aerocity Delhi ❤꧂ 9999965857 ☎️ Hard And Sexy ...
꧁❤ Aerocity Call Girls Service Aerocity Delhi ❤꧂ 9999965857 ☎️ Hard And Sexy ...
 
代办国外大学文凭《原版美国UCLA文凭证书》加州大学洛杉矶分校毕业证制作成绩单修改
代办国外大学文凭《原版美国UCLA文凭证书》加州大学洛杉矶分校毕业证制作成绩单修改代办国外大学文凭《原版美国UCLA文凭证书》加州大学洛杉矶分校毕业证制作成绩单修改
代办国外大学文凭《原版美国UCLA文凭证书》加州大学洛杉矶分校毕业证制作成绩单修改
 
High Class Call Girls Noida Sector 39 Aarushi 🔝8264348440🔝 Independent Escort...
High Class Call Girls Noida Sector 39 Aarushi 🔝8264348440🔝 Independent Escort...High Class Call Girls Noida Sector 39 Aarushi 🔝8264348440🔝 Independent Escort...
High Class Call Girls Noida Sector 39 Aarushi 🔝8264348440🔝 Independent Escort...
 
Customer Service Analytics - Make Sense of All Your Data.pptx
Customer Service Analytics - Make Sense of All Your Data.pptxCustomer Service Analytics - Make Sense of All Your Data.pptx
Customer Service Analytics - Make Sense of All Your Data.pptx
 
From idea to production in a day – Leveraging Azure ML and Streamlit to build...
From idea to production in a day – Leveraging Azure ML and Streamlit to build...From idea to production in a day – Leveraging Azure ML and Streamlit to build...
From idea to production in a day – Leveraging Azure ML and Streamlit to build...
 
PKS-TGC-1084-630 - Stage 1 Proposal.pptx
PKS-TGC-1084-630 - Stage 1 Proposal.pptxPKS-TGC-1084-630 - Stage 1 Proposal.pptx
PKS-TGC-1084-630 - Stage 1 Proposal.pptx
 
VIP High Class Call Girls Bikaner Anushka 8250192130 Independent Escort Servi...
VIP High Class Call Girls Bikaner Anushka 8250192130 Independent Escort Servi...VIP High Class Call Girls Bikaner Anushka 8250192130 Independent Escort Servi...
VIP High Class Call Girls Bikaner Anushka 8250192130 Independent Escort Servi...
 
Best VIP Call Girls Noida Sector 39 Call Me: 8448380779
Best VIP Call Girls Noida Sector 39 Call Me: 8448380779Best VIP Call Girls Noida Sector 39 Call Me: 8448380779
Best VIP Call Girls Noida Sector 39 Call Me: 8448380779
 
Aminabad Call Girl Agent 9548273370 , Call Girls Service Lucknow
Aminabad Call Girl Agent 9548273370 , Call Girls Service LucknowAminabad Call Girl Agent 9548273370 , Call Girls Service Lucknow
Aminabad Call Girl Agent 9548273370 , Call Girls Service Lucknow
 
Schema on read is obsolete. Welcome metaprogramming..pdf
Schema on read is obsolete. Welcome metaprogramming..pdfSchema on read is obsolete. Welcome metaprogramming..pdf
Schema on read is obsolete. Welcome metaprogramming..pdf
 
Digi Khata Problem along complete plan.pptx
Digi Khata Problem along complete plan.pptxDigi Khata Problem along complete plan.pptx
Digi Khata Problem along complete plan.pptx
 

Advanced Data Visualization in R- Somes Examples.

  • 1. Prepared by Volkan OBAN Advanced Data Visualization in R- Somes Examples. geomorph package in R.... Example: Code: >library(geomorph) > data(scallopPLY) > ply <- scallopPLY$ply > digitdat <- scallopPLY$coords > plotspec(spec=ply,digitspec=digitdat,fixed=16, centered =TRUE) Example: > data(scallops) > Y.gpa<-gpagen(A=scallops$coorddata, curves=scallops$curvslide, surfaces=scallops$surfslide) > ref<-mshape(Y.gpa$coords) > plotRefToTarget(ref,Y.gpa$coords[,,1],method="TPS", mag=3)
  • 2. Reference: http://www.public.iastate.edu/~dcadams/PDFPubs/Quick%20Guide%20to%20Geomorph%20v2.0.pdf Example: > boxplot.ej <- function(y, xloc = 1, width.box = 0.25, lwd.box = 2, width .hor = 0.25, + lwd.hor = 2, range.wisk = 1.5, lwd.wisk = 2, pch. box = 16, cex.boxpoint = 2, + plot.outliers = FALSE, pch.out = 1, cex.out = 1, color = "black") { + + # makes boxplot with dot as median and solid whisker Interquartile r ange = + # (.75 quantile) - (.25 quantile). Note: Wiskers are not always sym metrical; + # top wisker extends up to max(y) constrained by y <= (.75 quantile) + + # range.wisk*Interquartile range bottom whisker is determined by min (y)
  • 3. + # constrained by y >= (.25 quantile) - range.wisk*Interquartile rang e + + Q <- quantile(y, c(0.25, 0.5, 0.75)) + names(Q) <- NULL # gets rid of percentages + IQ.range <- Q[3] - Q[1] + low <- Q[1] - range.wisk * IQ.range + high <- Q[3] + range.wisk * IQ.range + index <- which((y >= low) & (y <= high)) + wisk.low <- min(y[index]) + wisk.high <- max(y[index]) + outliers <- y[which((y < low) | (y > high))] + + # plot median: + points(xloc, Q[2], pch = pch.box, cex = cex.boxpoint, col = color) + + # plot box: + xleft <- xloc - width.box/2 + xright <- xloc + width.box/2 + ybottom <- Q[1] + ytop <- Q[3] + rect(xleft, ybottom, xright, ytop, lwd = lwd.box, border = color) + + # plot whiskers: + segments(xloc, wisk.low, xloc, Q[1], lwd = lwd.wisk, col = color) + segments(xloc, Q[3], xloc, wisk.high, lwd = lwd.wisk, col = color) + + # plot horizontal segments: + x0 <- xloc - width.hor/2 + x1 <- xloc + width.hor/2 + segments(x0, wisk.low, x1, wisk.low, lwd = lwd.hor, col = color) + segments(x0, wisk.high, x1, wisk.high, lwd = lwd.hor, col = color) + + # plot outliers: + if (plot.outliers == TRUE) { + xloc.p <- rep(xloc, length(outliers)) + points(xloc.p, outliers, pch = pch.out, cex = cex.out, col = col or) + } + } > > RT.hf.sp <- rnorm(1000, mean = 0.41, sd = 0.008) > RT.lf.sp <- rnorm(1000, mean = 0.43, sd = 0.01) > RT.vlf.sp <- rnorm(1000, mean = 0.425, sd = 0.012) > RT.hf.ac <- rnorm(1000, mean = 0.46, sd = 0.008) > RT.lf.ac <- rnorm(1000, mean = 0.51, sd = 0.01) > RT.vlf.ac <- rnorm(1000, mean = 0.52, sd = 0.012) > > ps <- 1 # size of boxpoint > par(cex.main = 1.5, mar = c(5, 6, 4, 5) + 0.1, mgp = c(3.5, 1, 0), cex.l ab = 1.5, + font.lab = 2, cex.axis = 1.3, bty = "n", las = 1) > x <- c(1, 2, 3, 4) > plot(x, c(-10, -10, -10, -10), type = "p", ylab = " ", xlab = " ", cex = 1.5, + ylim = c(0.3, 0.6), xlim = c(1, 4), lwd = 2, pch = 5, axes = FALSE, main = " ") > axis(1, at = c(1.5, 2.5, 3.5), labels = c("HF", "LF", "VLF")) > mtext("Word Frequency", side = 1, line = 3, cex = 1.5, font = 2) > axis(2, pos = 1.1) > par(las = 0) > mtext("Group Mean M", side = 2, line = 2.9, cex = 1.5, font = 2) > > x <- c(1.5, 2.5, 3.5) > boxplot.ej(RT.hf.sp, xloc = 1.5, cex.boxpoint = ps) > boxplot.ej(RT.hf.ac, xloc = 1.5, cex.boxpoint = ps, color = "grey") > boxplot.ej(RT.lf.sp, xloc = 2.5, cex.boxpoint = ps) > boxplot.ej(RT.lf.ac, xloc = 2.5, cex.boxpoint = ps, color = "grey") > boxplot.ej(RT.vlf.sp, xloc = 3.5, cex.boxpoint = ps)
  • 4. > boxplot.ej(RT.vlf.ac, xloc = 3.5, cex.boxpoint = ps, color = "grey") > > text(2.5, 0.35, "Speed", cex = 1.4, font = 1, adj = 0.5) > text(2.5, 0.57, "Accuracy", cex = 1.4, font = 1, col = "grey", adj = 0.5 ) > > " > RT.hf.sp <- rnorm(1000, mean = 0.41, sd = 0.008) > RT.lf.sp <- rnorm(1000, mean = 0.43, sd = 0.01) > RT.vlf.sp <- rnorm(1000, mean = 0.425, sd = 0.012) > RT.hf.ac <- rnorm(1000, mean = 0.46, sd = 0.008) > RT.lf.ac <- rnorm(1000, mean = 0.51, sd = 0.01) > RT.vlf.ac <- rnorm(1000, mean = 0.52, sd = 0.012) > > library(sm) > # by Henrik Singmann customized violinplot function (singmann.org) the > # original violinplot function stems from the 'vioplot' package Copyrigh t (c) > # 2004, Daniel Adler. All rights reserved. Redistribution and use in so urce > # and binary forms, with or without modification, are permitted provided that > # the following conditions are met: * Redistributions of source code mus t > # retain the above copyright notice, this list of conditions and the > # following disclaimer. * Redistributions in binary form must reproduce the > # above copyright notice, this list of conditions and the following > # disclaimer in the documentation and/or other materials provided with t he > # distribution. * Neither the name of the University of Goettingen nor the > # names of its contributors may be used to endorse or promote products > # derived from this software without specific prior written permission. THIS > # SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND > # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE > # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PUR POSE > # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE > # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR > # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF > # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINE SS > # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER I N > # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE ) > # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE > # POSSIBILITY OF SUCH DAMAGE. > > vioplot.singmann <- function(x, ..., range = 1.5, h = NULL, ylim = NULL, names = NULL, + horizontal = FALSE, col = NULL, border = "b lack", lty = 1, lwd = 1, rectCol = "black", + colMed = "white", pchMed = 19, at, add = FA LSE, wex = 1, mark.outlier = TRUE, + pch.mean = 4, ids = NULL, drawRect = TRUE, yaxt = "s") { + + # process multiple datas + datas <- list(x, ...) + n <- length(datas) + if (missing(at))
  • 5. + at <- 1:n + # pass 1 - calculate base range - estimate density setup parameters for + # density estimation + upper <- vector(mode = "numeric", length = n) + lower <- vector(mode = "numeric", length = n) + q1 <- vector(mode = "numeric", length = n) + q3 <- vector(mode = "numeric", length = n) + med <- vector(mode = "numeric", length = n) + base <- vector(mode = "list", length = n) + height <- vector(mode = "list", length = n) + outliers <- vector(mode = "list", length = n) + baserange <- c(Inf, -Inf) + + # global args for sm.density function-call + args <- list(display = "none") + + if (!(is.null(h))) + args <- c(args, h = h) + for (i in 1:n) { + data <- datas[[i]] + if (!is.null(ids)) + names(data) <- ids + if (is.null(names(data))) + names(data) <- as.character(1:(length(data))) + + # calculate plot parameters 1- and 3-quantile, median, IQR, uppe r- and + # lower-adjacent + data.min <- min(data) + data.max <- max(data) + q1[i] <- quantile(data, 0.25) + q3[i] <- quantile(data, 0.75) + med[i] <- median(data) + iqd <- q3[i] - q1[i] + upper[i] <- min(q3[i] + range * iqd, data.max) + lower[i] <- max(q1[i] - range * iqd, data.min) + + # strategy: xmin = min(lower, data.min)) ymax = max(upper, data. max)) + est.xlim <- c(min(lower[i], data.min), max(upper[i], data.max)) + + # estimate density curve + smout <- do.call("sm.density", c(list(data, xlim = est.xlim), ar gs)) + + # calculate stretch factor the plots density heights is defined in range 0.0 + # ... 0.5 we scale maximum estimated point to 0.4 per data + hscale <- 0.4/max(smout$estimate) * wex + + # add density curve x,y pair to lists + base[[i]] <- smout$eval.points + height[[i]] <- smout$estimate * hscale + t <- range(base[[i]]) + baserange[1] <- min(baserange[1], t[1]) + baserange[2] <- max(baserange[2], t[2]) + min.d <- boxplot.stats(data)[["stats"]][1] + max.d <- boxplot.stats(data)[["stats"]][5] + height[[i]] <- height[[i]][(base[[i]] > min.d) & (base[[i]] < ma x.d)] + height[[i]] <- c(height[[i]][1], height[[i]], height[[i]][length (height[[i]])]) + base[[i]] <- base[[i]][(base[[i]] > min.d) & (base[[i]] < max.d) ] + base[[i]] <- c(min.d, base[[i]], max.d) + outliers[[i]] <- list(data[(data < min.d) | (data > max.d)], nam es(data[(data <
  • 6. + min.d) | (data > max.d)])) + + # calculate min,max base ranges + } + # pass 2 - plot graphics setup parameters for plot + if (!add) { + xlim <- if (n == 1) + at + c(-0.5, 0.5) else range(at) + min(diff(at))/2 * c(-1, 1 ) + + if (is.null(ylim)) { + ylim <- baserange + } + } + if (is.null(names)) { + label <- 1:n + } else { + label <- names + } + boxwidth <- 0.05 * wex + + # setup plot + if (!add) + plot.new() + if (!horizontal) { + if (!add) { + plot.window(xlim = xlim, ylim = ylim) + axis(2) + axis(1, at = at, label = label) + } + + box() + for (i in 1:n) { + # plot left/right density curve + polygon(c(at[i] - height[[i]], rev(at[i] + height[[i]])), c( base[[i]], + rev(base[[i]])), col = col, border = border, lty = lty, lwd = lwd) + + if (drawRect) { + # browser() plot IQR + boxplot(datas[[i]], at = at[i], add = TRUE, yaxt = yaxt, pars = list(boxwex = 0.6 * + wex, outpch = if (mark.outlier) "" else 1)) + if ((length(outliers[[i]][[1]]) > 0) & mark.outlier) + text(rep(at[i], length(outliers[[i]][[1]])), outlier s[[i]][[1]], + labels = outliers[[i]][[2]]) + # lines( at[c( i, i)], c(lower[i], upper[i]) ,lwd=lwd, l ty=lty) plot 50% KI + # box rect( at[i]-boxwidth/2, q1[i], at[i]+boxwidth/2, q 3[i], col=rectCol) + # plot median point points( at[i], med[i], pch=pchMed, c ol=colMed ) + } + points(at[i], mean(datas[[i]]), pch = pch.mean, cex = 1.3) + } + } else { + if (!add) { + plot.window(xlim = ylim, ylim = xlim) + axis(1) + axis(2, at = at, label = label) + } + + box() + for (i in 1:n) { + # plot left/right density curve
  • 7. + polygon(c(base[[i]], rev(base[[i]])), c(at[i] - height[[i]], rev(at[i] + + height[[i]])), col = col, border = border, lty = lty, lwd = lwd) + + if (drawRect) { + # plot IQR + boxplot(datas[[i]], yaxt = yaxt, at = at[i], add = TRUE, pars = list(boxwex = 0.8 * + wex, outpch = if (mark.outlier) "" else 1)) + if ((length(outliers[[i]][[1]]) > 0) & mark.outlier) + text(rep(at[i], length(outliers[[i]][[1]])), outlier s[[i]][[1]], + labels = outliers[[i]][[2]]) + # lines( at[c( i, i)], c(lower[i], upper[i]) ,lwd=lwd, l ty=lty) + } + points(at[i], mean(datas[[i]]), pch = pch.mean, cex = 1.3) + } + } + invisible(list(upper = upper, lower = lower, median = med, q1 = q1, q3 = q3)) + } > > # plot > par(cex.main = 1.5, mar = c(5, 6, 4, 5) + 0.1, mgp = c(3.5, 1, 0), cex.l ab = 1.5, + font.lab = 2, cex.axis = 1.3, bty = "n", las = 1) > x <- c(1, 2, 3, 4) > plot(x, c(-10, -10, -10, -10), type = "p", ylab = " ", xlab = " ", cex = 1.5, + ylim = c(0.3, 0.6), xlim = c(1, 4), lwd = 2, pch = 5, axes = F, mai n = " ") > axis(1, at = c(1.5, 2.5, 3.5), labels = c("HF", "LF", "VLF")) > axis(2, pos = 1.1) > mtext("Word Frequency", side = 1, line = 3, cex = 1.5, font = 2) > > par(las = 0) > mtext("Group Mean M", side = 2, line = 2.9, cex = 1.5, font = 2) > > x <- c(1.5, 2.5, 3.5) > > vioplot.singmann(RT.hf.sp, RT.lf.sp, RT.vlf.sp, add = TRUE, mark.outlier = FALSE, + at = c(1.5, 2.5, 3.5), wex = 0.4, yaxt = "n") > vioplot.singmann(RT.hf.ac, RT.lf.ac, RT.vlf.ac, add = TRUE, mark.outlier = FALSE, + at = c(1.5, 2.5, 3.5), wex = 0.4, col = "grey", border = "grey", rectCol = "grey", + colMed = "grey", yaxt = "n") > > text(2.5, 0.35, "Speed", cex = 1.4, font = 1, adj = 0.5) > text(2.5, 0.58, "Accuracy", cex = 1.4, font = 1, col = "grey", adj = 0.5 )
  • 8. Example: plotsebargraph = function(loc, value, sterr, wiskwidth, color = "grey", lin ewidth = 2) { w = wiskwidth/2 segments(x0 = loc, x1 = loc, y0 = value, y1 = value + sterr, col = colo r, lwd = linewidth) segments(x0 = loc - w, x1 = loc + w, y0 = value + sterr, y1 = value + s terr, col = color, lwd = linewidth) # upper whiskers } plotsegraph = function(loc, value, sterr, wiskwidth, color = "grey", linewi dth = 2) { w = wiskwidth/2 segments(x0 = loc, x1 = loc, y0 = value - sterr, y1 = value + sterr, co l = color,
  • 9. lwd = linewidth) segments(x0 = loc - w, x1 = loc + w, y0 = value + sterr, y1 = value + s terr, col = color, lwd = linewidth) # upper whiskers segments(x0 = loc - w, x1 = loc + w, y0 = value - sterr, y1 = value - s terr, col = color, lwd = linewidth) # lower whiskers } # ======================================================= # Data; order = Speed, neutral, accuracy MRT <- c(429, 515, 555) MRT.se <- c(25, 25, 30) Er <- c(0.23, 0.14, 0.13) Er.se <- c(0.022, 0.021, 0.021) # ====================================================== par(cex.main = 1.5, mar = c(5, 6, 4, 5) + 0.1, mgp = c(3.5, 1, 0), cex.lab = 1.5, font.lab = 2, cex.axis = 1.3, bty = "n", las = 1) # mpg = c(3, 1, 0) is default. first = axis labels!; middle = tick labels m ar # = c(5, 4, 4, 2) + 0.1 is default digitsize <- 1.2 x <- c(1, 2, 3, 4) plot(x, c(-10, -10, -10, -10), type = "p", ylab = " Mean Response Time (ms. )", xlab = " ", cex = 1.5, ylim = c(200, 800), xlim = c(1, 4), lwd = 2, pch = 5, axes = F, main = " ") axis(1, at = c(1.5, 2.5, 3.5), labels = c("Speed", "Neutral", "Accuracy")) mtext("Cue", side = 1, line = 3, cex = 1.5, font = 2) axis(2, at = c(300, 400, 500, 600, 700)) x = c(1.5, 2.5, 3.5)
  • 10. points(x, MRT, cex = 1.5, lwd = 2, pch = 19) plot.errbars = plotsegraph(x, MRT, MRT.se, 0.1, color = "black") #0.1 = wi skwidth lines(c(1.5, 2.5, 3.5), MRT, lwd = 2, type = "c") text(1.5, MRT[1] + 60, "429", adj = 0.5, cex = digitsize) text(2.5, MRT[2] + 60, "515", adj = 0.5, cex = digitsize) text(3.5, MRT[3] + 60, "555", adj = 0.5, cex = digitsize) par(new = TRUE) x <- c(1, 2, 3, 4) plot(x, c(-10, -10, -10, -10), type = "p", ylab = " ", xlab = " ", cex = 1. 5, ylim = c(0, 1), xlim = c(1, 4), lwd = 2, axes = FALSE, main = " ") axis(4, at = c(0, 0.1, 0.2, 0.3, 0.4), las = 1) grid::grid.text("Mean Proportion of Errors", 0.97, 0.5, rot = 270, gp = gri d::gpar(cex = 1.5, font = 2)) width <- 0.25 linewidth <- 2 x0 <- 1.5 - width x1 <- 1.5 + width y0 <- 0 y1 <- Er[1] segments(x0, y0, x0, y1, lwd = linewidth) segments(x0, y1, x1, y1, lwd = linewidth) segments(x1, y1, x1, y0, lwd = linewidth) segments(x1, y0, x0, y0, lwd = linewidth) x0 <- 2.5 - width x1 <- 2.5 + width y0 <- 0 y1 <- Er[2] segments(x0, y0, x0, y1, lwd = linewidth) segments(x0, y1, x1, y1, lwd = linewidth) segments(x1, y1, x1, y0, lwd = linewidth) segments(x1, y0, x0, y0, lwd = linewidth)
  • 11. x0 <- 3.5 - width x1 <- 3.5 + width y0 <- 0 y1 <- Er[3] segments(x0, y0, x0, y1, lwd = linewidth) segments(x0, y1, x1, y1, lwd = linewidth) segments(x1, y1, x1, y0, lwd = linewidth) segments(x1, y0, x0, y0, lwd = linewidth) loc.errbars <- c(1.5, 2.5, 3.5) plot.errbars <- plotsebargraph(loc.errbars, Er, Er.se, 0.2, color = "black" ) # 0.2 = wiskwidth text(1.5, 0.9, "Behavioral Data", font = 2, cex = 2, pos = 4) text(1.5, 0.05, "0.23", adj = 0.5, cex = digitsize) text(2.5, 0.05, "0.14", adj = 0.5, cex = digitsize) text(3.5, 0.05, "0.13", adj = 0.5, cex = digitsize)
  • 12. Example: xbar.therapy <- 92 s.therapy <- 8.5 xbar.placebo <- 85 s.placebo <- 9.1 n <- 15 xdiff <- xbar.therapy - xbar.placebo sdiff <- sqrt((s.therapy^2 + s.placebo^2)/2) * sqrt(2/n) sdiff <- sqrt(s.therapy^2 + s.placebo^2)/sqrt(n) muH0 <- 0 muH1 <- 8 t0 <- (xdiff - muH0)/sdiff par(cex.main = 1.5, mar = c(4, 4.5, 4.5, 1), mgp = c(3.5, 1, 0), cex.lab = 1.5, font.lab = 2, cex.axis = 1.8, bty = "n", las = 1) par(mar = c(4, 4.5, 4.5, 1) x <- seq(-15, 30, by = 0.001) y <- dt(x/sdiff, df = 28) y3 <- dt((x - 9)/sdiff, df = 28) plot(x, y, type = "l", axes = FALSE, xlab = NA, ylab = NA, xlim = c(-15, 25 ), lwd = 2) lines(x, y3, lwd = 2) axis(side = 1, at = seq(-15, 30, by = 5), pos = 0, lwd = 2, cex.axis = 1.7) axis(side = 1, at = 7, pos = 0, col = "red4", col.axis = "red4", lwd = 2, p adj = 0.1) abline(v = xdiff, col = "red4", lwd = 2) L0 <- dt((xdiff/sdiff), df = 28) L2 <- dt(((xdiff - 9)/sdiff), df = 28) lines(c(6.7, 7.3), y = rep(L0, 2), col = "red4", lwd = 2) lines(c(6.7, 7.3), y = rep(L2, 2), col = "red4", lwd = 2) text(8, L0, expression(paste(italic("L"), " = .04")), adj = 0, col = "red4" , cex = 1.8)
  • 13. text(7.5, L2, expression(paste(italic("L"), " = .32")), adj = 0, col = "red 4", cex = 1.8) text(-16, 0.35, expression(paste(H[0], " : ", mu[diff], " = 0", sep = "")), adj = 0, cex = 1.8) text(-16, 0.3, expression(paste(H[1], " : ", mu[diff], " = 9", sep = "")), adj = 0, cex = 1.8) mtext(expression(bar(x)[diff]), side = 1, line = 2, at = 6.5, adj = 0, col = "red4", cex = 1.8, padj = 0.1) text(14, 0.2, expression(paste("LR = ", frac(".32", ".04") %~~% 8, sep = "" )), adj = 0, col = "red4", cex = 1.8) Example:
  • 14. Max.BF10 = function(p) { # Computes the upper bound on the Bayes factor As in Sellke, Bayarri, & # Berger, 2001 Max.BF10 <- -1/(exp(1) * p * log(p)) return(Max.BF10) } # Plot this function for p in .001 to .1 xlow <- 0.001 xhigh <- 0.1 p1 <- 0.0373 p2 <- 0.00752 p3 <- 0.001968 par(cex.main = 1.5, mar = c(5, 6, 4, 5) + 0.1, mgp = c(3.5, 1, 0), cex.lab = 1.5, font.lab = 2, cex.axis = 1.3, bty = "n", las = 1) plot(function(p) Max.BF10(p), xlow, xhigh, xlim = c(xlow, xhigh), lwd = 2, xlab = " ", ylab = " ") mtext("Two-sided p value", 1, line = 2.5, cex = 1.5, font = 2) mtext("Maximum Bayes factor for H1", 2, line = 2.8, cex = 1.5, font = 2, la s = 0) lines(c(0, p1), c(3, 3), lwd = 2, col = "grey") lines(c(0, p2), c(10, 10), lwd = 2, col = "grey") lines(c(0, p3), c(30, 30), lwd = 2, col = "grey") lines(c(p1, p1), c(0, 3), lwd = 2, col = "grey") lines(c(p2, p2), c(0, 10), lwd = 2, col = "grey") lines(c(p3, p3), c(0, 30), lwd = 2, col = "grey") cexsize <- 1.2 text(0.005, 31, expression(max((BF[10])) == 30 %<->% p %~~% 0.002), cex = c exsize, pos = 4) text(0.01, 11, expression(max((BF[10])) == 10 %<->% p %~~% 0.008), cex = ce xsize, pos = 4) text(p1 - 0.005, 5, expression(max((BF[10])) == 3 %<->% p %~~% 0.037), cex = cexsize,
  • 15. pos = 4) Example: # rm(list = ls()) IndividualPerformance <- function(choice, lo, show.losses = FALSE) { # Plots the choice profile Args: choice: A vector containing the choice s on # each trial lo: A vector containing the losses on each trial show.loss es: # logical: Should the losses be indicated by filled dots? par(mar = c(4, 4.5, 0.5, 1)) plot(choice, type = "b", axes = FALSE, xlab = "Trial", ylab = "Deck", c ex.lab = 2) axis(1, seq(0, 100, length = 6), cex.axis = 1.8) axis(2, 1:4, labels = c("A", "B", "C", "D"), cex.axis = 1.8, las = 1)
  • 16. if (show.losses == TRUE) { index.losses <- which(lo < 0) points(matrix(c(index.losses, choice[index.losses]), byrow = FALSE, nrow = length(index.losses)), pch = 19, lwd = 1.5) } } # Synthetic data choice <- sample(1:4, 100, replace = TRUE) lo <- sample(c(-1250, -250, -50, 0), 100, replace = TRUE) # postscript('DiversePerformance.eps', width = 7, height = 7) IndividualPerformance(choice, lo, show.losses = TRUE) # dev.off()
  • 17. Example: library(plotrix) # mix of 2 normal distributions mixedNorm <- function(x) { return(0.5 * dnorm(x, 0.25, 0.13) + 0.5 * dnorm(x, 0.7, 0.082)) } ### normalize so that area [0,1] integrates to 1; k = normalizing constant k <- 1/integrate(mixedNorm, 0, 1)$value # normalized pdfmix <- function(x, k) { return(k * (0.5 * dnorm(x, 0.25, 0.13) + 0.5 * dnorm(x, 0.7, 0.082))) } # integrate(pdfmix, 0.0790321,0.4048)$value # 0.4 op <- par(mfrow = c(1, 2), mar = c(5.9, 6, 4, 2) + 0.1) barplot(height = c(0.2, 0.25, 0.1, 0.05, 0.35, 0.05), names.arg = c(1, 2, 3, 4, 5, 6), axes = FALSE, ylim = c(0, 1), width = 1, cex.names = 1. 5) arrows(x0 = 0.6, x1 = 0.6, y0 = 0.38, y1 = 0.23, length = c(0.2, 0.2), lwd = 2) text(0.6, 0.41, "0.2", cex = 1.3) ablineclip(v = 1.9, y1 = 0.28, y2 = 0.375, lwd = 2) ablineclip(v = 4.2, y1 = 0.28, y2 = 0.375, lwd = 2) ablineclip(h = 0.375, x1 = 1.9, x2 = 4.2, lwd = 2) arrows(x0 = 3.05, x1 = 3.05, y0 = 0.525, y1 = 0.375, length = c(0.2, 0.2), lwd = 2) text(3.05, 0.555, "0.4", cex = 1.3) ablineclip(v = 5.5, y1 = 0.38, y2 = 0.43, lwd = 2) arrows(x0 = 6.7, x1 = 6.7, y0 = 0.43, y1 = 0.09, length = c(0.2, 0.2), lwd = 2) ablineclip(h = 0.43, x1 = 5.5, x2 = 6.7, lwd = 2) text(6.1, 0.46, "7 x", cex = 1.3) par(las = 1)
  • 18. axis(2, at = seq(0, 1, 0.1), labels = seq(0, 1, 0.1), lwd = 2, cex.axis = 1 .3) par(las = 0) mtext("Probability Mass", side = 2, line = 3.7, cex = 2) mtext("Value", side = 1, line = 3.7, cex = 2) par(mar = c(4.6, 6, 3.3, 2) + 0.1) xx <- c(0.0790321, 0.079031, seq(0.08, 0.4, 0.01), 0.4084, 0.4084) yy <- c(0, pdfmix(0.079031, k = k), pdfmix(seq(0.08, 0.4, 0.01), k = k), pd fmix(0.4084, k = k), 0) plot(1, type = "n", axes = FALSE, ylab = "", xlab = "", xlim = c(0, 1), ylim = c(0, 3)) polygon(xx, yy, col = "grey", border = NA) curve(pdfmix(x, k = k), from = 0, to = 1, lwd = 2, ylab = "", xlab = "", xl im = c(0, 1), ylim = c(0, 3), add = TRUE) text(0.25, 0.7, "0.4", cex = 1.3) par(las = 1) axis(2, at = seq(0, 3, 0.5), labels = seq(0, 3, 0.5), lwd = 2, cex.axis = 1 .3) points(0.539580297, pdfmix(0.539580297, k = k), pch = 21, bg = "white", cex = 1.4, lwd = 2.7) points(uniroot(function(x) pdfmix(x, k = k) - 5 * pdfmix(0.539580297, k = k ), interval = c(0.56, 0.7))$root, pdfmix(uniroot(function(x) pdfmix(x, k = k) - 5 * pdfmix(0. 539580297, k = k), interval = c(0.56, 0.7))$root, k = k), pch = 21, bg = "white", cex = 1. 4, lwd = 2.7) arrows(x0 = 0.539580297, x1 = 0.539580297, y0 = 2.7, y1 = 0.7, length = c(0 .17, 0.17), angle = 19, lwd = 2) ablineclip(h = 2.7, x1 = 0.539580297, x2 = 0.6994507, lwd = 2) ablineclip(v = 0.6994507, y1 = 2.55, y2 = 2.7, lwd = 2) text(0.6194593, 2.79, "5 x", cex = 1.3) axis(1, at = seq(0, 1, 0.1), labels = c("0", ".1", ".2", ".3", ".4", ".5", ".6", ".7", ".8", ".9", "1"), line = -1.2, lwd = 2, cex.axis = 1.37) par(las = 0)
  • 19. mtext("Probability Density", side = 2, line = 3.7, cex = 2) mtext("Value", side = 1, line = 2.4, cex = 2) par(op) Example: library("psych") library("qgraph") # Load BFI data: data(bfi) bfi <- bfi[, 1:25] # Groups and names object (not needed really, but make the plots easier to # interpret): Names <- scan("http://sachaepskamp.com/files/BFIitems.txt", what = "charact er", sep = "n")
  • 20. # Create groups object: Groups <- rep(c("A", "C", "E", "N", "O"), each = 5) # Compute correlations: cor_bfi <- cor_auto(bfi) # Plot correlation network: graph_cor <- qgraph(cor_bfi, layout = "spring", nodeNames = Names, groups = Groups, legend.cex = 0.6, DoNotPlot = TRUE) # Plot partial correlation network: graph_pcor <- qgraph(cor_bfi, graph = "concentration", layout = "spring", n odeNames = Names, groups = Groups, legend.cex = 0.6, DoNotPlot = TRUE) # Plot glasso network: graph_glas <- qgraph(cor_bfi, graph = "glasso", sampleSize = nrow(bfi), lay out = "spring", nodeNames = Names, legend.cex = 0.6, groups = Groups, legend.cex = 0.7, GLratio = 2, DoNotPlot = TRUE) # centrality plot (all graphs): centralityPlot(list(r = graph_cor, `Partial r` = graph_pcor, glasso = graph _glas), labels = Names) + labs(colour = "") + theme_bw() + theme(legend.positio n = "bottom")
  • 21. Example: ### prior & posterior parameters mean.prior <- 75 sd.prior <- 12 mean.posterior <- 73.33644 sd.posterior <- 4.831067 ### plot settings xlim <- c(40, 115) ylim <- c(0, 0.117) lwd <- 2 lwd.points <- 2 lwd.axis <- 1.2 cex.points <- 1.4 cex.axis <- 1.2 cex.text <- 1.1
  • 22. cex.labels <- 1.3 cexLegend <- 1.2 op <- par(mar = c(5.1, 4.1, 4.1, 2.1)) ### create empty canvas plot(1, xlim = xlim, ylim = ylim, axes = FALSE, xlab = "", ylab = "") ### shade prior area < 70 greycol1 <- rgb(0, 0, 0, alpha = 0.2) greycol2 <- rgb(0, 0, 0, alpha = 0.4) polPrior <- seq(xlim[1], 70, length.out = 400) xx <- c(polPrior, polPrior[length(polPrior)], polPrior[1]) yy <- c(dnorm(polPrior, mean.prior, sd.prior), 0, 0) polygon(xx, yy, col = greycol1, border = NA) ### shade posterior area < 70 polPosterior <- seq(xlim[1], 70, length.out = 400) xx <- c(polPosterior, polPosterior[length(polPosterior)], polPosterior[1]) yy <- c(dnorm(polPosterior, mean.posterior, sd.posterior), 0, 0) polygon(xx, yy, col = greycol2, border = NA) ### shade posterior area on interval (81, 84) polPosterior2 <- seq(81, 84, length.out = 400) xx <- c(polPosterior2, polPosterior2[length(polPosterior2)], polPosterior2[ 1]) yy <- c(dnorm(polPosterior2, mean.posterior, sd.posterior), 0, 0) polygon(xx, yy, col = greycol2, border = NA) ### grey dashed lines to prior mean, posterior mean and posterior at 77 lines(rep(mean.prior, 2), c(0, dnorm(mean.prior, mean.prior, sd.prior)), lt y = 2, col = "grey", lwd = lwd) lines(rep(mean.posterior, 2), c(0, dnorm(mean.posterior, mean.posterior, sd .posterior)), lty = 2, col = "grey", lwd = lwd)
  • 23. lines(rep(mean.posterior + (mean.posterior - 70), 2), c(0, dnorm(mean.poste rior + (mean.posterior - 70), mean.posterior, sd.posterior)), lty = 2, col = "grey", lwd = lwd) ### axes axis(1, at = seq(xlim[1], xlim[2], 5), cex.axis = cex.axis, lwd = lwd.axis) axis(2, labels = FALSE, tck = 0, lwd = lwd.axis, line = -0.5) ### axes labels mtext("IQ Bob", side = 1, cex = 1.6, line = 2.4) mtext("Density", side = 2, cex = 1.5, line = 0) ### plot prior and posterior # prior plot(function(x) dnorm(x, mean.prior, sd.prior), xlim = xlim, ylim = ylim, xlab = "", ylab = "", lwd = lwd, lty = 3, add = TRUE) # posterior plot(function(x) dnorm(x, mean.posterior, sd.posterior), xlim = xlim, ylim = ylim, add = TRUE, lwd = lwd) ### add points # posterior density at 70 points(70, dnorm(70, mean.posterior, sd.posterior), pch = 21, bg = "white", cex = cex.points, lwd = lwd.points) # posterior density at 76.67 points(mean.posterior + (mean.posterior - 70), dnorm(mean.posterior + (mean .posterior - 70), mean.posterior, sd.posterior), pch = 21, bg = "white", cex = cex.p oints, lwd = lwd.points) # maximum a posteriori value
  • 24. points(mean.posterior, dnorm(mean.posterior, mean.posterior, sd.posterior), pch = 21, bg = "white", cex = cex.points, lwd = lwd.points) ### credible interval CIlow <- qnorm(0.025, mean.posterior, sd.posterior) CIhigh <- qnorm(0.975, mean.posterior, sd.posterior) yCI <- 0.11 arrows(CIlow, yCI, CIhigh, yCI, angle = 90, code = 3, length = 0.1, lwd = l wd) text(mean.posterior, yCI + 0.0042, labels = "95%", cex = cex.text) text(CIlow, yCI, labels = paste(round(CIlow, 2)), cex = cex.text, pos = 2, offset = 0.3) text(CIhigh, yCI, labels = paste(round(CIhigh, 2)), cex = cex.text, pos = 4 , offset = 0.3) ### legend legendPosition <- 115 legend(legendPosition, ylim[2] + 0.002, legend = c("Posterior", "Prior"), l ty = c(1, 3), bty = "n", lwd = c(lwd, lwd), cex = cexLegend, xjust = 1, yjust = 1 , x.intersp = 0.6, seg.len = 1.2) ### draw labels # A arrows(x0 = 57, x1 = 61, y0 = dnorm(62, mean.prior, sd.prior) + 0.0003, y1 = dnorm(62, mean.prior, sd.prior) - 0.007, length = c(0.08, 0.08), lwd = lwd, code = 2) text(55.94, dnorm(5, mean.prior, sd.prior) + 0.0205, labels = "A", cex = ce x.labels) # B arrows(x0 = 64.5, x1 = 69, y0 = dnorm(68, mean.posterior, sd.posterior) + 0 .003, y1 = dnorm(68, mean.posterior, sd.posterior) - 0.005, length = c(0.08, 0.08), lwd = lw d, code = 2)
  • 25. text(63.5, dnorm(68, mean.posterior, sd.posterior) + 0.0042, labels = "B", cex = cex.labels) # C arrows(x0 = mean.posterior + 1, x1 = mean.posterior + 6, y0 = dnorm(mean.po sterior, mean.posterior, sd.posterior) + 0.001, y1 = dnorm(mean.posterior, mean.posterior, sd.po sterior) + 0.008, length = c(0.08, 0.08), lwd = lwd, code = 1) text(mean.posterior + 7, dnorm(mean.posterior, mean.posterior, sd.posterior ) + 0.0093, labels = "C", cex = cex.labels) # D arrows(x0 = 70 - 0.25, x1 = 70 - 0.25, y0 = dnorm(70, mean.posterior, sd.po sterior) + 0.005, y1 = 0.092, length = c(0.08, 0.08), lwd = lwd, code = 1) lines(c(70 - 0.25, mean.posterior), rep(0.092, 2), lwd = lwd) arrows(x0 = mean.posterior, x1 = mean.posterior, y0 = 0.092, y1 = dnorm(mea n.posterior, mean.posterior, sd.posterior) + 0.003, length = c(0.08, 0.08), lwd = lw d, code = 2) ratio <- dnorm(mean.posterior, mean.posterior, sd.posterior)/dnorm(70, mean .posterior, sd.posterior) text(mean(c(70 - 0.255, mean.posterior)), 0.096, labels = paste(round(ratio , 2), "x"), cex = cex.text) text(70 - 1.5, dnorm(70, mean.posterior, sd.posterior) + 0.02, labels = "D" , cex = cex.labels) # E arrows(x0 = 70 + 1, x1 = mean.posterior + (mean.posterior - 70) - 1, y0 = d norm(70, mean.posterior, sd.posterior), y1 = dnorm(mean.posterior + (mean.posterior - 70), mean. posterior, sd.posterior), length = c(0.08, 0.08), lwd = lwd, code = 3) text(74.9, dnorm(mean.posterior + (mean.posterior - 70), mean.posterior, sd .posterior) - 0.005, labels = "E", cex = cex.labels) # F
  • 26. arrows(x0 = 82.5, x1 = 87, y0 = dnorm(82, mean.posterior, sd.posterior) - 0 .012, y1 = dnorm(82, mean.posterior, sd.posterior) - 0.005, length = c(0.08, 0.08), lwd = lw d, code = 1) text(88, dnorm(82, mean.posterior, sd.posterior) - 0.0034, labels = "F", ce x = cex.labels) # G arrows(x0 = CIhigh + 6, x1 = CIhigh + 8.2, y0 = yCI, y1 = yCI, length = c(0 .08, 0.08), lwd = lwd, code = 1) text(CIhigh + 9.5, yCI, labels = "G", cex = cex.labels) ### additional information scores <- "Bob's IQ scores: {73, 67, 79}" priorText1 <- "Prior distribution:" priorText2 <- expression(paste("IQ Bob ~ N(", 75, ", ", 12^2, ")")) posteriorText1 <- "Posterior distribution:" posteriorText2 <- expression(paste("IQ Bob ~ N(", 73.34, ", ", 4.83^2, ")") ) xx <- 87 yCI2 <- 0.12 text(xx, yCI2 - 0.033, labels = priorText1, cex = cexLegend, pos = 4, offse t = 0.3) text(xx, yCI2 - 0.042, labels = priorText2, cex = cexLegend, pos = 4, offse t = 0.3) text(xx, yCI2 - 0.059, labels = scores, cex = cexLegend, pos = 4, offset = 0.3) text(xx, yCI2 - 0.074, labels = posteriorText1, cex = cexLegend, pos = 4, o ffset = 0.3) text(xx, yCI2 - 0.083, labels = posteriorText2, cex = cexLegend, pos = 4, o ffset = 0.3) par(op)
  • 27. Example: > library(metafor) Zorunlu paket yükleniyor: Matrix Loading 'metafor' package (version 1.9-9). > g <- c(-0.35, -0.67, -0.25, -0.22, -0.22, -0.36, -0.67, -0.25, -0.22, -0. 22, -0.36, -0.22, + -0.67, -0.25, -0.22, -0.36, -0.67, -0.25, -0.22, -0.22, -0.36) > > gSE <- c(0.469041575982343, 0.469041575982343, 0.458257569495584, 0.45825 7569495584, + 0.458257569495584, 0.458257569495584, 0.469041575982343, 0.45825 7569495584, 0.458257569495584, + 0.458257569495584, 0.458257569495584, 0.458257569495584, 0.46904 1575982343, 0.458257569495584, + 0.458257569495584, 0.458257569495584, 0.469041575982343, 0.45825 7569495584, 0.458257569495584, + 0.458257569495584, 0.458257569495584) > > cMeanSmile <- c("3.48", "3.75", "3.48", "3.67", "3.60", "3.58", "3.75", " 3.48", "3.67", + "3.60", "3.58", "3.67", "3.75", "3.48", "3.60", "3.58", " 3.75", "3.48", "3.67", "3.60", + "3.58") > cMeanPout <- c("3.96", "4.81", "3.81", "3.94", "3.88", "4.03", "4.81", "3 .81", "3.94", + "3.88", "4.03", "3.94", "4.81", "3.81", "3.88", "4.03", "4 .81", "3.81", "3.94", "3.88", "4.03")
  • 28. > forest(x = g, sei = gSE, xlab = "Hedges' g", cex.lab = 1.4, ilab = cbind( cMeanSmile, + cMeanPout), ilab.xpos = c(-3.2, -2.5), cex.axis = 1.1, mlab = "Meta-Analyti c Effect:", + lwd = 1.4, rows = 22:2, addfit = FALSE, atransf = FALSE, ylim = c( -2, 25)) There were 50 or more warnings (use warnings() to see the first 50) > > text(-4.05, 24, "Study", cex = 1.3) > text(-3.2, 24, "Smile", cex = 1.3) > text(-2.5, 24, "Pout", cex = 1.3) > text(2.75, 24, "Hedges' g [95% CI]", cex = 1.3) > > abline(h = 1, lwd = 1.4) > addpoly(metaG, atransf = FALSE, row = -1, cex = 1.3, mlab = "Meta-Analyti c Effect Size:") Reference: http://shinyapps.org/apps/RGraphCompendium/index.php Example: > library(tidyr) > library(plotly) > s <- read.csv("https://raw.githubusercontent.com/plotly/datasets/master/s chool_earnings.csv") > s <- s[order(s$Men), ] > gather(s, Sex, value, Women, Men) %>% + plot_ly(x = value, y = School, mode = "markers", + color = Sex, colors = c("pink", "blue")) %>% + add_trace(x = value, y = School, mode = "lines",
  • 29. + group = School, showlegend = F, line = list(color = "gray") ) %>% + layout( + title = "Gender earnings disparity", + xaxis = list(title = "Annual Salary (in thousands)"), + margin = list(l = 65) + )
  • 30. Example: > library(lattice) > library(ggplot2) > quakes$Magnitude <- equal.count(quakes$mag, 4) > pl <- cloud(depth ~ lat * long | Magnitude, data = quakes, + zlim = rev(range(quakes$depth)), screen = list(z = 105,x = -70), panel.as pect = 0.75, xlab = "Longitude",ylab = "Latitude", zlab = "Depth") > print(pl) > pl Example: kx <- function(u, v) cos(u) * (r + cos(u/2) * sin(t *v) - sin(u/2) * sin(2 * t * v)) ky <- function(u, v) sin(u) * (r + cos(u/2) * sin(t *v) - sin(u/2) * sin(2 * t * v)) kz <- function(u, v) sin(u/2) * sin(t * v) + cos(u/2) *sin(t * v) n <- 50 u <- seq(0.3, 1.25, length = n) * 2 * pi v <- seq(0, 1, length = n) * 2 * pi um <- matrix(u, length(u), length(u))
  • 31. vm <- matrix(v, length(v), length(v), byrow = TRUE) r <- 2 t <- 1 pl <- wireframe(kz(um, vm) ~ kx(um, vm) + ky(um, vm),shade = TRUE,screen = list(z = 170, x = - 60), alpha = 0.75,panel.aspect = 0.6, aspect = c(1, 0.4)) print(pl)