Title: | Sparse Group Penalized Regression for Bi-Level Variable Selection |
---|---|
Description: | Fits the regularization path of regression models (linear and logistic) with additively combined penalty terms. All possible combinations with Least Absolute Shrinkage and Selection Operator (LASSO), Smoothly Clipped Absolute Deviation (SCAD), Minimax Concave Penalty (MCP) and Exponential Penalty (EP) are supported. This includes Sparse Group LASSO (SGL), Sparse Group SCAD (SGS), Sparse Group MCP (SGM) and Sparse Group EP (SGE). For more information, see Buch, G., Schulz, A., Schmidtmann, I., Strauch, K., & Wild, P. S. (2024) <doi:10.1002/bimj.202200334>. |
Authors: | Gregor Buch [aut, cre, cph], Andreas Schulz [ths], Irene Schmidtmann [ths], Konstantin Strauch [ths], Philipp Wild [ths] |
Maintainer: | Gregor Buch <[email protected]> |
License: | GPL (>= 3) |
Version: | 0.1.2 |
Built: | 2025-02-12 05:41:05 UTC |
Source: | https://github.com/cran/SGPR |
A function that extracts the estimated coefficients from an SGP object.
## S3 method for class 'sgp' coef(object, lambda, index = 1:length(object$lambda), drop = TRUE, ...)
## S3 method for class 'sgp' coef(object, lambda, index = 1:length(object$lambda), drop = TRUE, ...)
object |
An object that was generated with sgp. |
lambda |
The value of lambda at which the coefficients are to be extracted. |
index |
The index that indicates the lambda at which the coefficients are to be extracted (alternative to specifying 'lambda'). |
drop |
A Boolean value that specifies whether empty dimensions should be removed. |
... |
Other parameters of underlying basic functions. |
A vector or matrix with the estimated coefficients.
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp(X, y_lin, g, type = "linear") coef(lin_fit, index = 5:7) log_fit <- sgp(X, y_log, g, type = "logit") coef(log_fit, index = 5:7)
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp(X, y_lin, g, type = "linear") coef(lin_fit, index = 5:7) log_fit <- sgp(X, y_log, g, type = "logit") coef(log_fit, index = 5:7)
A function that extracts the estimated coefficients from a cross-validated SGP object.
## S3 method for class 'sgp.cv' coef(object, lambda = object$lambda.min, index = object$min, ...)
## S3 method for class 'sgp.cv' coef(object, lambda = object$lambda.min, index = object$min, ...)
object |
An object that was generated with sgp.cv. |
lambda |
The value of lambda at which the coefficients are to be extracted. |
index |
The index that indicates the lambda at which the coefficients are to be extracted (alternative to specifying 'lambda'). |
... |
Other parameters of underlying basic functions. |
A vector or matrix with the estimated coefficients.
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp.cv(X, y_lin, g, type = "linear") coef(lin_fit) log_fit <- sgp.cv(X, y_log, g, type = "logit") coef(log_fit)
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp.cv(X, y_lin, g, type = "linear") coef(lin_fit) log_fit <- sgp.cv(X, y_log, g, type = "logit") coef(log_fit)
A function that calculates the loss/cost
get.loss(y, pred, type)
get.loss(y, pred, type)
y |
The response vector. |
pred |
The predicted values for the response. |
type |
A string indicating the type of regression model (linear or binomial). |
The loss of the input vectors.
Produces a coefficient profile plot of the coefficient paths for a fitted SGP object
## S3 method for class 'sgp' plot(x, alpha = 1, legend.pos, label = FALSE, log.l = FALSE, norm = FALSE, ...)
## S3 method for class 'sgp' plot(x, alpha = 1, legend.pos, label = FALSE, log.l = FALSE, norm = FALSE, ...)
x |
An object that was generated with sgp. |
alpha |
Tuning parameter for the alpha-blending. |
legend.pos |
Coordinates or keyword for positioning the legend. |
label |
A Boolean value that specifies whether the plot should be annotated. |
log.l |
A Boolean value that specifies whether the horizontal axis should be on the log scale. |
norm |
A Boolean value that specifies whether the norm of each group should be plotted. |
... |
Other parameters of underlying basic functions. |
A plot object with the coefficient path of an SGP.
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp(X, y_lin, g, type = "linear") plot(lin_fit, legend.pos = "topright", label = TRUE) plot(lin_fit, label = TRUE, norm = TRUE) log_fit <- sgp(X, y_log, g, type = "logit") plot(log_fit, legend.pos = "topright", label = TRUE) plot(log_fit, label = TRUE, norm = TRUE)
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp(X, y_lin, g, type = "linear") plot(lin_fit, legend.pos = "topright", label = TRUE) plot(lin_fit, label = TRUE, norm = TRUE) log_fit <- sgp(X, y_log, g, type = "logit") plot(log_fit, legend.pos = "topright", label = TRUE) plot(log_fit, label = TRUE, norm = TRUE)
Plots the cross-validation curve as a function of the lambda values used.
## S3 method for class 'sgp.cv' plot(x, log.l = TRUE, highlight = TRUE, col = "firebrick3", ...)
## S3 method for class 'sgp.cv' plot(x, log.l = TRUE, highlight = TRUE, col = "firebrick3", ...)
x |
An object that was generated with sgp.cv. |
log.l |
A Boolean value that specifies whether the horizontal axis should be on the log scale. |
highlight |
A Boolean value that specifies whether a vertical line should be added at the value where the cross-validation error is minimized. |
col |
Controls the color of the dots. |
... |
Other parameters of underlying basic functions. |
A plot object with the cross-validation curve of an SGP.
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp.cv(X, y_lin, g, type = "linear") plot(lin_fit, col = "blue") log_fit <- sgp.cv(X, y_log, g, type = "logit") plot(log_fit, col = "blue")
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp.cv(X, y_lin, g, type = "linear") plot(lin_fit, col = "blue") log_fit <- sgp.cv(X, y_log, g, type = "logit") plot(log_fit, col = "blue")
A function that extracts information from a SGP object and performs predictions.
## S3 method for class 'sgp' predict( object, X = NULL, extract = c("link", "response", "class", "coef", "vars", "groups", "nvars", "ngroups", "norm"), lambda, index = 1:length(object$lambda), ... )
## S3 method for class 'sgp' predict( object, X = NULL, extract = c("link", "response", "class", "coef", "vars", "groups", "nvars", "ngroups", "norm"), lambda, index = 1:length(object$lambda), ... )
object |
An object that was generated with sgp. |
X |
The design matrix for making predictions. |
extract |
A string indicating the type of information to return. |
lambda |
The value of lambda at which predictions should be made. |
index |
The index that indicates the lambda at which predictions should be made (alternative to specifying 'lambda'). |
... |
Other parameters of underlying basic functions. |
Different objects depending on the string indicated by 'extract'.
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp(X, y_lin, g, type = "linear") predict(lin_fit, X = X, extract = "nvars") log_fit <- sgp(X, y_log, g, type = "logit") predict(log_fit, X = X, extract = "nvars")
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp(X, y_lin, g, type = "linear") predict(lin_fit, X = X, extract = "nvars") log_fit <- sgp(X, y_log, g, type = "logit") predict(log_fit, X = X, extract = "nvars")
A function that extracts information from a cross-validated SGP object and performs predictions.
## S3 method for class 'sgp.cv' predict( object, X, lambda = object$lambda.min, index = object$min, extract = c("link", "response", "class", "coefficients", "vars", "groups", "nvars", "ngroups", "norm"), ... )
## S3 method for class 'sgp.cv' predict( object, X, lambda = object$lambda.min, index = object$min, extract = c("link", "response", "class", "coefficients", "vars", "groups", "nvars", "ngroups", "norm"), ... )
object |
An object that was generated with sgp.cv. |
X |
The design matrix for making predictions. |
lambda |
The value of lambda at which predictions should be made. |
index |
The index that indicates the lambda at which predictions should be made (alternative to specifying 'lambda'). |
extract |
A string indicating the type of information to return. |
... |
Other parameters of underlying basic functions. |
Different objects depending on the string indicated by 'extract'.
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp.cv(X, y_lin, g, type = "linear") predict(lin_fit, X = X, extract = "link") log_fit <- sgp.cv(X, y_log, g, type = "logit") predict(log_fit, X = X, extract = "class")
n <- 100 p <- 12 nr <- 4 g <- paste0("Group ",ceiling(1:p / nr)) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) lin_fit <- sgp.cv(X, y_lin, g, type = "linear") predict(lin_fit, X = X, extract = "link") log_fit <- sgp.cv(X, y_log, g, type = "logit") predict(log_fit, X = X, extract = "class")
A function that checks the group information for possible errors and processes it.
process.group(group, group.weight)
process.group(group, group.weight)
group |
A vector that specifies the group membership of each variable in X. |
group.weight |
A vector specifying weights that are multiplied by the group penalty to account for different group sizes. |
A structure containing the prepared group structure and, as an attribute, its labels and group weights.
A function that sets up a lambda sequence for a sparse group penalty.
process.lambda( X, y, group, Z, type, alpha, lambda.min, log.lambda, nlambda, group.weight, ada_mult )
process.lambda( X, y, group, Z, type, alpha, lambda.min, log.lambda, nlambda, group.weight, ada_mult )
X |
The design matrix without intercept with the variables to be selected. |
y |
The response vector. |
group |
A vector indicating the group membership of each variable in X. |
Z |
The design matrix of the variables to be included in the model without penalization. |
type |
A string indicating the type of regression model (linear or binomial). |
alpha |
Tuning parameter for the mixture of penalties at group and variable level. A value of 0 results in a selection at group level, a value of 1 results in a selection at variable level and everything in between is bi-level selection. |
lambda.min |
A numeric value (a fraction) multiplied by the maximum lambda to define the end of the lambda sequence. |
log.lambda |
A Boolean value that specifies whether the values of the lambda sequence should be on the log scale. |
nlambda |
An integer that specifies the length of the lambda sequence. |
group.weight |
A vector specifying weights that are multiplied by the group penalty to account for different group sizes. |
ada_mult |
An integer that defines the multiplier for adjusting the convergence threshold. |
A vector with values for lambda.
A function that checks arguments about the penalty and translates them to integer (for the C++ code).
process.penalty(penalty, pvar, pgr, vargamma, grgamma, vartau, grtau, alpha)
process.penalty(penalty, pvar, pgr, vargamma, grgamma, vartau, grtau, alpha)
penalty |
A string that specifies the sparse group penalty to be used. |
pvar |
A string that specifies the penalty used at the variable level. |
pgr |
A string that specifies the penalty used at the group level. |
vargamma |
An integer that defines the value of gamma for the penalty at the variable level. |
grgamma |
An integer that specifies the value of gamma for the penalty at the group level. |
vartau |
An integer that defines the value of tau for the penalty at the variable level. |
grtau |
An integer that specifies the value of tau for the penalty at the group level. |
alpha |
Tuning parameter for the mixture of penalties at group and variable level. A value of 0 results in a selection at group level, a value of 1 results in a selection at variable level and everything in between is bi-level selection. |
A list of two integers indicating the penalty for the C++ code.
A function that checks the design matrix X for possible errors and scales it.
process.X(X, group)
process.X(X, group)
X |
The design matrix without intercept with the variables to be selected. |
group |
A vector that specifies the group membership of each variable in X. |
A list containing:
The standardized design matrix X.
The variable names of the matrix.
The center of the variables before the transformation.
The scale of the variables before the transformation.
A function that checks the response vector y for possible errors.
process.y(y, type)
process.y(y, type)
y |
The response vector. |
type |
A string indicating the type of regression model (linear or binomial). |
The verified response vector y.
A function that checks the design matrix Z for possible errors and scales it.
process.Z(Z)
process.Z(Z)
Z |
The design matrix of the variables to be included in the model without penalization. |
A list containing:
The standardized design matrix Z.
The variable names of the matrix.
The center of the variables before the transformation.
The scale of the variables before the transformation.
A function that determines the regularization paths for models with sparse group penalties at a grid of values for the regularization parameter lambda.
sgp( X, y, group = 1:ncol(X), penalty = c("sgl", "sgs", "sgm", "sge"), alpha = 1/3, type = c("linear", "logit"), Z = NULL, nlambda = 100, lambda.min = { if (nrow(X) > ncol(X)) 1e-04 else 0.05 }, log.lambda = TRUE, lambdas, prec = 1e-04, ada_mult = 2, max.iter = 10000, standardize = TRUE, vargamma = ifelse(pvar == "scad" | penalty == "sgs", 4, 3), grgamma = ifelse(pgr == "scad" | penalty == "sgs", 4, 3), vartau = 1, grtau = 1, pvar = c("lasso", "scad", "mcp", "exp"), pgr = c("lasso", "scad", "mcp", "exp"), group.weight = rep(1, length(unique(group))), returnX = FALSE, ... )
sgp( X, y, group = 1:ncol(X), penalty = c("sgl", "sgs", "sgm", "sge"), alpha = 1/3, type = c("linear", "logit"), Z = NULL, nlambda = 100, lambda.min = { if (nrow(X) > ncol(X)) 1e-04 else 0.05 }, log.lambda = TRUE, lambdas, prec = 1e-04, ada_mult = 2, max.iter = 10000, standardize = TRUE, vargamma = ifelse(pvar == "scad" | penalty == "sgs", 4, 3), grgamma = ifelse(pgr == "scad" | penalty == "sgs", 4, 3), vartau = 1, grtau = 1, pvar = c("lasso", "scad", "mcp", "exp"), pgr = c("lasso", "scad", "mcp", "exp"), group.weight = rep(1, length(unique(group))), returnX = FALSE, ... )
X |
The design matrix without intercept with the variables to be selected. |
y |
The response vector. |
group |
A vector indicating the group membership of each variable in X. |
penalty |
A string that specifies the sparse group penalty to be used. |
alpha |
Tuning parameter for the mixture of penalties at group and variable level. A value of 0 results in a selection at group level, a value of 1 results in a selection at variable level and everything in between is bi-level selection. |
type |
A string indicating the type of regression model (linear or binomial). |
Z |
The design matrix of the variables to be included in the model without penalization. |
nlambda |
An integer that specifies the length of the lambda sequence. |
lambda.min |
A numeric value (a fraction) multiplied by the maximum lambda to define the end of the lambda sequence. |
log.lambda |
A Boolean value that specifies whether the values of the lambda sequence should be on the log scale. |
lambdas |
A user supplied vector with values for lambda. |
prec |
The convergence threshold for the algorithm. |
ada_mult |
An integer that defines the multiplier for adjusting the convergence threshold. |
max.iter |
The maximum number of iterations allowed for the algorithm. |
standardize |
A Boolean value that specifies whether the design matrix should be standardized. |
vargamma |
An integer that defines the value of gamma for the penalty at the variable level. |
grgamma |
An integer that specifies the value of gamma for the penalty at the group level. |
vartau |
An integer that defines the value of tau for the penalty at the variable level. |
grtau |
An integer that specifies the value of tau for the penalty at the group level. |
pvar |
A string that specifies the penalty used at the variable level. |
pgr |
A string that specifies the penalty used at the group level. |
group.weight |
A vector specifying weights that are multiplied by the group penalty to account for different group sizes. |
returnX |
A Boolean value that specifies whether standardized design matrix should be returned. |
... |
Other parameters of underlying basic functions. |
Two options are available for choosing a penalty. With the argument penalty
,
the methods Sparse Group LASSO, Sparse Group SCAD, Sparse Group MCP and Sparse Group EP
can be selected with the abbreviations sgl
, sgs
, sgm
and sge
.
Alternatively, penalties can be combined additively with the arguments pvar
and pgr
, where pvar
is the penalty applied at the variable level and
pgr
is the penalty applied at the group level. The options are lasso
,
scad
, mcp
and exp
for Least Absolute Shrinkage and Selection Operator,
Smoothly Clipped Absolute Deviation, Minimax Concave Penalty and Exponential Penalty.
A list containing:
A vector with estimated coefficients.
A string indicating the type of regression model (linear or binomial).
A vector indicating the group membership of the individual variables in X.
The sequence of lambda values.
Tuning parameter for the mixture of penalties at group and variable level.
A vector containing either the residual sum of squares (linear) or the negative log-likelihood (binomial).
The convergence threshold used for each lambda.
Number of observations.
A string indicating the sparse group penalty used.
A vector of pseudo degrees of freedom for each lambda.
A vector of the number of iterations for each lambda.
A vector of weights multiplied by the group penalty.
The response vector.
The design matrix without intercept.
Buch, G., Schulz, A., Schmidtmann, I., Strauch, K., and Wild, P. S. (2024) Sparse Group Penalties for bi-level variable selection. Biometrical Journal, 66, 2200334. doi:10.1002/bimj.202200334
Simon, N., Friedman, J., Hastie, T., and Tibshirani, R. (2011) A Sparse-Group Lasso. Journal of computational and graphical statistics, 22(2), 231-245. doi:10.1080/10618600.2012.681250
Breheny, P., and Huang J. (2009) Penalized methods for bi-level variable selection. Statistics and its interface, 2: 369-380. doi:10.4310/sii.2009.v2.n3.a10
# Generate data n <- 100 p <- 200 nr <- 10 g <- ceiling(1:p / nr) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) # Linear regression lin_fit <- sgp(X, y_lin, g, type = "linear", penalty = "sgl") plot(lin_fit) lin_fit <- sgp(X, y_lin, g, type = "linear", penalty = "sgs") plot(lin_fit) lin_fit <- sgp(X, y_lin, g, type = "linear", penalty = "sgm") plot(lin_fit) lin_fit <- sgp(X, y_lin, g, type = "linear", penalty = "sge") plot(lin_fit) # Logistic regression log_fit <- sgp(X, y_log, g, type = "logit", penalty = "sgl") plot(log_fit) log_fit <- sgp(X, y_log, g, type = "logit", penalty = "sgs") plot(log_fit) log_fit <- sgp(X, y_log, g, type = "logit", penalty = "sgm") plot(log_fit) log_fit <- sgp(X, y_log, g, type = "logit", penalty = "sge") plot(log_fit)
# Generate data n <- 100 p <- 200 nr <- 10 g <- ceiling(1:p / nr) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) # Linear regression lin_fit <- sgp(X, y_lin, g, type = "linear", penalty = "sgl") plot(lin_fit) lin_fit <- sgp(X, y_lin, g, type = "linear", penalty = "sgs") plot(lin_fit) lin_fit <- sgp(X, y_lin, g, type = "linear", penalty = "sgm") plot(lin_fit) lin_fit <- sgp(X, y_lin, g, type = "linear", penalty = "sge") plot(lin_fit) # Logistic regression log_fit <- sgp(X, y_log, g, type = "logit", penalty = "sgl") plot(log_fit) log_fit <- sgp(X, y_log, g, type = "logit", penalty = "sgs") plot(log_fit) log_fit <- sgp(X, y_log, g, type = "logit", penalty = "sgm") plot(log_fit) log_fit <- sgp(X, y_log, g, type = "logit", penalty = "sge") plot(log_fit)
A function that performs k-fold cross-validation for sparse group penalties for a lambda sequence.
sgp.cv( X, y, group = 1:ncol(X), Z = NULL, ..., nfolds = 10, seed, fold, type, returnY = FALSE, print.trace = FALSE )
sgp.cv( X, y, group = 1:ncol(X), Z = NULL, ..., nfolds = 10, seed, fold, type, returnY = FALSE, print.trace = FALSE )
X |
The design matrix without intercept with the variables to be selected. |
y |
The response vector. |
group |
A vector indicating the group membership of each variable in X. |
Z |
The design matrix of the variables to be included in the model without penalization. |
... |
Other parameters of underlying basic functions. |
nfolds |
The number of folds for cross-validation. |
seed |
A seed provided by the user for the random number generator. |
fold |
A vector of folds specified by the user (default is a random assignment). |
type |
A string indicating the type of regression model (linear or binomial). |
returnY |
A Boolean value indicating whether the fitted values should be returned. |
print.trace |
A Boolean value that specifies whether the beginning of a fold should be printed. |
A list containing:
The average cross-validation error for each value of lambda.
The estimated standard error for each value of cve.
The sequence of lambda values.
The sparse group penalty model fitted to the entire data.
The fold assignments for each observation for the cross-validation procedure.
The index of lambda corresponding to the minimum cross-validation error.
The value of lambda with the minimum cross-validation error.
The deviance for the empty model.
The cross-validation prediction error for each value of lambda (for binomial only).
The fitted values from the cross-validation folds.
# Generate data n <- 100 p <- 200 nr <- 10 g <- ceiling(1:p / nr) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) # Linear regression lin_fit <- sgp.cv(X, y_lin, g, type = "linear", penalty = "sgl") plot(lin_fit) predict(lin_fit, extract = "vars") lin_fit <- sgp.cv(X, y_lin, g, type = "linear", penalty = "sgs") plot(lin_fit) predict(lin_fit, extract = "vars") lin_fit <- sgp.cv(X, y_lin, g, type = "linear", penalty = "sgm") plot(lin_fit) predict(lin_fit, extract = "vars") lin_fit <- sgp.cv(X, y_lin, g, type = "linear", penalty = "sge") plot(lin_fit) predict(lin_fit, extract = "vars") # Logistic regression log_fit <- sgp.cv(X, y_log, g, type = "logit", penalty = "sgl") plot(log_fit) predict(log_fit, extract = "vars") log_fit <- sgp.cv(X, y_log, g, type = "logit", penalty = "sgs") plot(log_fit) predict(log_fit, extract = "vars") log_fit <- sgp.cv(X, y_log, g, type = "logit", penalty = "sgm") plot(log_fit) predict(log_fit, extract = "vars") log_fit <- sgp.cv(X, y_log, g, type = "logit", penalty = "sge") plot(log_fit) predict(log_fit, extract = "vars")
# Generate data n <- 100 p <- 200 nr <- 10 g <- ceiling(1:p / nr) X <- matrix(rnorm(n * p), n, p) b <- c(-3:3) y_lin <- X[, 1:length(b)] %*% b + 5 * rnorm(n) y_log <- rbinom(n, 1, exp(y_lin) / (1 + exp(y_lin))) # Linear regression lin_fit <- sgp.cv(X, y_lin, g, type = "linear", penalty = "sgl") plot(lin_fit) predict(lin_fit, extract = "vars") lin_fit <- sgp.cv(X, y_lin, g, type = "linear", penalty = "sgs") plot(lin_fit) predict(lin_fit, extract = "vars") lin_fit <- sgp.cv(X, y_lin, g, type = "linear", penalty = "sgm") plot(lin_fit) predict(lin_fit, extract = "vars") lin_fit <- sgp.cv(X, y_lin, g, type = "linear", penalty = "sge") plot(lin_fit) predict(lin_fit, extract = "vars") # Logistic regression log_fit <- sgp.cv(X, y_log, g, type = "logit", penalty = "sgl") plot(log_fit) predict(log_fit, extract = "vars") log_fit <- sgp.cv(X, y_log, g, type = "logit", penalty = "sgs") plot(log_fit) predict(log_fit, extract = "vars") log_fit <- sgp.cv(X, y_log, g, type = "logit", penalty = "sgm") plot(log_fit) predict(log_fit, extract = "vars") log_fit <- sgp.cv(X, y_log, g, type = "logit", penalty = "sge") plot(log_fit) predict(log_fit, extract = "vars")