7 changes: 5 additions & 2 deletions DESCRIPTION
@@ -1,7 +1,7 @@
Type: Package
Package: CASE
Title: Cell-type-specific And Shared EQTL fine-mapping
Version: 0.2.0
Version: 0.2.1
Authors@R: c(
person("Chen", "Lin", , "[email protected]", role = c("aut", "cre"),
comment = c(ORCID = "0000-0001-9821-2578")),
@@ -16,7 +16,10 @@ Imports:
magrittr,
MASS,
mvtnorm,
stats
stats,
CorBin,
mvsusieR,
susieR
Encoding: UTF-8
RoxygenNote: 7.3.1
Suggests:
7 changes: 7 additions & 0 deletions NAMESPACE
@@ -4,14 +4,21 @@ export(CASE)
export(CASE_test)
export(CASE_train)
export(get_credible_sets)
importFrom(CorBin,cBern)
importFrom(MASS,ginv)
importFrom(magrittr,"%>%")
importFrom(mvsusieR,create_mixture_prior)
importFrom(mvsusieR,mvsusie_rss)
importFrom(mvtnorm,rmvnorm)
importFrom(stats,cor)
importFrom(stats,cov2cor)
importFrom(stats,lm)
importFrom(stats,p.adjust)
importFrom(stats,pnorm)
importFrom(stats,qchisq)
importFrom(stats,qnorm)
importFrom(stats,quantile)
importFrom(stats,rmultinom)
importFrom(stats,rnorm)
importFrom(stats,sd)
importFrom(susieR,susie_rss)
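
For context, these new importFrom entries correspond to the @importFrom tags added in R/Example.R below; a minimal sketch of that correspondence, assuming the standard roxygen2/devtools workflow:

# Sketch: roxygen2 turns tags like these (added in R/Example.R) ...
#' @importFrom CorBin cBern
#' @importFrom mvsusieR mvsusie_rss create_mixture_prior
#' @importFrom susieR susie_rss
# ... into the NAMESPACE directives shown above when the docs are rebuilt, e.g.
# devtools::document()   # or roxygen2::roxygenise()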
8 changes: 4 additions & 4 deletions R/CASE.R
@@ -1,4 +1,4 @@
#' @title CASE Model
#' @title CASE
#'
#' @description Perform Multi-trait Fine-mapping
#' @author Chen Lin, Hongyu Zhao
@@ -77,10 +77,10 @@ CASE <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, V = NULL, cs = TRUE,
hatS = hatBS$hatS

# V = estimate_null_correlation_simple(mash_set_data(do.call(rbind, raw.data$hatB), do.call(rbind, raw.data$hatS)))

m1 <- CASE_train(hatB = hatB, hatS = hatS, V = V, R = R, N = N, Z = NULL, ...)

m1 <- CASE_train(hatB = hatB, hatS = hatS, V = V, R = R, N = N, Z = NULL, list(...))

res <- CASE_test(hatB = hatB, hatS = hatS, R = R, N = N, CASE_training = m1, Z = NULL, list(...))
res <- CASE_test(hatB = hatB, hatS = hatS, R = R, N = N, CASE_training = m1, Z = NULL, ...)
t2 = Sys.time()
res$time = difftime(t2, t1, units = "secs")
if (cs){
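
One behavioral note on the argument forwarding swapped in this hunk: when the callee collects its extra arguments with args = list(...), passing ... and passing list(...) give args different shapes. A minimal sketch with a hypothetical callee:

# Sketch: how `...` vs `list(...)` arrive in a callee that does args = list(...)
callee <- function(...) {
  args <- list(...)
  str(args)
}
callee(alpha = 0.05, h = 0.1)         # forwarding ...: args has named elements "alpha", "h"
callee(list(alpha = 0.05, h = 0.1))   # passing list(...): args is one unnamed list element,
                                      # so checks like "alpha" %in% names(args) return FALSE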
29 changes: 13 additions & 16 deletions R/CASE_models.R
@@ -21,6 +21,8 @@
CASE_train <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, V = NULL, ...){
args = list(...)

cat("Start Prior fitting.", "\n")

if (is.null(Z)){
Z = hatB / hatS
}
@@ -64,8 +66,6 @@ CASE_train <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, V = NULL, ...){
pi = args$pi.init
U = args$U.orig
} else{
#print(hatB)
#print(hatS)
init = Initialize_pi_U(hatB, hatS, C, M = nrow(R))
pi = init$pi
U = init$U
@@ -89,15 +89,16 @@ CASE_train <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, V = NULL, ...){
pi.in = M1 / M0
M <- nrow(R)
if (M1 == 0){
cat("No marginally significant variants.")
cat("No marginally significant variants in the inputs.", "\n")
return(list(pi = 1, U = list(matrix(0, C, C)), V = V, n.iter = 0))
}

J <- 0
g <- list()
patterns.old = names(U)
repeated_pattern = 0

alpha = ifelse("alpha" %in% names(args), args$alpha, 0.05)

# MCEM steps
for (kk in 1:n.iter){
# cat("\n iter:", kk, " ")
@@ -114,7 +115,7 @@ CASE_train <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, V = NULL, ...){
while (nsim < MC.sim[kk]){
BB[, , nsim + 1] = gBupdate(B = BB[, , nsim], hatB = hatB,
R = R, pi = pi, h = args$h,
TT = gBc$TT, TT_det = gBc$TT_det, mu1 = gBc$mu1, Sigma1 = gBc$Sigma1, alpha = args$alpha)
TT = gBc$TT, TT_det = gBc$TT_det, mu1 = gBc$mu1, Sigma1 = gBc$Sigma1, alpha = alpha)

nsim = nsim + 1
gg[, nsim] = ifelse(as.matrix(BB[, , nsim]) != 0, 1, 0) %>% apply(1, paste, collapse = "")
@@ -134,7 +135,7 @@ CASE_train <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, V = NULL, ...){
L = length(patterns)

if (L <= 1){
cat("Estimated no eQTL effects in the CASE prior fitting step.")
cat("Estimates no eQTL effects in the CASE prior fitting step.", "\n")
return(list(pi = pi, U = U, V = V, n.iter = kk, pi.in = pi.in, M1 = M1))
}

@@ -205,13 +206,12 @@ CASE_train <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, V = NULL, ...){
ll = which(pi < 1e-2 / M * pi.in)
}
if (length(ll) > 0){
# cat("pi delete", ll, "\n")
U = U[-ll]
pi = pi[-ll]
}

if (length(pi) <= 1){
cat("Estimated no eQTL effects in the CASE prior fitting step.")
cat("Estimated no eQTL effects in the CASE prior fitting step.", "\n")
return(list(pi = pi, U = U, V = V, n.iter = kk, pi.in = pi.in, M1 = M1))
}

@@ -222,7 +222,7 @@ CASE_train <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, V = NULL, ...){
ll = integer(0)
dd = 0
for (l in 1:(L-1)){
ind = which(diag(U[[l]]) <= qchisq(0.95, 1) * diag(V))
ind = which(diag(U[[l]]) <= qchisq(1 - alpha, 1) * diag(V))
U[[l]][ind, ] = 0
U[[l]][, ind] = 0
lp = 0
@@ -240,19 +240,16 @@ CASE_train <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, V = NULL, ...){
}

if (length(ll) > 0){
# cat("U delete", ll, "\n")
U = U[-ll]
pi = pi[-ll]
}

L = length(U)
if (L <= 1){
cat("Estimated no eQTL effects in the CASE prior fitting step.")
cat("Estimated no eQTL effects in the CASE prior fitting step.", "\n")
return(list(pi = pi, U = U, V = V, n.iter = kk, pi.in = pi.in, M1 = M1))
}

# cat("\n", names(pi), "\n")
# print(pi)
if (setequal(patterns.old, names(pi))){
repeated_pattern = repeated_pattern + 1
} else{
@@ -295,8 +292,8 @@ CASE_test <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, CASE_training, .
# Here V is V adjusted for sample sizes
#### Testing ####
args = list(...)
cat("Start Posterior Analysis.")
cat("Start Posterior Analysis.", "\n")

U = CASE_training$U
V = CASE_training$V
pi = CASE_training$pi
@@ -371,7 +368,7 @@ CASE_test <- function(Z = NULL, R, hatB = NULL, hatS = NULL, N, CASE_training, .
#' @importFrom magrittr %>%
#' @export
get_credible_sets <- function(pvalues, R, cor.min = 0.5, pip = 0.95, ruled_out = 1e-4){
cat("Start getting credible sets.")
cat("Start getting credible sets.", "\n")

pvalues = as.matrix(pvalues)
C = ncol(pvalues)
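
A quick sketch of how the new alpha default is resolved from ... and then feeds the chi-square pruning threshold in this file (the quantiles shown are the standard values, for illustration only):

# Sketch: resolving alpha with a 0.05 fallback, then using it as the
# chi-square cutoff that zeroes near-null diagonal entries of U.
args <- list(h = 0.1)                                       # caller did not supply alpha
alpha <- ifelse("alpha" %in% names(args), args$alpha, 0.05)
qchisq(1 - alpha, 1)                                        # 3.84, same as the old qchisq(0.95, 1)
args <- list(alpha = 0.01)                                  # stricter caller-supplied alpha
alpha <- ifelse("alpha" %in% names(args), args$alpha, 0.05)
qchisq(1 - alpha, 1)                                        # 6.63, a tighter pruning cutoff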
2 changes: 1 addition & 1 deletion R/CASE_uitility.R
@@ -233,7 +233,7 @@ Initialize_pi_U <- function(hatB, hatS, C, M, sig_threshold = 0.1){

U = list()
for (l in patterns){
U[[l]] = matrix(.1, C, C)
U[[l]] = matrix(.2, C, C)
diag(U[[l]]) = 1
idx = which(as.numeric(strsplit(l, "")[[1]]) == 0)
U[[l]][idx, ] = 0
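
For reference, a small sketch of what this initialization produces for one effect pattern, mirroring the loop body above (l = "101" and C = 3 chosen for illustration):

# Sketch: initial prior covariance for pattern "101" (effects in cell types 1 and 3)
C <- 3
l <- "101"
U_l <- matrix(.2, C, C)                              # off-diagonals now start at 0.2
diag(U_l) <- 1
idx <- which(as.numeric(strsplit(l, "")[[1]]) == 0)  # cell types outside the pattern
U_l[idx, ] <- 0
U_l[, idx] <- 0
U_l
#      [,1] [,2] [,3]
# [1,]  1.0    0  0.2
# [2,]  0.0    0  0.0
# [3,]  0.2    0  1.0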
52 changes: 52 additions & 0 deletions R/Example.R
@@ -0,0 +1,52 @@
#' @importFrom mvsusieR mvsusie_rss create_mixture_prior
#' @importFrom susieR susie_rss
#' @importFrom CorBin cBern
#' @importFrom stats cor lm rnorm
generate_example_data <- function(){
set.seed(1130)
N = 500
M = 1000
C = 3

X = cbind(cBern(N, rep(0.3, M/4), rho = 0.95, type = "DCP"),
cBern(N, rep(0.3, M/2), rho = 0.85, type = "DCP"),
cBern(N, rep(0.3, M/4), rho = 0.98, type = "DCP"))

B = matrix(0, M, C)
idx1 = 10
idx2 = 950
B[idx1, ] = c(sqrt(0.3), sqrt(0.2), sqrt(0.1))
B[idx2, ] = c(sqrt(0.2), sqrt(0.2), 0)
e = matrix(rnorm(N * C), N, C)
Y = X %*% B + e


### heritability
# apply(X %*% B, 2, var) / apply(Y, 2, var)

R = cor(X)

Z = matrix(0, M, C)
for (i in 1:M){
m1 = summary(lm(Y ~ X[, i]))
Z[i, ] = sapply(m1, function(x) x$coefficients[2, 3])
}
Z[c(idx1, idx2), ]
B[c(idx1, idx2), ]

fit <- CASE(Z = Z, R = R, N = rep(N, C))
# fit$sets

f1 = susie_rss(z = Z[, 1], R = R, n = N)
# f1$sets

prior <- create_mixture_prior(R = C)
f2 = mvsusie_rss(Z = Z, R = R, N = N, prior_variance = prior)
# f2$sets
# f2$single_effect_lfsr

# mvsusie_plot(f2)
}
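
The example above calls CASE() with Z-scores; per the signature shown in R/CASE.R, summary-statistic effect sizes and standard errors can be supplied instead. A hedged sketch reusing the simulated objects from generate_example_data(), with hatB/hatS extracted from the same per-SNP regressions:

# Sketch: calling CASE with effect estimates and standard errors instead of Z-scores.
hatB = matrix(0, M, C)
hatS = matrix(0, M, C)
for (i in 1:M){
  m1 = summary(lm(Y ~ X[, i]))
  hatB[i, ] = sapply(m1, function(x) x$coefficients[2, 1])   # effect estimates
  hatS[i, ] = sapply(m1, function(x) x$coefficients[2, 2])   # standard errors
}
fit_b <- CASE(hatB = hatB, hatS = hatS, R = R, N = rep(N, C))
# fit_b$sets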



2 changes: 1 addition & 1 deletion man/CASE.Rd

Some generated files are not rendered by default.