suppressMessages({
  library("ggplot2")
  library("plotly")
  library("htmlwidgets")
  library("htmltools")
  library("dplyr")
  library("rlang")
  library("ggthemes")
  library("scales") # provides manual_pal() used by the publication scales below
  library("data.table")
  library("grid")
  library("gridExtra")
})

# These parallelization libraries are very noisy
suppressPackageStartupMessages({
  library("future")
  library("furrr")
  library("purrr")
})

# Turn all warnings into errors for development
options(warn = 2)

parse_arguments <- function() {
  args <- if (interactive()) {
    c(
      "/home/bryan/documents/develop/hartmanlab/qhtcp-workflow/out/20240116_jhartman2_DoxoHLD/20240116_jhartman2_DoxoHLD",
      "/home/bryan/documents/develop/hartmanlab/qhtcp-workflow/apps/r/SGD_features.tab",
      "/home/bryan/documents/develop/hartmanlab/qhtcp-workflow/out/20240116_jhartman2_DoxoHLD/easy/20240116_jhartman2_DoxoHLD/results_std.txt",
      "/home/bryan/documents/develop/hartmanlab/qhtcp-workflow/out/20240116_jhartman2_DoxoHLD/20240822_jhartman2_DoxoHLD/exp1",
      "Experiment 1: Doxo versus HLD",
      3,
      "/home/bryan/documents/develop/hartmanlab/qhtcp-workflow/out/20240116_jhartman2_DoxoHLD/20240822_jhartman2_DoxoHLD/exp2",
      "Experiment 2: HLD versus Doxo",
      3
    )
  } else {
    commandArgs(trailingOnly = TRUE)
  }

  out_dir <- normalizePath(args[1], mustWork = FALSE)
  sgd_gene_list <- normalizePath(args[2], mustWork = FALSE)
  easy_results_file <- normalizePath(args[3], mustWork = FALSE)

  # The remaining arguments should be in groups of 3
  exp_args <- args[-(1:3)]
  if (length(exp_args) %% 3 != 0) {
    stop("Experiment arguments should be in groups of 3: path, name, sd.")
  }

  # Extract the experiments into a list
  experiments <- list()
  for (i in seq(1, length(exp_args), by = 3)) {
    exp_name <- exp_args[i + 1]
    experiments[[exp_name]] <- list(
      path = normalizePath(exp_args[i], mustWork = FALSE),
      sd = as.numeric(exp_args[i + 2])
    )
  }

  # Extract the trailing number from each path
  trailing_numbers <- sapply(experiments, function(x) {
    path <- x$path
    nums <- gsub("[^0-9]", "", basename(path))
    as.integer(nums)
  })

  # Sort the experiments based on the trailing numbers
  sorted_experiments <- experiments[order(trailing_numbers)]

  list(
    out_dir = out_dir,
    sgd_gene_list = sgd_gene_list,
    easy_results_file = easy_results_file,
    experiments = sorted_experiments
  )
}

args <- parse_arguments()

# Should we keep output in exp dirs or combine in the study output dir?
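# Example non-interactive invocation (a sketch; the script filename and paths below are
# hypothetical and only mirror the argument order expected by parse_arguments()):
#   Rscript calculate_interaction_zscores.R \
#     /path/to/out_dir /path/to/SGD_features.tab /path/to/results_std.txt \
#     /path/to/exp1 "Experiment 1: Doxo versus HLD" 3 \
#     /path/to/exp2 "Experiment 2: HLD versus Doxo" 3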
# dir.create(file.path(args$out_dir, "zscores"), showWarnings = FALSE)
# dir.create(file.path(args$out_dir, "zscores", "qc"), showWarnings = FALSE)

theme_publication <- function(base_size = 14, base_family = "sans", legend_position = NULL) {

  # Ensure that legend_position has a valid value or default to "none"
  legend_position <- if (is.null(legend_position) || length(legend_position) == 0) "none" else legend_position

  theme_foundation <- ggthemes::theme_foundation(base_size = base_size, base_family = base_family)

  theme_foundation %+replace%
    theme(
      plot.title = element_text(face = "bold", size = rel(1.6), hjust = 0.5),
      text = element_text(),
      panel.background = element_blank(),
      plot.background = element_blank(),
      panel.border = element_blank(),
      axis.title = element_text(face = "bold", size = rel(1.4)),
      axis.title.y = element_text(angle = 90, vjust = 2),
      axis.text = element_text(size = rel(1.2)),
      axis.line = element_line(colour = "black"),
      panel.grid.major = element_line(colour = "#f0f0f0"),
      panel.grid.minor = element_blank(),
      legend.key = element_rect(colour = NA),
      legend.position = legend_position,
      legend.direction = if (legend_position == "right") {
        "vertical"
      } else if (legend_position == "bottom") {
        "horizontal"
      } else {
        NULL # No legend direction if position is "none" or other values
      },
      legend.spacing = unit(0, "cm"),
      legend.title = element_text(face = "italic", size = rel(1.3)),
      legend.text = element_text(size = rel(1.2)),
      plot.margin = unit(c(10, 5, 5, 5), "mm")
    )
}

scale_fill_publication <- function(...) {
  discrete_scale("fill", "Publication", manual_pal(values = c(
    "#386cb0", "#fdb462", "#7fc97f", "#ef3b2c", "#662506",
    "#a6cee3", "#fb9a99", "#984ea3", "#ffff33"
  )), ...)
}

scale_colour_publication <- function(...) {
  discrete_scale("colour", "Publication", manual_pal(values = c(
    "#386cb0", "#fdb462", "#7fc97f", "#ef3b2c", "#662506",
    "#a6cee3", "#fb9a99", "#984ea3", "#ffff33"
  )), ...)
}

# Load the initial dataframe from the easy_results_file
load_and_filter_data <- function(easy_results_file, sd = 3) {
  df <- read.delim(easy_results_file, skip = 2, as.is = TRUE, row.names = 1, strip.white = TRUE)

  df <- df %>%
    filter(!(.[[1]] %in% c("", "Scan"))) %>%
    filter(!is.na(ORF) & ORF != "" & !Gene %in% c("BLANK", "Blank", "blank") & Drug != "BMH21") %>%
    # Rename columns
    rename(L = l, num = Num., AUC = AUC96, scan = Scan, last_bg = LstBackgrd, first_bg = X1stBackgrd) %>%
    mutate(
      across(c(Col, Row, num, L, K, r, scan, AUC, last_bg, first_bg), as.numeric),
      delta_bg = last_bg - first_bg,
      delta_bg_tolerance = mean(delta_bg, na.rm = TRUE) + (sd * sd(delta_bg, na.rm = TRUE)),
      NG = if_else(L == 0 & !is.na(L), 1, 0),
      DB = if_else(delta_bg >= delta_bg_tolerance, 1, 0),
      SM = 0,
      OrfRep = if_else(ORF == "YDL227C", "YDL227C", OrfRep), # should these be hardcoded?
      conc_num = as.numeric(gsub("[^0-9\\.]", "", Conc)),
      conc_num_factor = as.numeric(as.factor(conc_num)) - 1, # for legacy purposes
      conc_num_factor_factor = as.factor(conc_num)
    )

  return(df)
}

update_gene_names <- function(df, sgd_gene_list) {
  genes <- read.delim(file = sgd_gene_list, quote = "", header = FALSE,
    colClasses = c(rep("NULL", 3), rep("character", 2), rep("NULL", 11)))

  gene_map <- setNames(genes$V5, genes$V4) # ORF to GeneName mapping

  df <- df %>%
    mutate(
      mapped_genes = gene_map[ORF],
      Gene = if_else(is.na(mapped_genes) | OrfRep == "YDL227C", Gene, mapped_genes),
      Gene = if_else(Gene == "" | Gene == "OCT1", OrfRep, Gene) # Handle invalid names
    )

  return(df)
}

calculate_summary_stats <- function(df, variables, group_vars) {
  summary_stats <- df %>%
    group_by(across(all_of(group_vars))) %>%
    summarise(
      N = n(),
      across(all_of(variables), list(
        mean = ~ mean(.x, na.rm = TRUE),
        median = ~ median(.x, na.rm = TRUE),
        max = ~ ifelse(all(is.na(.x)), NA, max(.x, na.rm = TRUE)),
        min = ~ ifelse(all(is.na(.x)), NA, min(.x, na.rm = TRUE)),
        sd = ~ sd(.x, na.rm = TRUE),
        se = ~ sd(.x, na.rm = TRUE) / sqrt(n() - 1)
      ), .names = "{.fn}_{.col}"),
      .groups = "drop"
    )

  # Create a cleaned version of df that doesn't overlap with summary_stats
  df_cleaned <- df %>%
    select(-any_of(setdiff(intersect(names(df), names(summary_stats)), group_vars)))

  df_joined <- left_join(df_cleaned, summary_stats, by = group_vars)

  return(list(summary_stats = summary_stats, df_with_stats = df_joined))
}

calculate_interaction_scores <- function(df, df_bg, type, overlap_threshold = 2) {

  max_conc <- max(as.numeric(df$conc_num_factor), na.rm = TRUE)
  total_conc_num <- length(unique(df$conc_num))

  if (type == "reference") {
    bg_group_vars <- c("OrfRep", "Gene", "num", "Drug", "conc_num", "conc_num_factor", "conc_num_factor_factor")
    group_vars <- c("OrfRep", "Gene", "num", "Drug")
  } else if (type == "deletion") {
    bg_group_vars <- c("Drug", "conc_num", "conc_num_factor", "conc_num_factor_factor")
    group_vars <- c("OrfRep", "Gene", "Drug")
  }

  perform_lm <- function(x, y, max_conc) {
    if (all(is.na(x)) || all(is.na(y)) || length(x[!is.na(x)]) == 0 || length(y[!is.na(y)]) == 0) {
      return(list(intercept = NA, slope = NA, r_squared = NA, score = NA))
    } else {
      fit <- lm(y ~ x)
      return(list(
        intercept = coef(fit)[1],
        slope = coef(fit)[2],
        r_squared = summary(fit)$r.squared,
        score = max_conc * coef(fit)[2] + coef(fit)[1]
      ))
    }
  }

  # Calculate WT statistics from df_bg
  wt_stats <- df_bg %>%
    group_by(across(all_of(bg_group_vars))) %>%
    summarise(
      WT_L = mean(mean_L, na.rm = TRUE),
      WT_sd_L = mean(sd_L, na.rm = TRUE),
      WT_K = mean(mean_K, na.rm = TRUE),
      WT_sd_K = mean(sd_K, na.rm = TRUE),
      WT_r = mean(mean_r, na.rm = TRUE),
      WT_sd_r = mean(sd_r, na.rm = TRUE),
      WT_AUC = mean(mean_AUC, na.rm = TRUE),
      WT_sd_AUC = mean(sd_AUC, na.rm = TRUE),
      .groups = "drop"
    )

  # Join WT statistics to df
  df <- df %>% left_join(wt_stats, by = c(bg_group_vars))

  # Compute mean values at zero concentration
  mean_zeroes <- df %>%
    filter(conc_num == 0) %>%
    group_by(across(all_of(group_vars))) %>%
    summarise(
      mean_L_zero = mean(mean_L, na.rm = TRUE),
      mean_K_zero = mean(mean_K, na.rm = TRUE),
      mean_r_zero = mean(mean_r, na.rm = TRUE),
      mean_AUC_zero = mean(mean_AUC, na.rm = TRUE),
      .groups = "drop"
    )

  df <- df %>% left_join(mean_zeroes, by = c(group_vars))

  # Calculate Raw Shifts and Z Shifts
  df <- df %>%
    mutate(
      Raw_Shift_L = mean_L_zero - WT_L,
      Raw_Shift_K = mean_K_zero - WT_K,
      Raw_Shift_r = mean_r_zero - WT_r,
      Raw_Shift_AUC = mean_AUC_zero - WT_AUC,
      Z_Shift_L = Raw_Shift_L / WT_sd_L,
      Z_Shift_K =
Raw_Shift_K / WT_sd_K, Z_Shift_r = Raw_Shift_r / WT_sd_r, Z_Shift_AUC = Raw_Shift_AUC / WT_sd_AUC ) calculations <- df %>% group_by(across(all_of(c(group_vars, "conc_num", "conc_num_factor", "conc_num_factor_factor")))) %>% mutate( NG_sum = sum(NG, na.rm = TRUE), DB_sum = sum(DB, na.rm = TRUE), SM_sum = sum(SM, na.rm = TRUE), # Expected values Exp_L = WT_L + Raw_Shift_L, Exp_K = WT_K + Raw_Shift_K, Exp_r = WT_r + Raw_Shift_r, Exp_AUC = WT_AUC + Raw_Shift_AUC, # Deltas Delta_L = mean_L - Exp_L, Delta_K = mean_K - Exp_K, Delta_r = mean_r - Exp_r, Delta_AUC = mean_AUC - Exp_AUC, # Adjust deltas for NG and SM Delta_L = if_else(NG == 1, mean_L - WT_L, Delta_L), Delta_K = if_else(NG == 1, mean_K - WT_K, Delta_K), Delta_r = if_else(NG == 1, mean_r - WT_r, Delta_r), Delta_AUC = if_else(NG == 1, mean_AUC - WT_AUC, Delta_AUC), Delta_L = if_else(SM == 1, mean_L - WT_L, Delta_L), # Calculate Z-scores Zscore_L = Delta_L / WT_sd_L, Zscore_K = Delta_K / WT_sd_K, Zscore_r = Delta_r / WT_sd_r, Zscore_AUC = Delta_AUC / WT_sd_AUC ) %>% ungroup() calculations <- calculations %>% group_by(across(all_of(group_vars))) %>% mutate( # Apply the simple LM function for each variable lm_L = list(perform_lm(Delta_L, conc_num_factor, max_conc)), lm_K = list(perform_lm(Delta_K, conc_num_factor, max_conc)), lm_r = list(perform_lm(Delta_r, conc_num_factor, max_conc)), lm_AUC = list(perform_lm(Delta_AUC, conc_num_factor, max_conc)), # Extract coefficients and statistics for each model lm_intercept_L = lm_L[[1]]$intercept, lm_slope_L = lm_L[[1]]$slope, R_Squared_L = lm_L[[1]]$r_squared, lm_Score_L = lm_L[[1]]$score, lm_intercept_K = lm_K[[1]]$intercept, lm_slope_K = lm_K[[1]]$slope, R_Squared_K = lm_K[[1]]$r_squared, lm_Score_K = lm_K[[1]]$score, lm_intercept_r = lm_r[[1]]$intercept, lm_slope_r = lm_r[[1]]$slope, R_Squared_r = lm_r[[1]]$r_squared, lm_Score_r = lm_r[[1]]$score, lm_intercept_AUC = lm_AUC[[1]]$intercept, lm_slope_AUC = lm_AUC[[1]]$slope, R_Squared_AUC = lm_AUC[[1]]$r_squared, lm_Score_AUC = lm_AUC[[1]]$score ) %>% select(-lm_L, -lm_K, -lm_r, -lm_AUC) %>% ungroup() # For interaction plot error bars delta_means_sds <- calculations %>% group_by(across(all_of(group_vars))) %>% summarise( mean_Delta_L = mean(Delta_L, na.rm = TRUE), mean_Delta_K = mean(Delta_K, na.rm = TRUE), mean_Delta_r = mean(Delta_r, na.rm = TRUE), mean_Delta_AUC = mean(Delta_AUC, na.rm = TRUE), sd_Delta_L = sd(Delta_L, na.rm = TRUE), sd_Delta_K = sd(Delta_K, na.rm = TRUE), sd_Delta_r = sd(Delta_r, na.rm = TRUE), sd_Delta_AUC = sd(Delta_AUC, na.rm = TRUE), .groups = "drop" ) calculations <- calculations %>% left_join(delta_means_sds, by = group_vars) # Summary statistics for lm scores calculations <- calculations %>% mutate( lm_mean_L = mean(lm_Score_L, na.rm = TRUE), lm_sd_L = sd(lm_Score_L, na.rm = TRUE), lm_mean_K = mean(lm_Score_K, na.rm = TRUE), lm_sd_K = sd(lm_Score_K, na.rm = TRUE), lm_mean_r = mean(lm_Score_r, na.rm = TRUE), lm_sd_r = sd(lm_Score_r, na.rm = TRUE), lm_mean_AUC = mean(lm_Score_AUC, na.rm = TRUE), lm_sd_AUC = sd(lm_Score_AUC, na.rm = TRUE), Z_lm_L = (lm_Score_L - lm_mean_L) / lm_sd_L, Z_lm_K = (lm_Score_K - lm_mean_K) / lm_sd_K, Z_lm_r = (lm_Score_r - lm_mean_r) / lm_sd_r, Z_lm_AUC = (lm_Score_AUC - lm_mean_AUC) / lm_sd_AUC ) # Build summary stats (interactions) interactions <- calculations %>% group_by(across(all_of(group_vars))) %>% summarise( num_non_removed_concs = total_conc_num - sum(DB, na.rm = TRUE) - 1, Sum_Z_Score_L = sum(Zscore_L, na.rm = TRUE), Sum_Z_Score_K = sum(Zscore_K, na.rm = TRUE), Sum_Z_Score_r = 
sum(Zscore_r, na.rm = TRUE), Sum_Z_Score_AUC = sum(Zscore_AUC, na.rm = TRUE), Avg_Zscore_L = Sum_Z_Score_L / first(num_non_removed_concs), Avg_Zscore_K = Sum_Z_Score_K / first(num_non_removed_concs), Avg_Zscore_r = Sum_Z_Score_r / first(num_non_removed_concs), Avg_Zscore_AUC = Sum_Z_Score_AUC / first(num_non_removed_concs), # R_Squared R_Squared_L = first(R_Squared_L), R_Squared_K = first(R_Squared_K), R_Squared_r = first(R_Squared_r), R_Squared_AUC = first(R_Squared_AUC), # Interaction Z-scores Z_lm_L = first(Z_lm_L), Z_lm_K = first(Z_lm_K), Z_lm_r = first(Z_lm_r), Z_lm_AUC = first(Z_lm_AUC), # Raw Shifts Raw_Shift_L = first(Raw_Shift_L), Raw_Shift_K = first(Raw_Shift_K), Raw_Shift_r = first(Raw_Shift_r), Raw_Shift_AUC = first(Raw_Shift_AUC), # Z Shifts Z_Shift_L = first(Z_Shift_L), Z_Shift_K = first(Z_Shift_K), Z_Shift_r = first(Z_Shift_r), Z_Shift_AUC = first(Z_Shift_AUC), # Gene-Gene Interaction lm_Score_L = first(lm_Score_L), lm_Score_K = first(lm_Score_K), lm_Score_r = first(lm_Score_r), lm_Score_AUC = first(lm_Score_AUC), # NG, DB, SM values NG_sum_int = sum(NG), DB_sum_int = sum(DB), SM_sum_int = sum(SM), .groups = "drop" ) %>% arrange(desc(Z_lm_L), desc(NG_sum_int)) # Deletion data ranking and linear modeling if (type == "deletion") { interactions <- interactions %>% mutate( Avg_Zscore_L_adjusted = ifelse(is.na(Avg_Zscore_L), 0.001, Avg_Zscore_L), Avg_Zscore_K_adjusted = ifelse(is.na(Avg_Zscore_K), 0.001, Avg_Zscore_K), Avg_Zscore_r_adjusted = ifelse(is.na(Avg_Zscore_r), 0.001, Avg_Zscore_r), Avg_Zscore_AUC_adjusted = ifelse(is.na(Avg_Zscore_AUC), 0.001, Avg_Zscore_AUC), Z_lm_L_adjusted = ifelse(is.na(Z_lm_L), 0.001, Z_lm_L), Z_lm_K_adjusted = ifelse(is.na(Z_lm_K), 0.001, Z_lm_K), Z_lm_r_adjusted = ifelse(is.na(Z_lm_r), 0.001, Z_lm_r), Z_lm_AUC_adjusted = ifelse(is.na(Z_lm_AUC), 0.001, Z_lm_AUC) ) %>% mutate( Rank_L = rank(Avg_Zscore_L_adjusted), Rank_K = rank(Avg_Zscore_K_adjusted), Rank_r = rank(Avg_Zscore_r_adjusted), Rank_AUC = rank(Avg_Zscore_AUC_adjusted), Rank_lm_L = rank(Z_lm_L_adjusted), Rank_lm_K = rank(Z_lm_K_adjusted), Rank_lm_r = rank(Z_lm_r_adjusted), Rank_lm_AUC = rank(Z_lm_AUC_adjusted) ) %>% mutate( lm_R_squared_rank_L = summary(lm(Rank_lm_L ~ Rank_L, data = .))$r.squared, lm_R_squared_rank_K = summary(lm(Rank_lm_K ~ Rank_K, data = .))$r.squared, lm_R_squared_rank_r = summary(lm(Rank_lm_r ~ Rank_r, data = .))$r.squared, lm_R_squared_rank_AUC = summary(lm(Rank_lm_AUC ~ Rank_AUC, data = .))$r.squared ) # Add overlap threshold categories based on Z-lm and Avg-Z scores interactions <- interactions %>% filter(!is.na(Z_lm_L) | !is.na(Avg_Zscore_L)) %>% mutate( Overlap = case_when( Z_lm_L >= overlap_threshold & Avg_Zscore_L >= overlap_threshold ~ "Deletion Enhancer Both", Z_lm_L <= -overlap_threshold & Avg_Zscore_L <= -overlap_threshold ~ "Deletion Suppressor Both", Z_lm_L >= overlap_threshold & Avg_Zscore_L <= overlap_threshold ~ "Deletion Enhancer lm only", Z_lm_L <= overlap_threshold & Avg_Zscore_L >= overlap_threshold ~ "Deletion Enhancer Avg Zscore only", Z_lm_L <= -overlap_threshold & Avg_Zscore_L >= -overlap_threshold ~ "Deletion Suppressor lm only", Z_lm_L >= -overlap_threshold & Avg_Zscore_L <= -overlap_threshold ~ "Deletion Suppressor Avg Zscore only", Z_lm_L >= overlap_threshold & Avg_Zscore_L <= -overlap_threshold ~ "Deletion Enhancer lm, Deletion Suppressor Avg Zscore", Z_lm_L <= -overlap_threshold & Avg_Zscore_L >= overlap_threshold ~ "Deletion Suppressor lm, Deletion Enhancer Avg Zscore", TRUE ~ "No Effect" ), # Apply the perform_lm function for each 
variable pair lm_L = list(perform_lm(Z_lm_L, Avg_Zscore_L, max_conc)), lm_K = list(perform_lm(Z_lm_K, Avg_Zscore_K, max_conc)), lm_r = list(perform_lm(Z_lm_r, Avg_Zscore_r, max_conc)), lm_AUC = list(perform_lm(Z_lm_AUC, Avg_Zscore_AUC, max_conc)), # Correlation models for various pairs Z_lm_K_L = list(perform_lm(Z_lm_K, Z_lm_L, max_conc)), Z_lm_r_L = list(perform_lm(Z_lm_r, Z_lm_L, max_conc)), Z_lm_AUC_L = list(perform_lm(Z_lm_AUC, Z_lm_L, max_conc)), Z_lm_r_K = list(perform_lm(Z_lm_r, Z_lm_K, max_conc)), Z_lm_AUC_K = list(perform_lm(Z_lm_AUC, Z_lm_K, max_conc)), Z_lm_AUC_r = list(perform_lm(Z_lm_AUC, Z_lm_r, max_conc)), # Extract coefficients and statistics for each model lm_rank_intercept_L = lm_L[[1]]$intercept, lm_rank_slope_L = lm_L[[1]]$slope, R_Squared_L = lm_L[[1]]$r_squared, lm_Score_L = lm_L[[1]]$score, lm_intercept_K = lm_K[[1]]$intercept, lm_slope_K = lm_K[[1]]$slope, R_Squared_K = lm_K[[1]]$r_squared, lm_Score_K = lm_K[[1]]$score, lm_intercept_r = lm_r[[1]]$intercept, lm_slope_r = lm_r[[1]]$slope, R_Squared_r = lm_r[[1]]$r_squared, lm_Score_r = lm_r[[1]]$score, lm_intercept_AUC = lm_AUC[[1]]$intercept, lm_slope_AUC = lm_AUC[[1]]$slope, R_Squared_AUC = lm_AUC[[1]]$r_squared, lm_Score_AUC = lm_AUC[[1]]$score, Z_lm_intercept_K_L = Z_lm_K_L[[1]]$intercept, Z_lm_slope_K_L = Z_lm_K_L[[1]]$slope, Z_lm_R_squared_K_L = Z_lm_K_L[[1]]$r_squared, Z_lm_Score_K_L = Z_lm_K_L[[1]]$score, Z_lm_intercept_r_L = Z_lm_r_L[[1]]$intercept, Z_lm_slope_r_L = Z_lm_r_L[[1]]$slope, Z_lm_R_squared_r_L = Z_lm_r_L[[1]]$r_squared, Z_lm_Score_r_L = Z_lm_r_L[[1]]$score, Z_lm_intercept_AUC_L = Z_lm_AUC_L[[1]]$intercept, Z_lm_slope_AUC_L = Z_lm_AUC_L[[1]]$slope, Z_lm_R_squared_AUC_L = Z_lm_AUC_L[[1]]$r_squared, Z_lm_Score_AUC_L = Z_lm_AUC_L[[1]]$score, Z_lm_intercept_r_K = Z_lm_r_K[[1]]$intercept, Z_lm_slope_r_K = Z_lm_r_K[[1]]$slope, Z_lm_R_squared_r_K = Z_lm_r_K[[1]]$r_squared, Z_lm_Score_r_K = Z_lm_r_K[[1]]$score, Z_lm_intercept_AUC_K = Z_lm_AUC_K[[1]]$intercept, Z_lm_slope_AUC_K = Z_lm_AUC_K[[1]]$slope, Z_lm_R_squared_AUC_K = Z_lm_AUC_K[[1]]$r_squared, Z_lm_Score_AUC_K = Z_lm_AUC_K[[1]]$score, Z_lm_intercept_AUC_r = Z_lm_AUC_r[[1]]$intercept, Z_lm_slope_AUC_r = Z_lm_AUC_r[[1]]$slope, Z_lm_R_squared_AUC_r = Z_lm_AUC_r[[1]]$r_squared, Z_lm_Score_AUC_r = Z_lm_AUC_r[[1]]$score ) %>% select( -lm_L, -lm_K, -lm_r, -lm_AUC, -Z_lm_K_L, -Z_lm_r_L, -Z_lm_AUC_L, -Z_lm_r_K, -Z_lm_AUC_K, -Z_lm_AUC_r) } # end deletion-specific block # Create the final calculations and interactions dataframes with only required columns for csv output df_calculations <- calculations %>% select( all_of(group_vars), conc_num, conc_num_factor, conc_num_factor_factor, N, mean_L, median_L, sd_L, se_L, mean_K, median_K, sd_K, se_K, mean_r, median_r, sd_r, se_r, mean_AUC, median_AUC, sd_AUC, se_AUC, Raw_Shift_L, Raw_Shift_K, Raw_Shift_r, Raw_Shift_AUC, Z_Shift_L, Z_Shift_K, Z_Shift_r, Z_Shift_AUC, WT_L, WT_K, WT_r, WT_AUC, WT_sd_L, WT_sd_K, WT_sd_r, WT_sd_AUC, Exp_L, Exp_K, Exp_r, Exp_AUC, Delta_L, Delta_K, Delta_r, Delta_AUC, mean_Delta_L, mean_Delta_K, mean_Delta_r, mean_Delta_AUC, Zscore_L, Zscore_K, Zscore_r, Zscore_AUC, NG_sum, DB_sum, SM_sum ) %>% rename(NG = NG_sum, DB = DB_sum, SM = SM_sum) df_interactions <- interactions %>% select( any_of(c( group_vars, "Avg_Zscore_L", "Avg_Zscore_K", "Avg_Zscore_r", "Avg_Zscore_AUC", "Sum_Z_Score_L", "Sum_Z_Score_K", "Sum_Z_Score_r", "Sum_Z_Score_AUC", "Z_lm_L", "Z_lm_K", "Z_lm_r", "Z_lm_AUC", "Raw_Shift_L", "Raw_Shift_K", "Raw_Shift_r", "Raw_Shift_AUC", "Z_Shift_L", "Z_Shift_K", "Z_Shift_r", 
"Z_Shift_AUC", "lm_Score_L", "lm_Score_K", "lm_Score_r", "lm_Score_AUC", "R_Squared_L", "R_Squared_K", "R_Squared_r", "R_Squared_AUC", "NG_sum_int", "DB_sum_int", "SM_sum_int", "Z_lm_intercept_L", "Z_lm_slope_L", "Z_lm_R_squared_L", "Z_lm_Score_L", "Z_lm_intercept_K", "Z_lm_slope_K", "Z_lm_R_squared_K", "Z_lm_Score_K", "Z_lm_intercept_r", "Z_lm_slope_r", "Z_lm_R_squared_r", "Z_lm_Score_r", "Z_lm_intercept_AUC", "Z_lm_slope_AUC", "Z_lm_R_squared_AUC", "Z_lm_Score_AUC", "Z_lm_intercept_K_L", "Z_lm_slope_K_L", "Z_lm_R_squared_K_L", "Z_lm_Score_K_L", "Z_lm_intercept_r_L", "Z_lm_slope_r_L", "Z_lm_R_squared_r_L", "Z_lm_Score_r_L", "Z_lm_intercept_AUC_L", "Z_lm_slope_AUC_L", "Z_lm_R_squared_AUC_L", "Z_lm_Score_AUC_L", "Z_lm_intercept_r_K", "Z_lm_slope_r_K", "Z_lm_R_squared_r_K", "Z_lm_Score_r_K", "Z_lm_intercept_AUC_K", "Z_lm_slope_AUC_K", "Z_lm_R_squared_AUC_K", "Z_lm_Score_AUC_K", "Z_lm_intercept_AUC_r", "Z_lm_slope_AUC_r", "Z_lm_R_squared_AUC_r", "Z_lm_Score_AUC_r" )) ) %>% rename(NG = NG_sum_int, DB = DB_sum_int, SM = SM_sum_int) # Avoid column collision on left join for overlapping variables calculations_no_overlap <- calculations %>% select(-any_of(c("DB", "NG", "SM", # Don't need these anywhere so easier to remove "Raw_Shift_L", "Raw_Shift_K", "Raw_Shift_r", "Raw_Shift_AUC", "R_Squared_L", "R_Squared_K", "R_Squared_r", "R_Squared_AUC", # we need these for the interactions but the original code has the same names in both datasets "Z_Shift_L", "Z_Shift_K", "Z_Shift_r", "Z_Shift_AUC" ))) full_data <- calculations_no_overlap %>% left_join(interactions, by = group_vars) # Return final dataframes return(list( calculations = df_calculations, interactions = df_interactions, full_data = full_data )) } generate_and_save_plots <- function(out_dir, filename, plot_configs, page_width = 12, page_height = 8) { message("Generating ", filename, ".pdf and ", filename, ".html") plot_groups <- if ("plots" %in% names(plot_configs)) { list(plot_configs) # Single group } else { plot_configs # Multiple groups } pdf(file.path(out_dir, paste0(filename, ".pdf")), width = page_width, height = page_height) for (group in plot_groups) { static_plots <- list() plotly_plots <- list() for (i in seq_along(group$plots)) { config <- group$plots[[i]] df <- config$df # Filter NAs if (!is.null(config$filter_na) && config$filter_na) { df <- df %>% filter(!is.na(!!sym(config$y_var))) } # TODO for now skip all NA plots NA data # Eventually add to own or filter_na block so we can handle selectively if (nrow(df) == 0) { message("Insufficient data for plot:", config$title) next # skip plot if insufficient data is available } # Create initial aes mappings for all plot types aes_mapping <- aes(x = !!sym(config$x_var)) # required if (!is.null(config$y_var)) { aes_mapping <- modifyList(aes_mapping, aes(y = !!sym(config$y_var))) # optional for density/bar plots } if (!is.null(config$color_var)) { aes_mapping <- modifyList(aes_mapping, aes(color = !!sym(config$color_var))) # dynamic color_var } else if (!is.null(config$color)) { aes_mapping <- modifyList(aes_mapping, aes(color = config$color)) # static color } if (config$plot_type == "bar" && !is.null(config$color_var)) { aes_mapping <- modifyList(aes_mapping, aes(fill = !!sym(config$color_var))) # only fill bar plots } # Begin plot generation plot <- ggplot(df, aes_mapping) + theme_publication(legend_position = config$legend_position) plot <- switch(config$plot_type, "scatter" = generate_scatter_plot(plot, config), "box" = generate_boxplot(plot, config), "density" = plot + geom_density(), 
"bar" = plot + geom_bar(), plot # default ) if (!is.null(config$title)) { plot <- plot + ggtitle(config$title) if (!is.null(config$title_size)) { plot <- plot + theme(plot.title = element_text(size = config$title_size)) } } if (!is.null(config$x_label)) plot <- plot + xlab(config$x_label) if (!is.null(config$y_label)) plot <- plot + ylab(config$y_label) if (!is.null(config$coord_cartesian)) plot <- plot + coord_cartesian(ylim = config$coord_cartesian) #plotly_plot <- suppressWarnings(plotly::ggplotly(plot)) static_plots[[i]] <- plot #plotly_plots[[i]] <- plotly_plot } grid_layout <- group$grid_layout if (!is.null(grid_layout)) { if (is.null(grid_layout$ncol)) { grid_layout$ncol <- 1 } if (!is.null(grid_layout$ncol) && is.null(grid_layout$nrow)) { num_plots <- length(static_plots) grid_layout$nrow <- ceiling(num_plots / grid_layout$ncol) } # total_spots <- grid_layout$nrow * grid_layout$ncol # num_plots <- length(static_plots) # if (num_plots < total_spots) { # message("Filling ", total_spots - num_plots, " empty spots with nullGrob()") # static_plots <- c(static_plots, replicate(total_spots - num_plots, nullGrob(), simplify = FALSE)) # } grid.arrange( grobs = static_plots, ncol = grid_layout$ncol, nrow = grid_layout$nrow ) } else { for (plot in static_plots) { print(plot) } } } dev.off() # out_html_file <- file.path(out_dir, paste0(filename, ".html")) # message("Saving combined HTML file: ", out_html_file) # htmltools::save_html( # htmltools::tagList(plotly_plots), # file = out_html_file # ) } generate_scatter_plot <- function(plot, config) { # Define the points shape <- if (!is.null(config$shape)) config$shape else 3 size <- if (!is.null(config$size)) config$size else 1.5 position <- if (!is.null(config$position) && config$position == "jitter") { position_jitter(width = 0.4, height = 0.1) } else { "identity" } plot <- plot + geom_point( shape = shape, size = size, position = position ) # Add a cyan point for the reference data for correlation plots if (!is.null(config$cyan_points) && config$cyan_points) { plot <- plot + geom_point( aes(x = !!sym(config$x_var), y = !!sym(config$y_var)), data = config$df_reference, color = "cyan", shape = 3, size = 0.5 ) } # Add error bars if specified if (!is.null(config$error_bar) && config$error_bar) { # Check if custom columns are provided for y_mean and y_sd, or use the defaults y_mean_col <- if (!is.null(config$error_bar_params$y_mean_col)) { config$error_bar_params$y_mean_col } else { paste0("mean_", config$y_var) } y_sd_col <- if (!is.null(config$error_bar_params$y_sd_col)) { config$error_bar_params$y_sd_col } else { paste0("sd_", config$y_var) } # Use rlang to handle custom error bar calculations if (!is.null(config$error_bar_params$custom_error_bar)) { custom_ymin_expr <- rlang::parse_expr(config$error_bar_params$custom_error_bar$ymin) custom_ymax_expr <- rlang::parse_expr(config$error_bar_params$custom_error_bar$ymax) plot <- plot + geom_errorbar( aes( ymin = !!custom_ymin_expr, ymax = !!custom_ymax_expr ), color = config$error_bar_params$color, linewidth = ifelse(is.null(config$error_bar_params$linewidth), 0.1, config$error_bar_params$linewidth) ) } else { # If no custom error bar formula, use the default or dynamic ones if (!is.null(config$color_var) && config$color_var %in% colnames(config$df)) { # Only use color_var if it's present in the dataframe plot <- plot + geom_errorbar( aes( ymin = !!sym(y_mean_col) - !!sym(y_sd_col), ymax = !!sym(y_mean_col) + !!sym(y_sd_col), color = !!sym(config$color_var) ), linewidth = 0.1 ) } else { # If 
color_var is missing, fall back to a default color or none plot <- plot + geom_errorbar( aes( ymin = !!sym(y_mean_col) - !!sym(y_sd_col), ymax = !!sym(y_mean_col) + !!sym(y_sd_col) ), color = config$error_bar_params$color, # use the provided color or default linewidth = ifelse(is.null(config$error_bar_params$linewidth), 0.1, config$error_bar_params$linewidth) ) } } # Add the center point if the option is provided if (!is.null(config$error_bar_params$mean_point) && config$error_bar_params$mean_point) { if (!is.null(config$error_bar_params$color)) { plot <- plot + geom_point( aes(x = !!sym(config$x_var), y = !!sym(y_mean_col)), color = config$error_bar_params$color, shape = 16 ) } else { plot <- plot + geom_point( aes(x = !!sym(config$x_var), y = !!sym(y_mean_col)), shape = 16 ) } } } # Add linear regression line if specified if (!is.null(config$lm_line)) { # Extract necessary values intercept <- config$lm_line$intercept # required slope <- config$lm_line$slope # required xmin <- ifelse(!is.null(config$lm_line$xmin), config$lm_line$xmin, min(as.numeric(config$df[[config$x_var]]))) xmax <- ifelse(!is.null(config$lm_line$xmax), config$lm_line$xmax, max(as.numeric(config$df[[config$x_var]]))) color <- ifelse(!is.null(config$lm_line$color), config$lm_line$color, "blue") linewidth <- ifelse(!is.null(config$lm_line$linewidth), config$lm_line$linewidth, 1) ymin <- intercept + slope * xmin ymax <- intercept + slope * xmax # Ensure y-values are within y-limits (if any) if (!is.null(config$ylim_vals)) { ymin_within_limits <- ymin >= config$ylim_vals[1] && ymin <= config$ylim_vals[2] ymax_within_limits <- ymax >= config$ylim_vals[1] && ymax <= config$ylim_vals[2] # Adjust or skip based on whether the values fall within limits if (ymin_within_limits && ymax_within_limits) { plot <- plot + annotate( "segment", x = xmin, xend = xmax, y = ymin, yend = ymax, color = color, linewidth = linewidth, ) } else { message("Skipping linear regression line due to y-values outside of limits") } } else { # If no y-limits are provided, proceed with the annotation plot <- plot + annotate( "segment", x = xmin, xend = xmax, y = ymin, yend = ymax, color = color, linewidth = linewidth ) } } # Add SD Bands if specified if (!is.null(config$sd_band)) { plot <- plot + annotate( "rect", xmin = -Inf, xmax = Inf, ymin = config$sd_band, ymax = Inf, fill = ifelse(!is.null(config$fill_positive), config$fill_positive, "#542788"), alpha = ifelse(!is.null(config$alpha_positive), config$alpha_positive, 0.3) ) + annotate( "rect", xmin = -Inf, xmax = Inf, ymin = -config$sd_band, ymax = -Inf, fill = ifelse(!is.null(config$fill_negative), config$fill_negative, "orange"), alpha = ifelse(!is.null(config$alpha_negative), config$alpha_negative, 0.3) ) + geom_hline( yintercept = c(-config$sd_band, config$sd_band), color = ifelse(!is.null(config$hl_color), config$hl_color, "black") ) } # # Add rectangles if specified # if (!is.null(config$rectangles)) { # for (rect in config$rectangles) { # plot <- plot + annotate( # "rect", # xmin = rect$xmin, # xmax = rect$xmax, # ymin = rect$ymin, # ymax = rect$ymax, # fill = ifelse(is.null(rect$fill), NA, rect$fill), # color = ifelse(is.null(rect$color), "black", rect$color), # alpha = ifelse(is.null(rect$alpha), 0.1, rect$alpha) # ) # } # } # Customize X-axis if specified if (!is.null(config$x_breaks) && !is.null(config$x_labels) && !is.null(config$x_label)) { # Check if x_var is factor or character (for discrete x-axis) if (is.factor(plot$data[[config$x_var]]) || is.character(plot$data[[config$x_var]])) { 
plot <- plot + scale_x_discrete( name = config$x_label, breaks = config$x_breaks, labels = config$x_labels ) } else { plot <- plot + scale_x_continuous( name = config$x_label, breaks = config$x_breaks, labels = config$x_labels ) } } # Set Y-axis limits if specified if (!is.null(config$ylim_vals)) { plot <- plot + scale_y_continuous(limits = config$ylim_vals) } return(plot) } generate_boxplot <- function(plot, config) { # Convert x_var to a factor within aes mapping plot <- plot + geom_boxplot(aes(x = factor(!!sym(config$x_var)))) # Customize X-axis if specified if (!is.null(config$x_breaks) && !is.null(config$x_labels) && !is.null(config$x_label)) { # Check if x_var is factor or character (for discrete x-axis) if (is.factor(plot$data[[config$x_var]]) || is.character(plot$data[[config$x_var]])) { plot <- plot + scale_x_discrete( name = config$x_label, breaks = config$x_breaks, labels = config$x_labels ) } else { plot <- plot + scale_x_continuous( name = config$x_label, breaks = config$x_breaks, labels = config$x_labels ) } } return(plot) } generate_plate_analysis_plot_configs <- function(variables, df_before = NULL, df_after = NULL, plot_type = "scatter", stages = c("before", "after")) { plot_configs <- list() for (var in variables) { for (stage in stages) { df_plot <- if (stage == "before") df_before else df_after # Check for non-finite values in the y-variable # df_plot_filtered <- df_plot %>% filter(is.finite(!!sym(var))) # Adjust settings based on plot_type plot_config <- list( df = df_plot, x_var = "scan", y_var = var, plot_type = plot_type, title = paste("Plate analysis by Drug Conc for", var, stage, "quality control"), color_var = "conc_num_factor_factor", size = 0.2, error_bar = (plot_type == "scatter"), legend_position = "bottom", filter_na = TRUE ) # Add config to plots list plot_configs <- append(plot_configs, list(plot_config)) } } return(list(plots = plot_configs)) } generate_interaction_plot_configs <- function(df_summary, df_interactions, type) { # Define the y-limits for the plots limits_map <- list( L = c(0, 130), K = c(-20, 160), r = c(0, 1), AUC = c(0, 12500) ) stats_plot_configs <- list() stats_boxplot_configs <- list() delta_plot_configs <- list() # Overall statistics plots OrfRep <- first(df_summary$OrfRep) # this should correspond to the reference strain for (plot_type in c("scatter", "box")) { for (var in names(limits_map)) { y_limits <- limits_map[[var]] y_span <- y_limits[2] - y_limits[1] # Common plot configuration plot_config <- list( df = df_summary, plot_type = plot_type, x_var = "conc_num_factor_factor", y_var = var, shape = 16, x_label = paste0("[", unique(df_summary$Drug)[1], "]"), coord_cartesian = y_limits, x_breaks = unique(df_summary$conc_num_factor_factor), x_labels = as.character(unique(df_summary$conc_num)) ) # Add specific configurations for scatter and box plots if (plot_type == "scatter") { plot_config$title <- sprintf("%s Scatter RF for %s with SD", OrfRep, var) plot_config$error_bar <- TRUE plot_config$error_bar_params <- list( color = "red", mean_point = TRUE, y_mean_col = paste0("mean_mean_", var), y_sd_col = paste0("mean_sd_", var) ) plot_config$position <- "jitter" annotations <- list( list(x = 0.25, y = y_limits[1] + 0.08 * y_span, label = " NG =", size = 4), list(x = 0.25, y = y_limits[1] + 0.04 * y_span, label = " DB =", size = 4), list(x = 0.25, y = y_limits[1], label = " SM =", size = 4) ) for (x_val in unique(df_summary$conc_num_factor_factor)) { current_df <- df_summary %>% filter(!!sym(plot_config$x_var) == x_val) annotations <- 
append(annotations, list( list(x = x_val, y = y_limits[1] + 0.08 * y_span, label = first(current_df$NG, default = 0), size = 4), list(x = x_val, y = y_limits[1] + 0.04 * y_span, label = first(current_df$DB, default = 0), size = 4), list(x = x_val, y = y_limits[1], label = first(current_df$SM, default = 0), size = 4) )) } plot_config$annotations <- annotations stats_plot_configs <- append(stats_plot_configs, list(plot_config)) } else if (plot_type == "box") { plot_config$title <- sprintf("%s Box RF for %s with SD", OrfRep, var) plot_config$position <- "dodge" stats_boxplot_configs <- append(stats_boxplot_configs, list(plot_config)) } } } # Delta interaction plots delta_limits_map <- list( L = c(-60, 60), K = c(-60, 60), r = c(-0.6, 0.6), AUC = c(-6000, 6000) ) # Select the data grouping by data type if (type == "reference") { group_vars <- c("OrfRep", "Gene", "num") } else if (type == "deletion") { group_vars <- c("OrfRep", "Gene") } grouped_data <- df_interactions %>% group_by(across(all_of(group_vars))) %>% group_split() for (group_data in grouped_data) { # Build the plot title OrfRep <- first(group_data$OrfRep) Gene <- first(group_data$Gene) if (type == "reference") { num <- if ("num" %in% names(group_data)) first(group_data$num) else "" OrfRepTitle <- paste(OrfRep, Gene, num, sep = "_") } else if (type == "deletion") { OrfRepTitle <- OrfRep } for (var in names(delta_limits_map)) { y_limits <- delta_limits_map[[var]] y_span <- y_limits[2] - y_limits[1] y_var_name <- paste0("Delta_", var) # Anti-filter to select out-of-bounds rows out_of_bounds <- group_data %>% filter(is.na(!!sym(y_var_name)) | !!sym(y_var_name) < y_limits[1] | !!sym(y_var_name) > y_limits[2]) if (nrow(out_of_bounds) > 0) { message(sprintf("Filtered %d row(s) from '%s' because %s is outside of y-limits: [%f, %f]", nrow(out_of_bounds), OrfRepTitle, y_var_name, y_limits[1], y_limits[2] )) } # Do the actual filtering group_data_filtered <- group_data %>% filter(!is.na(!!sym(y_var_name)) & !!sym(y_var_name) >= y_limits[1] & !!sym(y_var_name) <= y_limits[2]) if (nrow(group_data_filtered) == 0) { message("Insufficient data for plot: ", OrfRepTitle, " ", var) next # skip plot if insufficient data is available } WT_sd_value <- first(group_data_filtered[[paste0("WT_sd_", var)]], default = 0) Z_Shift_value <- round(first(group_data_filtered[[paste0("Z_Shift_", var)]], default = 0), 2) Z_lm_value <- round(first(group_data_filtered[[paste0("Z_lm_", var)]], default = 0), 2) R_squared_value <- round(first(group_data_filtered[[paste0("R_Squared_", var)]], default = 0), 2) NG_value <- first(group_data_filtered$NG, default = 0) DB_value <- first(group_data_filtered$DB, default = 0) SM_value <- first(group_data_filtered$SM, default = 0) lm_intercept_col <- paste0("lm_intercept_", var) lm_slope_col <- paste0("lm_slope_", var) lm_intercept <- first(group_data_filtered[[lm_intercept_col]], default = 0) lm_slope <- first(group_data_filtered[[lm_slope_col]], default = 0) plot_config <- list( df = group_data_filtered, plot_type = "scatter", x_var = "conc_num_factor_factor", y_var = y_var_name, x_label = paste0("[", unique(df_summary$Drug)[1], "]"), shape = 16, title = paste(OrfRepTitle, Gene, sep = " "), title_size = rel(1.4), coord_cartesian = y_limits, annotations = list( list(x = 1, y = y_limits[2] - 0.1 * y_span, label = paste(" ZShift =", round(Z_Shift_value, 2))), list(x = 1, y = y_limits[2] - 0.2 * y_span, label = paste(" lm ZScore =", round(Z_lm_value, 2))), # list(x = 1, y = y_limits[2] - 0.3 * y_span, label = paste(" R-squared =", 
round(R_squared_value, 2))), list(x = 1, y = y_limits[1] + 0.1 * y_span, label = paste("NG =", NG_value)), list(x = 1, y = y_limits[1] + 0.05 * y_span, label = paste("DB =", DB_value)), list(x = 1, y = y_limits[1], label = paste("SM =", SM_value)) ), error_bar = TRUE, error_bar_params = list( custom_error_bar = list( ymin = paste0("0 - 2 * WT_sd_", var), ymax = paste0("0 + 2 * WT_sd_", var) ), color = "gray70", linewidth = 0.5 ), x_breaks = unique(group_data_filtered$conc_num_factor_factor), x_labels = as.character(unique(group_data_filtered$conc_num)), ylim_vals = y_limits, lm_line = list( intercept = lm_intercept, slope = lm_slope, color = "blue", linewidth = 0.8 ) ) delta_plot_configs <- append(delta_plot_configs, list(plot_config)) } } # Group delta plots in chunks of 12 per page chunk_size <- 12 delta_plot_chunks <- split(delta_plot_configs, ceiling(seq_along(delta_plot_configs) / chunk_size)) return(c( list(list(grid_layout = list(ncol = 2), plots = stats_plot_configs)), list(list(grid_layout = list(ncol = 2), plots = stats_boxplot_configs)), lapply(delta_plot_chunks, function(chunk) list(grid_layout = list(ncol = 4), plots = chunk)) )) } generate_rank_plot_configs <- function(df, is_lm = FALSE, filter_na = FALSE, overlap_color = FALSE) { sd_bands <- c(1, 2, 3) plot_configs <- list() variables <- c("L", "K") # Helper function to create a rank plot configuration create_plot_config <- function(variable, rank_var, zscore_var, y_label, sd_band, filter_na, with_annotations = TRUE) { num_enhancers <- sum(df[[zscore_var]] >= sd_band, na.rm = TRUE) num_suppressors <- sum(df[[zscore_var]] <= -sd_band, na.rm = TRUE) # Default plot config plot_config <- list( df = df, x_var = rank_var, y_var = zscore_var, x_label = "Rank", y_label = y_label, plot_type = "scatter", title = paste(y_label, "vs. 
Rank for", variable, "above", sd_band, "SD"), sd_band = sd_band, fill_positive = "#542788", fill_negative = "orange", alpha_positive = 0.3, alpha_negative = 0.3, shape = 3, size = 0.1, filter_na = filter_na, legend_position = "none" ) # Selectively add annotations if (with_annotations) { plot_config$annotations <- list( list( x = nrow(df) / 2, y = 10, label = paste("Deletion Enhancers =", num_enhancers) ), list( x = nrow(df) / 2, y = -10, label = paste("Deletion Suppressors =", num_suppressors) ) ) } return(plot_config) } # Generate plots for each variable for (variable in variables) { rank_var <- if (is_lm) paste0("Rank_lm_", variable) else paste0("Rank_", variable) zscore_var <- if (is_lm) paste0("Z_lm_", variable) else paste0("Avg_Zscore_", variable) y_label <- if (is_lm) paste("Int Z score", variable) else paste("Avg Z score", variable) # Loop through SD bands for (sd_band in sd_bands) { # Create plot with annotations plot_configs[[length(plot_configs) + 1]] <- create_plot_config(variable, rank_var, zscore_var, y_label, sd_band, filter_na, with_annotations = TRUE) # Create plot without annotations plot_configs[[length(plot_configs) + 1]] <- create_plot_config(variable, rank_var, zscore_var, y_label, sd_band, filter_na, with_annotations = FALSE) } } # Group delta plots in chunks of 6 per page chunk_size <- 6 plot_chunks <- split(plot_configs, ceiling(seq_along(plot_configs) / chunk_size)) return(c( lapply(plot_chunks, function(chunk) list(grid_layout = list(ncol = 3), plots = chunk)) )) } generate_correlation_plot_configs <- function(df, df_reference) { # Define relationships for different-variable correlations relationships <- list( list(x = "L", y = "K"), # x-var is predictor, y-var is reponse list(x = "L", y = "r"), list(x = "L", y = "AUC"), list(x = "K", y = "r"), list(x = "K", y = "AUC"), list(x = "r", y = "AUC") ) # Filter both dataframes for missing linear model zscores for plotting df <- df %>% filter(!is.na(Z_lm_L)) df_reference <- df_reference %>% filter(!is.na(Z_lm_L)) plot_configs <- list() # Iterate over the option to highlight cyan points (TRUE/FALSE) highlight_cyan_options <- c(FALSE, TRUE) for (highlight_cyan in highlight_cyan_options) { for (rel in relationships) { # Extract relevant variable names for Z_lm values x_var <- paste0("Z_lm_", rel$x) # predictor y_var <- paste0("Z_lm_", rel$y) # response print(paste("rel$x:", rel$x)) print(paste("rel$y:", rel$y)) print(paste("Generating correlation plot for response(y):", y_var, "and predictor(x):", x_var)) print(paste("Relationship suffix:", rel$y, "_", rel$x)) # Extract the R-squared, intercept, and slope from the df (first value) intercept <- df[[paste0("Z_lm_intercept_", rel$y, "_", rel$x)]][1] slope <- df[[paste0("Z_lm_slope_", rel$y, "_", rel$x)]][1] r_squared <- df[[paste0("Z_lm_R_squared_", rel$y, "_", rel$x)]][1] print(paste("intercept:", intercept)) print(paste("slope:", slope)) print(paste("r_squared:", r_squared)) r_squared_rounded <- round(r_squared, 4) r_squared_label <- paste("R-squared =", r_squared_rounded) print(paste("r_squared_label:", r_squared_label)) # Find the max and min of both dataframes for printing linear regression line xmin <- min(c(min(df[[x_var]]), min(df_reference[[x_var]]))) xmax <- max(c(max(df[[x_var]]), max(df_reference[[x_var]]))) # Generate the label for the plot plot_label <- paste("Interaction", rel$x, "vs.", rel$y) # Construct plot config plot_config <- list( df = df, df_reference = df_reference, x_var = x_var, y_var = y_var, plot_type = "scatter", title = plot_label, annotations = 
list( list( x = mean(df[[x_var]], na.rm = TRUE), y = mean(df[[y_var]], na.rm = TRUE), label = r_squared_label ) ), lm_line = list( intercept = intercept, slope = slope, color = "tomato3", linewidth = 0.8, xmin = xmin, xmax = xmax ), color = "gray70", filter_na = TRUE, cyan_points = highlight_cyan # include cyan points or not based on the loop ) plot_configs <- append(plot_configs, list(plot_config)) } } return(list(plots = plot_configs)) } main <- function() { lapply(names(args$experiments), function(exp_name) { exp <- args$experiments[[exp_name]] exp_path <- exp$path exp_sd <- exp$sd out_dir <- file.path(exp_path, "zscores") out_dir_qc <- file.path(exp_path, "zscores", "qc") dir.create(out_dir, recursive = TRUE, showWarnings = FALSE) dir.create(out_dir_qc, recursive = TRUE, showWarnings = FALSE) # Each list of plots corresponds to a separate file message("Loading and filtering data for experiment: ", exp_name) df <- load_and_filter_data(args$easy_results_file, sd = exp_sd) %>% update_gene_names(args$sgd_gene_list) %>% as_tibble() l_vs_k_plot_configs <- list( plots = list( list( df = df, x_var = "L", y_var = "K", plot_type = "scatter", tooltip_vars = c("OrfRep", "Gene", "delta_bg"), title = "Raw L vs K before quality control", color_var = "conc_num_factor_factor", error_bar = FALSE, legend_position = "right" ) ) ) message("Calculating summary statistics before quality control") df_stats <- calculate_summary_stats( # formerly X_stats_ALL df = df, variables = c("L", "K", "r", "AUC", "delta_bg"), group_vars = c("conc_num", "conc_num_factor_factor"))$df_with_stats frequency_delta_bg_plot_configs <- list( plots = list( list( df = df_stats, x_var = "delta_bg", y_var = NULL, plot_type = "density", title = "Density plot for Delta Background by [Drug] (All Data)", color_var = "conc_num_factor_factor", x_label = "Delta Background", y_label = "Density", error_bar = FALSE, legend_position = "right" ), list( df = df_stats, x_var = "delta_bg", y_var = NULL, plot_type = "bar", title = "Bar plot for Delta Background by [Drug] (All Data)", color_var = "conc_num_factor_factor", x_label = "Delta Background", y_label = "Count", error_bar = FALSE, legend_position = "right" ) ) ) message("Filtering rows above delta background tolerance for plotting") df_above_tolerance <- df %>% filter(DB == 1) above_threshold_plot_configs <- list( plots = list( list( df = df_above_tolerance, x_var = "L", y_var = "K", plot_type = "scatter", tooltip_vars = c("OrfRep", "Gene", "delta_bg"), title = paste("Raw L vs K for strains above Delta Background threshold of", round(df_above_tolerance$delta_bg_tolerance[[1]], 3), "or above"), color_var = "conc_num_factor_factor", position = "jitter", annotations = list( list( x = median(df_above_tolerance$L, na.rm = TRUE) / 2, y = median(df_above_tolerance$K, na.rm = TRUE) / 2, label = paste("# strains above Delta Background tolerance =", nrow(df_above_tolerance)) ) ), error_bar = FALSE, legend_position = "right" ) ) ) message("Setting rows above delta background tolerance to NA") df_na <- df %>% mutate(across(all_of(c("L", "K", "r", "AUC", "delta_bg")), ~ ifelse(DB == 1, NA, .))) # formerly X message("Calculating summary statistics across all strains") ss <- calculate_summary_stats( df = df_na, variables = c("L", "K", "r", "AUC", "delta_bg"), group_vars = c("conc_num", "conc_num_factor_factor")) df_na_ss <- ss$summary_stats df_na_stats <- ss$df_with_stats # formerly X_stats_ALL write.csv(df_na_ss, file = file.path(out_dir, "summary_stats_all_strains.csv"), row.names = FALSE) # This can help 
bypass missing values ggplot warnings during testing df_na_stats_filtered <- df_na_stats %>% filter(if_all(all_of(c("L", "K", "r", "AUC", "delta_bg")), is.finite)) message("Calculating summary statistics excluding zero values") df_no_zeros <- df_na %>% filter(L > 0) # formerly X_noZero df_no_zeros_stats <- calculate_summary_stats( df = df_no_zeros, variables = c("L", "K", "r", "AUC", "delta_bg"), group_vars = c("conc_num", "conc_num_factor_factor") )$df_with_stats message("Filtering by 2SD of K") df_na_within_2sd_k <- df_na_stats %>% filter(K >= (mean_K - 2 * sd_K) & K <= (mean_K + 2 * sd_K)) df_na_outside_2sd_k <- df_na_stats %>% filter(K < (mean_K - 2 * sd_K) | K > (mean_K + 2 * sd_K)) message("Calculating summary statistics for L within 2SD of K") # TODO We're omitting the original z_max calculation, not sure if needed? ss <- calculate_summary_stats(df_na_within_2sd_k, "L", # formerly X_stats_BY_L_within_2SD_K group_vars = c("conc_num", "conc_num_factor_factor"))$summary_stats write.csv(ss, file = file.path(out_dir_qc, "max_observed_L_vals_for_spots_within_2SD_K.csv"), row.names = FALSE) message("Calculating summary statistics for L outside 2SD of K") ss <- calculate_summary_stats(df_na_outside_2sd_k, "L", # formerly X_stats_BY_L_outside_2SD_K group_vars = c("conc_num", "conc_num_factor_factor")) df_na_l_outside_2sd_k_stats <- ss$df_with_stats write.csv(ss$summary_stats, file = file.path(out_dir, "max_observed_L_vals_for_spots_outside_2SD_K.csv"), row.names = FALSE) plate_analysis_plot_configs <- generate_plate_analysis_plot_configs( variables = c("L", "K", "r", "AUC", "delta_bg"), df_before = df_stats, df_after = df_na_stats_filtered ) plate_analysis_boxplot_configs <- generate_plate_analysis_plot_configs( variables = c("L", "K", "r", "AUC", "delta_bg"), df_before = df_stats, df_after = df_na_stats_filtered, plot_type = "box" ) plate_analysis_no_zeros_plot_configs <- generate_plate_analysis_plot_configs( variables = c("L", "K", "r", "AUC", "delta_bg"), stages = c("after"), # Only after QC df_after = df_no_zeros_stats ) plate_analysis_no_zeros_boxplot_configs <- generate_plate_analysis_plot_configs( variables = c("L", "K", "r", "AUC", "delta_bg"), stages = c("after"), # Only after QC df_after = df_no_zeros_stats, plot_type = "box" ) l_outside_2sd_k_plot_configs <- list( plots = list( list( df = df_na_l_outside_2sd_k_stats, x_var = "L", y_var = "K", plot_type = "scatter", title = "Raw L vs K for strains falling outside 2 SD of the K mean at each Conc", color_var = "conc_num_factor_factor", position = "jitter", tooltip_vars = c("OrfRep", "Gene", "delta_bg"), annotations = list( list( x = median(df_na_l_outside_2sd_k_stats$L, na.rm = TRUE) / 2, y = median(df_na_l_outside_2sd_k_stats$K, na.rm = TRUE) / 2, label = paste("Total strains:", nrow(df_na_l_outside_2sd_k_stats)) ) ), error_bar = FALSE, legend_position = "right" ) ) ) delta_bg_outside_2sd_k_plot_configs <- list( plots = list( list( df = df_na_l_outside_2sd_k_stats, x_var = "delta_bg", x_label = "Delta Background", y_var = "K", plot_type = "scatter", title = "Delta Background vs K for strains falling outside 2 SD of K", color_var = "conc_num_factor_factor", position = "jitter", tooltip_vars = c("OrfRep", "Gene", "delta_bg"), annotations = list( list( x = 0.05, y = 0.95, hjust = 0, vjust = 1, label = paste("Total strains:", nrow(df_na_l_outside_2sd_k_stats)), size = 5 ) ), error_bar = FALSE, legend_position = "right" ) ) ) message("Generating quality control plots in parallel") # future::plan(future::multicore, workers = 
parallel::detectCores()) future::plan(future::multisession, workers = 3) # generate 3 plot files in parallel plot_configs <- list( list(out_dir = out_dir_qc, filename = "L_vs_K_before_quality_control", plot_configs = l_vs_k_plot_configs, page_width = 12, page_height = 8), list(out_dir = out_dir_qc, filename = "frequency_delta_background", plot_configs = frequency_delta_bg_plot_configs, page_width = 12, page_height = 8), list(out_dir = out_dir_qc, filename = "L_vs_K_above_threshold", plot_configs = above_threshold_plot_configs, page_width = 12, page_height = 8), list(out_dir = out_dir_qc, filename = "plate_analysis", plot_configs = plate_analysis_plot_configs, page_width = 14, page_height = 9), list(out_dir = out_dir_qc, filename = "plate_analysis_boxplots", plot_configs = plate_analysis_boxplot_configs, page_width = 18, page_height = 9), list(out_dir = out_dir_qc, filename = "plate_analysis_no_zeros", plot_configs = plate_analysis_no_zeros_plot_configs, page_width = 14, page_height = 9), list(out_dir = out_dir_qc, filename = "plate_analysis_no_zeros_boxplots", plot_configs = plate_analysis_no_zeros_boxplot_configs, page_width = 18, page_height = 9), list(out_dir = out_dir_qc, filename = "L_vs_K_for_strains_2SD_outside_mean_K", plot_configs = l_outside_2sd_k_plot_configs, page_width = 10, page_height = 8), list(out_dir = out_dir_qc, filename = "delta_background_vs_K_for_strains_2SD_outside_mean_K", plot_configs = delta_bg_outside_2sd_k_plot_configs, page_width = 10, page_height = 8) ) # Parallelize background and quality control plot generation # furrr::future_map(plot_configs, function(config) { # generate_and_save_plots(config$out_dir, config$filename, config$plot_configs, # page_width = config$page_width, page_height = config$page_height) # }, .options = furrr_options(seed = TRUE)) # Loop over background strains # TODO currently only tested against one strain, if we want to do multiple strains we'll # have to rename or group the output files by dir or something so they don't get clobbered bg_strains <- c("YDL227C") lapply(bg_strains, function(strain) { message("Processing background strain: ", strain) # Handle missing data by setting zero values to NA # and then removing any rows with NA in L col df_bg <- df_na %>% filter(OrfRep == strain) %>% mutate( L = if_else(L == 0, NA, L), K = if_else(K == 0, NA, K), r = if_else(r == 0, NA, r), AUC = if_else(AUC == 0, NA, AUC) ) %>% filter(!is.na(L)) message("Calculating background summary statistics") ss_bg <- calculate_summary_stats(df_bg, c("L", "K", "r", "AUC", "delta_bg"), # formerly X_stats_BY group_vars = c("OrfRep", "Drug", "conc_num", "conc_num_factor_factor")) summary_stats_bg <- ss_bg$summary_stats df_bg_stats <- ss_bg$df_with_stats write.csv( summary_stats_bg, file = file.path(out_dir, paste0("summary_stats_background_strain_", strain, ".csv")), row.names = FALSE) message("Setting missing reference values to the highest theoretical value at each drug conc for L") df_reference <- df_na_stats %>% # formerly X2_RF filter(OrfRep == strain) %>% filter(!is.na(L)) %>% group_by(OrfRep, Drug, conc_num, conc_num_factor_factor) %>% mutate( max_l_theoretical = max(max_L, na.rm = TRUE), L = ifelse(L == 0 & !is.na(L) & conc_num > 0, max_l_theoretical, L), SM = ifelse(L >= max_l_theoretical & !is.na(L) & conc_num > 0, 1, 0), L = ifelse(L >= max_l_theoretical & !is.na(L) & conc_num > 0, max_l_theoretical, L)) %>% ungroup() message("Calculating reference strain summary statistics") df_reference_summary_stats <- calculate_summary_stats( # formerly 
X_stats_X2_RF
        df = df_reference,
        variables = c("L", "K", "r", "AUC"),
        group_vars = c("OrfRep", "Drug", "conc_num", "conc_num_factor_factor")
      )$df_with_stats

      # Summarise statistics for error bars
      # Note: .groups is not a mutate() argument, so drop the grouping explicitly instead
      df_reference_summary_stats <- df_reference_summary_stats %>%
        group_by(OrfRep, Drug, conc_num, conc_num_factor_factor) %>%
        mutate(
          mean_mean_L = first(mean_L),
          mean_sd_L = first(sd_L),
          mean_mean_K = first(mean_K),
          mean_sd_K = first(sd_K),
          mean_mean_r = first(mean_r),
          mean_sd_r = first(sd_r),
          mean_mean_AUC = first(mean_AUC),
          mean_sd_AUC = first(sd_AUC)
        ) %>%
        ungroup()

      message("Calculating reference strain interaction summary statistics")
      # formerly X_stats_interaction
      df_reference_interaction_stats <- calculate_summary_stats(
        df = df_reference,
        variables = c("L", "K", "r", "AUC"),
        group_vars = c("OrfRep", "Gene", "num", "Drug", "conc_num", "conc_num_factor_factor")
      )$df_with_stats

      message("Calculating reference strain interaction scores")
      reference_results <- calculate_interaction_scores(df_reference_interaction_stats, df_bg_stats, "reference")

      df_reference_calculations <- reference_results$calculations
      df_reference_interactions_joined <- reference_results$full_data
      df_reference_interactions <- reference_results$interactions

      write.csv(df_reference_calculations,
        file = file.path(out_dir, "zscore_calculations_reference.csv"), row.names = FALSE)
      write.csv(df_reference_interactions,
        file = file.path(out_dir, "zscore_interactions_reference.csv"), row.names = FALSE)

      # message("Generating reference interaction plots")
      # reference_plot_configs <- generate_interaction_plot_configs(df_reference_summary_stats, df_reference_interactions_joined, "reference")
      # generate_and_save_plots(out_dir, "interaction_plots_reference", reference_plot_configs, page_width = 16, page_height = 16)

      message("Setting missing deletion values to the highest theoretical value at each drug conc for L")
      df_deletion <- df_na_stats %>% # formerly X2
        filter(OrfRep != strain) %>%
        filter(!is.na(L)) %>%
        group_by(OrfRep, Gene, conc_num, conc_num_factor_factor) %>%
        mutate(
          max_l_theoretical = max(max_L, na.rm = TRUE),
          L = ifelse(L == 0 & !is.na(L) & conc_num > 0, max_l_theoretical, L),
          SM = ifelse(L >= max_l_theoretical & !is.na(L) & conc_num > 0, 1, SM),
          L = ifelse(L >= max_l_theoretical & !is.na(L) & conc_num > 0, max_l_theoretical, L)) %>%
        ungroup()

      message("Calculating deletion strain(s) interaction summary statistics")
      df_deletion_stats <- calculate_summary_stats(
        df = df_deletion,
        variables = c("L", "K", "r", "AUC"),
        group_vars = c("OrfRep", "Gene", "Drug", "conc_num", "conc_num_factor_factor")
      )$df_with_stats

      message("Calculating deletion strain(s) interaction scores")
      deletion_results <- calculate_interaction_scores(df_deletion_stats, df_bg_stats, "deletion")
      df_calculations <- deletion_results$calculations
      df_interactions <- deletion_results$interactions
      df_interactions_joined <- deletion_results$full_data

      write.csv(df_calculations, file = file.path(out_dir, "zscore_calculations.csv"), row.names = FALSE)
      write.csv(df_interactions, file = file.path(out_dir, "zscore_interactions.csv"), row.names = FALSE)

      # message("Generating deletion interaction plots")
      # deletion_plot_configs <- generate_interaction_plot_configs(df_reference_summary_stats, df_interactions_joined, "deletion")
      # generate_and_save_plots(out_dir, "interaction_plots", deletion_plot_configs, page_width = 16, page_height = 16)

      # message("Writing enhancer/suppressor csv files")
      # interaction_threshold <- 2 # TODO add to study config?
# enhancer_condition_L <- df_interactions$Avg_Zscore_L >= interaction_threshold # suppressor_condition_L <- df_interactions$Avg_Zscore_L <= -interaction_threshold # enhancer_condition_K <- df_interactions$Avg_Zscore_K >= interaction_threshold # suppressor_condition_K <- df_interactions$Avg_Zscore_K <= -interaction_threshold # enhancers_L <- df_interactions[enhancer_condition_L, ] # suppressors_L <- df_interactions[suppressor_condition_L, ] # enhancers_K <- df_interactions[enhancer_condition_K, ] # suppressors_K <- df_interactions[suppressor_condition_K, ] # enhancers_and_suppressors_L <- df_interactions[enhancer_condition_L | suppressor_condition_L, ] # enhancers_and_suppressors_K <- df_interactions[enhancer_condition_K | suppressor_condition_K, ] # write.csv(enhancers_L, file = file.path(out_dir, "zscore_interactions_deletion_enhancers_L.csv"), row.names = FALSE) # write.csv(suppressors_L, file = file.path(out_dir, "zscore_interactions_deletion_suppressors_L.csv"), row.names = FALSE) # write.csv(enhancers_K, file = file.path(out_dir, "zscore_interactions_deletion_enhancers_K.csv"), row.names = FALSE) # write.csv(suppressors_K, file = file.path(out_dir, "zscore_interactions_deletion_suppressors_K.csv"), row.names = FALSE) # write.csv(enhancers_and_suppressors_L, # file = file.path(out_dir, "zscore_interactions_deletion_enhancers_and_suppressors_L.csv"), row.names = FALSE) # write.csv(enhancers_and_suppressors_K, # file = file.path(out_dir, "zscore_interaction_deletion_enhancers_and_suppressors_K.csv"), row.names = FALSE) # message("Writing linear model enhancer/suppressor csv files") # lm_interaction_threshold <- 2 # TODO add to study config? # enhancers_lm_L <- df_interactions[df_interactions$Z_lm_L >= lm_interaction_threshold, ] # suppressors_lm_L <- df_interactions[df_interactions$Z_lm_L <= -lm_interaction_threshold, ] # enhancers_lm_K <- df_interactions[df_interactions$Z_lm_K >= lm_interaction_threshold, ] # suppressors_lm_K <- df_interactions[df_interactions$Z_lm_K <= -lm_interaction_threshold, ] # write.csv(enhancers_lm_L, file = file.path(out_dir, "zscore_interactions_deletion_enhancers_lm_L.csv"), row.names = FALSE) # write.csv(suppressors_lm_L, file = file.path(out_dir, "zscore_interactions_deletion_suppressors_lm_L.csv"), row.names = FALSE) # write.csv(enhancers_lm_K, file = file.path(out_dir, "zscore_interactions_deletion_enhancers_lm_K.csv"), row.names = FALSE) # write.csv(suppressors_lm_K, file = file.path(out_dir, "zscore_interactions_deletion_suppressors_lm_K.csv"), row.names = FALSE) # message("Generating rank plots") # rank_plot_configs <- generate_rank_plot_configs( # df_interactions, # is_lm = FALSE, # ) # generate_and_save_plots(out_dir, "rank_plots", rank_plot_configs, # page_width = 18, page_height = 12) # message("Generating ranked linear model plots") # rank_lm_plot_configs <- generate_rank_plot_configs( # df_interactions, # is_lm = TRUE, # ) # generate_and_save_plots(out_dir, "rank_plots_lm", rank_lm_plot_configs, # page_width = 18, page_height = 12) # message("Generating overlapped ranked plots") # rank_plot_filtered_configs <- generate_rank_plot_configs( # df_interactions, # is_lm = FALSE, # filter_na = TRUE, # overlap_color = TRUE # ) # generate_and_save_plots(out_dir, "rank_plots_na_rm", rank_plot_filtered_configs, # page_width = 18, page_height = 12) # message("Generating overlapped ranked linear model plots") # rank_plot_lm_filtered_configs <- generate_rank_plot_configs( # df_interactions, # is_lm = TRUE, # filter_na = TRUE, # overlap_color = TRUE # ) # 
generate_and_save_plots(out_dir, "rank_plots_lm_na_rm", rank_plot_lm_filtered_configs, # page_width = 18, page_height = 12) message("Generating correlation curve parameter pair plots") correlation_plot_configs <- generate_correlation_plot_configs( df_interactions, df_reference_interactions ) generate_and_save_plots(out_dir, "correlation_cpps", correlation_plot_configs, page_width = 10, page_height = 7) }) }) } main()
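# Minimal sketch of the calculate_summary_stats() contract, kept as comments so the script's
# behavior is unchanged (the toy column values are illustrative, not from a real EASY results file):
# toy <- data.frame(
#   conc_num = c(0, 0, 1, 1),
#   conc_num_factor_factor = factor(c(0, 0, 1, 1)),
#   L = c(10, 12, 20, 22)
# )
# ss <- calculate_summary_stats(toy, variables = "L", group_vars = c("conc_num", "conc_num_factor_factor"))
# ss$summary_stats  # one row per group: N, mean_L, median_L, max_L, min_L, sd_L, se_L
# ss$df_with_stats  # the original rows left-joined back to their group-level statistics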