Squashed initial commit
qhtcp-workflow/apps/python/DconJG2.py (new file, 130 lines)
@@ -0,0 +1,130 @@
#!/usr/bin/env python
"""
Improved code to determine the origin column (OrigCol) without user input of the
argument; the former sys.argv[2] was removed. JWR 22_0816

This code parses the REMc "-finalTable.csv" output file into a series of
subdatasets that reflect the pedigree structure of the way clusters break up.
"""

import sys, os, string, glob

try:
    data_file_Path = sys.argv[1]
    #print data_file_Path
    #cluster_ori_col_num = sys.argv[2]
    output_path = sys.argv[2]
except IndexError:
    print('Usage: python parse_clustering_result_to_Pedigree_Dataset_and_genelist.py /datasetPath/datasetfilename output_path_name')
    print('Data file not found')
    sys.exit(1)
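# Example invocation (hypothetical paths, for illustration only):
#   python DconJG2.py /path/to/study-finalTable.csv /path/to/output/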

# define a function to read a file and generate the gene list
def read_file(file_path):
    with open(file_path, 'r') as file:
        attributes = file.readline().strip().split(',')  # header line (not used further here)
        gene_list = [line.strip().split(',')[1] for line in file]
    return gene_list

# define a function to write the list into a file named in hierarchical series
def write_cluster_orf_list(orf_list, output_dir, cluster_name):
    cluster_file_path = os.path.join(output_dir, f"{cluster_name}.txt")
    with open(cluster_file_path, 'w') as outfile:
        for orf in orf_list:
            outfile.write(orf.strip() + '\n')

# define a function to write the cluster information into a series of files
def write_cluster_results(attributes, orf_list, data_dict, output_directory, cluster_name):
    file_path = os.path.join(output_directory, f"{cluster_name}-finaltable.csv")
    with open(file_path, 'w') as output_file:
        output_file.write(attributes)
        output_file.write('\n')
        for orf in orf_list:
            output_file.write(data_dict[orf.strip()].strip())
            output_file.write('\n')

# define a function to write the cluster-name-origin extended final table
def write_extended_final_table(attributes, data, ori_name_column_number, output_directory, output_file_name):
    output_file_path = os.path.join(output_directory, f"{output_file_name}-oriExtFinalTable.csv")
    with open(output_file_path, 'w') as output_file:
        output_file.write(attributes)
        output_file.write('\n')
        for orf in data:
            elements = data[orf].split(',')
            ori_name_list = elements[int(ori_name_column_number) - 1].strip().split(';')
            for ori_name in ori_name_list:
                elements.append(ori_name.strip())
            output_file.write(','.join(elements))
            output_file.write('\n')

# Read the data file
try:
    data = open(data_file_Path, 'r')
except OSError:
    print('Input file does not exist')
    sys.exit(1)

# first the title line is read and kept (as a string and as a list of attributes)
attribute_line = data.readline().strip()
attributes = attribute_line.split(',')
print(attributes)
print(len(attributes))
OrigCol = len(attributes) - 1
print(OrigCol)

# then the data: map each upper-cased ORF identifier (column 2) to its full row
data_dict = {}
for data_line in data:
    data_line = data_line.strip()
    line_elements = data_line.split(',')
    orf_identifier = line_elements[1].strip().upper()
    data_dict[orf_identifier] = ','.join(line_elements).upper()
data.close()
#print data_dict

print("OrigCol is ", str(OrigCol))

fileDic = {}
for orf in data_dict:
    line = data_dict[orf].split(',')
    # read the cluster-origin name from the origin column
    clusterOrigin = line[int(OrigCol) - 1]
    #clusterOrigin = line[int(cluster_ori_col_num) - 1]
    #print clusterOrigin
    clusterOrigin = clusterOrigin.strip()
    #print clusterOrigin
    # the trailing ';' leaves an empty final element, which is dropped here
    clusterIdentifier = clusterOrigin.split(';')[0:-1]
    #print clusterIdentifier

    for identifier in clusterIdentifier:
        identifier = identifier.strip()
        upper_identifier = identifier.upper()
        if upper_identifier not in fileDic:
            fileDic[upper_identifier] = line[1]
        else:
            fileDic[upper_identifier] += ',' + line[1]

input_file_identifier = data_file_Path.strip().split('/')[-1].split('.csv')[0]

# make the output folder
output_dir = os.path.join(str(output_path), str(input_file_identifier))
try:
    os.mkdir(output_dir)
except OSError:
    print('dir exists')

# Writing the extended ori-name finaltable
write_extended_final_table(attribute_line, data_dict, str(OrigCol), output_dir, str(input_file_identifier))
#write_extended_final_table(attribute_line, data_dict, cluster_ori_col_num, output_dir, str(input_file_identifier))

# write the genelist files
for cluster_name in fileDic:
    #print fileDic[cluster_name].split(',')
    write_cluster_orf_list(fileDic[cluster_name].split(','), output_dir, cluster_name)
    # write the cluster result files
    write_cluster_results(attribute_line, fileDic[cluster_name].split(','), data_dict, output_dir, cluster_name)
qhtcp-workflow/apps/python/concatGTFResults.py (new file, 103 lines)
@@ -0,0 +1,103 @@
#!/usr/bin/env python
# Concatenates the batch GO Term Finder (GTF) results (.tsv), generated by the batch
# GTF Perl code (Chris Johnson, U of Tulsa), into a single list table.
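# Assumed input layout (inferred from the reading code below): each '*.txt.tsv'
# file starts with a tab-separated label line, followed by one tab-separated GO
# term result per line; blank lines are skipped.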

import os
import glob
import sys  # needed for sys.argv / sys.exit in the __main__ block below


def list_files(directory):
    """Return a list of all '*.txt.tsv' files in the given directory."""
    return glob.glob(os.path.join(directory, '*.txt.tsv'))

def concatenate_gtf_results(data_dir, output_file):
    """Concatenate the GTF results into a single file."""
    files = list_files(data_dir)
    files.sort()

    with open(output_file, 'w') as output:
        for i, file_path in enumerate(files):
            # file name without the '.txt.tsv' suffix (kept from the original code; currently unused)
            file_name = os.path.basename(file_path)
            if file_name.endswith('.txt.tsv'):
                file_name = file_name[:-len('.txt.tsv')]

            with open(file_path, 'r') as f:
                labels = f.readline().strip().split('\t')
                # write the label line only once, for the first file (as in the old version below)
                if i == 0:
                    output.write('\t'.join(labels) + '\n')

                for line in f:
                    line = line.strip().strip('\t')
                    if line:
                        output.write(line + '\n')


if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: python Concatenate_GTF_results.py data_dir output_file')
        sys.exit(1)

    data_dir = sys.argv[1]
    output_file = sys.argv[2]

    concatenate_gtf_results(data_dir, output_file)
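# Example invocation (hypothetical paths, for illustration only):
#   python concatGTFResults.py /path/to/gtf_results/ /path/to/concatenated_GTF_results.tsv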


# Old version
# def list_files(directory):
#     """Return a list of all files in the given directory."""
#     return glob.glob(os.path.join(directory, '*.txt.tsv'))

# try:
#     data_file_Path = sys.argv[1]
#     output_file_Path = sys.argv[2]
# except:
#     print ('Usage: python Concatenate_GTF_results.py /datasetPath /outputFilePath_and_Name')
#     print ('Data file not found, error in given directory')
#     sys.exit(1)

# try:
#     output = open(output_file_Path, 'w')
# except OSError:
#     print ('output file error')

# # get all the GTF result files in given directory
# File_list = []
# File_list = list_files(data_file_Path)
# File_list.sort()

# i = 0
# for file in File_list:
#     #parse the file names given in absolute path
#     file_name = file.strip().split('/')[-1]
#     file_name = file_name.rstrip('.txt.tsv')
#     # function to read tsv files from a given directory
#     #open the file
#     data = open(file,'r')
#     #reading the label line
#     labelLine = data.readline()
#     label = labelLine.strip().split('\t')
#     #write the label
#     #updates2010July26: update following label writing code
#     if i == 0:
#         # output.write('cluster origin')
#         for element in label:
#             output.write(element)
#             output.write('\t')
#         i = i + 1
#     #updates2010July26 End
#     #switch to the next line
#     output.write('\n')

#     #read the GO terms
#     GOTermLines = data.readlines()
#     for GOTerm in GOTermLines:
#         GOTerm = GOTerm.strip().strip('\t')
#         if GOTerm != '':
#             #updates2010July26: remove the code to write the first column 'REMc cluster ID'
#             #output.write(file_name)
#             #output.write('\t')
#             ##updates2010July26 update end
#             output.write(GOTerm + '\n')
#             #output.write('\n')
# output.close()
qhtcp-workflow/apps/python/dCon.py (new file, 131 lines)
@@ -0,0 +1,131 @@
#!/usr/bin/env python
"""
Reads the REMc "-finalTable.csv" output file and makes a series of subdatasets
that reflect the pedigree structure of the way clusters break up.
"""

import sys
import os
import string
import glob


def reading_single_file(file_path):
    """
    Reads a file and generates a list of gene names.
    """
    with open(file_path, 'r') as data:
        attribute_line = data.readline().strip()
        attributes = attribute_line.split(',')

        gene_list = []
        for dataline in data:
            dataline = dataline.strip()
            elements = dataline.split(',')
            gene_list.append(elements[1])

    return gene_list

def writing_cluster_orf_list(orf_list, output_dir, real_cluster_ori_name):
    """
    Writes a list of ORF names into a file in hierarchical series.
    """
    outfile_path = os.path.join(output_dir, f"{real_cluster_ori_name}.txt")
    with open(outfile_path, 'w') as outfile:
        for orf in orf_list:
            outfile.write(orf.strip())
            outfile.write('\n')

def writing_cluster_results(attributes, orf_list, dic, output_dir, real_cluster_ori_name):
    """
    Writes cluster information into a series of files.
    """
    outfile_path = os.path.join(output_dir, f"{real_cluster_ori_name}-finaltable.csv")
    with open(outfile_path, 'w') as outfile:
        outfile.write(attributes)
        outfile.write('\n')
        for orf in orf_list:
            outfile.write(dic[orf.strip()].strip())
            outfile.write('\n')

def writing_ext_final_table(attributes, dic, ori_name_col_num, output_dir, output_file_name):
    """
    Writes the cluster name extensive final table.
    """
    outfile_path = os.path.join(output_dir, f"{output_file_name}-oriExtFinalTable.csv")
    with open(outfile_path, 'w') as outfile:
        outfile.write(attributes)
        outfile.write('\n')
        for orf in dic:
            elements = dic[orf].split(',')
            ori_name_list = elements[int(ori_name_col_num) - 1].strip().split(';')
            for ori_name in ori_name_list:
                elements.append(ori_name.strip())
            outfile.write(','.join(elements))
            outfile.write('\n')
def main():
    """
    Main function to parse the REMc -finalTable.csv output file.
    """
    try:
        data_file_path = sys.argv[1]
        output_path = sys.argv[2]
    except IndexError:
        print('Usage: python parse_clustering_result_to_Pedigree_Dataset_and_genelist.py '
              '/datasetPath/datasetfilename output_path_name')
        print('Data file not found')
        sys.exit(1)

    try:
        with open(data_file_path, 'r') as data:
            attribute_line = data.readline().strip()
            attributes = attribute_line.split(',')
            orig_col = len(attributes) - 1

            data_dict = {}
            for dataline in data:
                dataline = dataline.strip()
                elements = dataline.split(',')
                data_dict[str.upper(elements[1].strip())] = ','.join(elements).upper()

    except FileNotFoundError:
        print('Input file does not exist')
        sys.exit(1)

    file_dict = {}
    for orf in data_dict:
        line = data_dict[orf].split(',')
        cluster_origin = line[int(orig_col) - 1].strip()
        cluster_identifier = cluster_origin.split(';')[0:-1]

        for identifier in cluster_identifier:
            identifier = identifier.strip()
            if identifier not in file_dict:
                file_dict[identifier] = line[1]
            else:
                file_dict[identifier] = f"{file_dict[identifier]},{line[1]}"

    input_file_identifier = os.path.basename(data_file_path).split('.csv')[0]
    output_dir = os.path.join(output_path, input_file_identifier)
    os.makedirs(output_dir, exist_ok=True)

    # Writing the extensive ori name finaltable
    writing_ext_final_table(attribute_line, data_dict, orig_col, output_dir, input_file_identifier)

    # Writing the genelist files and, for each cluster, the cluster result files
    for cluster_name in file_dict:
        writing_cluster_orf_list(file_dict[cluster_name].split(','), output_dir, cluster_name)
        writing_cluster_results(attribute_line, file_dict[cluster_name].split(','), data_dict,
                                output_dir, cluster_name)


if __name__ == '__main__':
    main()
qhtcp-workflow/apps/python/join_interactions.py (new file, 125 lines)
@@ -0,0 +1,125 @@
import pandas as pd
import os
import sys
import csv  # for csv.QUOTE_NONE in the to_csv calls below
import numpy as np


# Function to parse and set arguments
def parse_arguments():
    if len(sys.argv) == 1:  # Interactive mode
        args = [
            "/home/bryan/documents/develop/scripts/hartmanlab/workflow/out/20240116_jhartman2_DoxoHLD",
            2,
            "/home/bryan/documents/develop/scripts/hartmanlab/workflow/out/20240116_jhartman2_DoxoHLD/StudyInfo.csv",
            "/home/bryan/documents/develop/scripts/hartmanlab/workflow/out/20240116_jhartman2_DoxoHLD/Exp1",
            "/home/bryan/documents/develop/scripts/hartmanlab/workflow/out/20240116_jhartman2_DoxoHLD/Exp2"
        ]
    else:
        args = sys.argv[1:]

    return {
        "out_dir": os.path.abspath(args[0]),
        "sd": float(args[1]),
        "study_info": os.path.abspath(args[2]),
        "input_dirs": args[3:]
    }


args = parse_arguments()
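# Example invocation (hypothetical paths, for illustration only):
#   python join_interactions.py /path/to/out_dir 2 /path/to/StudyInfo.csv /path/to/Exp1 /path/to/Exp2
# Running with no arguments falls back to the hard-coded interactive-mode paths above.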


# Create an array for the zscores files
def get_zscores_files(dirs):
    files = [os.path.join(study, "zscores", "zscores_interaction.csv")
             for study in dirs if os.path.exists(os.path.join(study, "zscores", "zscores_interaction.csv"))]
    return files


zscores_files = get_zscores_files(args['input_dirs'])
print(f"The SD value is: {args['sd']}")

# Ensure there are enough files to compare
if len(zscores_files) < 2:
    sys.exit("Not enough experiments to compare, exiting script")


# Function to join zscores files
def join_zscores_files(files):
    joined_data = pd.read_csv(files[0])
    for i, file in enumerate(files[1:], start=1):
        temp_data = pd.read_csv(file)
        # Suffix overlapping columns with ".1", ".2", ... so the later clean-up and
        # relabelling steps, which expect ".N"-style suffixes, can find them
        joined_data = pd.merge(joined_data, temp_data, on="OrfRep", how="outer", suffixes=('', f'.{i}'))
    return joined_data


# Load and join zscores files
joined_data = join_zscores_files(zscores_files)


# Order and select columns
def order_and_select_columns(data):
    ordered_data = data[sorted(data.columns)]
    selected_headers = ordered_data.filter(regex="OrfRep|Gene|z_lm_k|z_shift_k|z_lm_l|z_shift_l")
    return selected_headers


selected_headers = order_and_select_columns(joined_data)


# Remove redundant columns like "Gene.1"
def clean_headers(data, suffixes):
    suffixes_to_remove = [f"Gene.{i}" for i in range(1, suffixes + 1)]
    return data.drop(columns=suffixes_to_remove, errors='ignore')


headSel = clean_headers(selected_headers, len(zscores_files) - 1)
headSel2 = clean_headers(joined_data.filter(regex="OrfRep|Gene"), len(zscores_files) - 1)


# Fill NA values in Shift and Z_lm columns
def fill_na_in_columns(data):
    for column in data.columns:
        # match column names case-insensitively, since the selected headers use
        # lower-case names such as "z_shift_k" and "z_lm_k"
        if "shift" in column.lower():
            data[column] = data[column].fillna(0.001)
        elif "z_lm_" in column.lower():
            data[column] = data[column].fillna(0.0001)
    return data


headSel = fill_na_in_columns(headSel)


# Filter based on standard deviation
def filter_by_sd(data, sd):
    if sd == 0:
        return data
    z_lm_cols = data.filter(regex="z_lm_")
    filter_vector = z_lm_cols.abs().ge(sd).any(axis=1)
    return data[filter_vector]


REMcRdy = filter_by_sd(headSel.filter(regex="OrfRep|Gene|z_lm_"), args['sd'])
shiftOnly = filter_by_sd(headSel.filter(regex="OrfRep|Gene|z_shift"), args['sd'])
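# Worked example (hypothetical numbers): with sd = 2, a row whose z_lm values are
# 1.3 and -2.4 is kept because |-2.4| >= 2, while a row with 0.5 and 1.1 is dropped;
# sd = 0 disables the filter entirely.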


# Reorder columns to interleave Z_lm and Shift data
def reorder_columns(data1, data2):
    combined_data = data1.copy()
    for i in range(2, data1.shape[1]):
        combined_data.insert(2 * i - 1, data2.columns[i], data2.iloc[:, i])
    return combined_data


combI = reorder_columns(headSel2, shiftOnly)


# Write output files (quoting disabled to keep the original unquoted-CSV output)
REMcRdy.to_csv(os.path.join(args['out_dir'], "REMcRdy_lm_only.csv"), index=False, quoting=csv.QUOTE_NONE)
shiftOnly.to_csv(os.path.join(args['out_dir'], "Shift_only.csv"), index=False, quoting=csv.QUOTE_NONE)


# Relabel headers using experiment names from StudyInfo.csv
def relabel_headers(headers, labels):
    new_labels = list(headers)  # a pandas Index is immutable, so work on a plain list
    for i, header in enumerate(headers):
        suffix = header.split('.')[-1]
        if suffix.isdigit() and int(suffix) in range(1, 4):
            exp_name = labels.iloc[int(suffix) - 1, 1]
            new_labels[i] = header.replace(f".{suffix}", f"_{exp_name}")
    return new_labels
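# Illustrative (hypothetical) example: a merged column such as "z_lm_k.1" would be
# renamed to "z_lm_k_<experiment name>", with the experiment name taken from the
# second column of StudyInfo.csv.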


LabelStd = pd.read_csv(args['study_info'])

shiftOnly.columns = relabel_headers(shiftOnly.columns, LabelStd)
REMcRdy.columns = relabel_headers(REMcRdy.columns, LabelStd)

# Save relabeled files (again without quoting, as above)
REMcRdy.to_csv(os.path.join(args['out_dir'], "REMcRdy_lm_only.csv"), index=False, quoting=csv.QUOTE_NONE)
shiftOnly.to_csv(os.path.join(args['out_dir'], "Shift_only.csv"), index=False, quoting=csv.QUOTE_NONE)

# Save updated parameters
LabelStd.iloc[:, 3] = args['sd']
LabelStd.to_csv(os.path.join(args['out_dir'], "parameters.csv"), index=False)
LabelStd.to_csv(args['study_info'], index=False)