Commit earlier refactoring

This commit is contained in:
2024-07-29 11:44:45 -04:00
parent 29cbce0754
commit 527068e683
294 changed files with 5524008 additions and 0 deletions


@@ -0,0 +1,130 @@
#!/usr/bin/env python
"""
Improved code to determine the origin column (OligCol) without user input of the argument
and removed the former sys.argv[2]. JWR 22_0816
"""
"""
this code can be used for the parse of the REMc "-finalTable.csv" output file
to make a series of subdatasets, which reflect the pedigree structure
of the way cluters breaking up.
"""
import sys, os

try:
    data_file_Path = sys.argv[1]
    #print data_file_Path
    #cluster_ori_col_num = sys.argv[2]
    output_path = sys.argv[2]
except IndexError:
    print('Usage: python parse_clustering_result_to_Pedigree_Dataset_and_genelist.py /datasetPath/datasetfilename output_path_name')
    sys.exit(1)
# define a function to read a file and generate the gene list
def read_file(file_path):
    with open(file_path, 'r') as file:
        file.readline()  # skip the attribute (header) line
        gene_list = [line.strip().split(',')[1] for line in file]
    return gene_list
# define a function to write the list into a file named in hierarchical series
def write_cluster_orf_list(orf_list, output_dir, cluster_name):
    cluster_file_path = os.path.join(output_dir, f"{cluster_name}.txt")
    with open(cluster_file_path, 'w') as outfile:
        for orf in orf_list:
            outfile.write(orf.strip() + '\n')
# define a function to write the clusters information into a series of files
def write_cluster_results(attributes, orf_list, data_dict, output_directory, cluster_name):
    file_path = os.path.join(output_directory, f"{cluster_name}-finaltable.csv")
    with open(file_path, 'w') as output_file:
        output_file.write(attributes)
        output_file.write('\n')
        for orf in orf_list:
            output_file.write(data_dict[orf.strip()].strip())
            output_file.write('\n')
# define a function to write the cluster name origin extensive final table
def write_extended_final_table(attributes, data, ori_name_column_number, output_directory, output_file_name):
    output_file_path = os.path.join(output_directory, f"{output_file_name}-oriExtFinalTable.csv")
    with open(output_file_path, 'w') as output_file:
        output_file.write(attributes)
        output_file.write('\n')
        for orf in data:
            elements = data[orf].split(',')
            ori_name_list = elements[int(ori_name_column_number) - 1].strip().split(';')
            for ori_name in ori_name_list:
                elements.append(ori_name.strip())
            output_file.write(','.join(elements))
            output_file.write('\n')
# Read the data file
try:
    data = open(data_file_Path, 'r')
except OSError:
    print('input file does not exist')
    sys.exit(1)
# first the title line is read and kept
attribute_line = data.readline().strip()
attributes = attribute_line.split(',')
print(attributes)
print(len(attributes))
OrigCol = len(attributes) - 1
print(OrigCol)
# then the data
data_dict = {}
for data_line in data:
    line_elements = data_line.strip().split(',')
    orf_identifier = line_elements[1].strip().upper()
    data_dict[orf_identifier] = ','.join(line_elements).upper()
data.close()
#print data_dict
print("OrigCol is", str(OrigCol))
fileDic = {}
for orf in data_dict:
    line = data_dict[orf].split(',')
    # read the cluster name from the origin column
    clusterOrigin = line[int(OrigCol) - 1].strip()
    #clusterOrigin = line[int(cluster_ori_col_num) - 1]
    clusterIdentifier = clusterOrigin.split(';')[0:-1]
    for identifier in clusterIdentifier:
        upper_identifier = identifier.strip().upper()
        if upper_identifier not in fileDic:
            fileDic[upper_identifier] = line[1]
        else:
            fileDic[upper_identifier] += ',' + line[1]
input_file_identifier = os.path.basename(data_file_Path).split('.csv')[0]
# make the output folder
try:
    os.mkdir(str(output_path) + str(input_file_identifier))
except OSError:
    print('dir exists')
# Writing the extensive ori name finaltable
write_extended_final_table(attribute_line, data_dict, str(OrigCol), str(output_path) + str(input_file_identifier), str(input_file_identifier))
# write the genelist files and the cluster result files
for cluster_name in fileDic:
    write_cluster_orf_list(fileDic[cluster_name].split(','), str(output_path) + str(input_file_identifier), cluster_name)
    write_cluster_results(attribute_line, fileDic[cluster_name].split(','), data_dict, str(output_path) + str(input_file_identifier), cluster_name)


@@ -0,0 +1,103 @@
#!/usr/bin/env python
# This code concatenates the batch GO Term Finder results (.tsv) generated by the batch GTF perl code (Chris Johnson, U of Tulsa) into a single list table
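# Hypothetical invocation (paths are examples only):
#   python Concatenate_GTF_results.py ./gtf_results/ combined_GTF_results.tsv
# Assumes every "*.txt.tsv" file in data_dir shares the same
# tab-separated header row, which is written to the output once.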
import sys
import os
import glob
def list_files(directory):
    """Return a list of all files in the given directory."""
    return glob.glob(os.path.join(directory, '*.txt.tsv'))
def concatenate_gtf_results(data_dir, output_file):
    """Concatenate the GTF results into a single file."""
    files = list_files(data_dir)
    files.sort()
    with open(output_file, 'w') as output:
        header_written = False
        for file_path in files:
            with open(file_path, 'r') as f:
                labels = f.readline().strip().split('\t')
                # write the label line only once
                if not header_written:
                    output.write('\t'.join(labels) + '\n')
                    header_written = True
                for line in f:
                    line = line.strip().strip('\t')
                    if line:
                        output.write(line + '\n')
if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: python Concatenate_GTF_results.py data_dir output_file')
        sys.exit(1)
    data_dir = sys.argv[1]
    output_file = sys.argv[2]
    concatenate_gtf_results(data_dir, output_file)
# Old version
# def list_files(directory):
# """Return a list of all files in the given directory."""
# return glob.glob(os.path.join(directory, '*.txt.tsv'))
# try:
# data_file_Path = sys.argv[1]
# output_file_Path = sys.argv[2]
# except:
# print ('Usage: python Concatenate_GTF_results.py /datasetPath /outputFilePath_and_Name')
# print ('Data file not found, error in given directory')
# sys.exit(1)
# try:
# output = open(output_file_Path, 'w')
# except OSError:
# print ('output file error')
# # get all the GTF result files in given directory
# File_list = []
# File_list = list_files(data_file_Path)
# File_list.sort()
# i = 0
# for file in File_list:
# #parse the file names given in absolute path
# file_name = file.strip().split('/')[-1]
# file_name = file_name.rstrip('.txt.tsv')
# # function to read tsv files from a given directory
# #open the file
# data = open(file,'r')
# #reading the label line
# labelLine = data.readline()
# label = labelLine.strip().split('\t')
# #write the label
# #updates2010July26: update following label writing code
# if i == 0:
# # output.write('cluster origin')
# for element in label:
# output.write(element)
# output.write('\t')
# i = i + 1
# #updates2010July26 End
# #switch to the next line
# output.write('\n')
# #read the GO terms
# GOTermLines = data.readlines()
# for GOTerm in GOTermLines:
# GOTerm = GOTerm.strip().strip('\t')
# if GOTerm != '':
# #updates2010July26: remove the code to write the first column 'REMc cluster ID'
# #output.write(file_name)
# #output.write('\t')
# ##updates2010July26 update end
# output.write(GOTerm + '\n')
# #output.write('\n')
# output.close()


@@ -0,0 +1,131 @@
#!/usr/bin/env python
"""
Reads the REMc "-finalTable.csv" output file and makes a series of subdatasets
that reflect the pedigree structure of the way clusters break up.
"""
import sys
import os
def reading_single_file(file_path):
    """
    Reads a file and generates a list of gene names.
    """
    with open(file_path, 'r') as data:
        data.readline()  # skip the attribute (header) line
        gene_list = []
        for dataline in data:
            elements = dataline.strip().split(',')
            gene_list.append(elements[1])
    return gene_list
def writing_cluster_orf_list(orf_list, output_dir, real_cluster_ori_name):
    """
    Writes a list of ORF names into a file in hierarchical series.
    """
    outfile_path = os.path.join(output_dir, f"{real_cluster_ori_name}.txt")
    with open(outfile_path, 'w') as outfile:
        for orf in orf_list:
            outfile.write(orf.strip())
            outfile.write('\n')
def writing_cluster_results(attributes, orf_list, dic, output_dir, real_cluster_ori_name):
    """
    Writes clusters information into a series of files.
    """
    outfile_path = os.path.join(output_dir, f"{real_cluster_ori_name}-finaltable.csv")
    with open(outfile_path, 'w') as outfile:
        outfile.write(attributes)
        outfile.write('\n')
        for orf in orf_list:
            outfile.write(dic[orf.strip()].strip())
            outfile.write('\n')
def writing_ext_final_table(attributes, dic, ori_name_col_num, output_dir, output_file_name):
    """
    Writes the cluster name extensive final table.
    """
    outfile_path = os.path.join(output_dir, f"{output_file_name}-oriExtFinalTable.csv")
    with open(outfile_path, 'w') as outfile:
        outfile.write(attributes)
        outfile.write('\n')
        for orf in dic:
            elements = dic[orf].split(',')
            ori_name_list = elements[int(ori_name_col_num) - 1].strip().split(';')
            for ori_name in ori_name_list:
                elements.append(ori_name.strip())
            outfile.write(','.join(elements))
            outfile.write('\n')
def main():
    """
    Main function to parse the REMc -finalTable.csv output file.
    """
    try:
        data_file_path = sys.argv[1]
        output_path = sys.argv[2]
    except IndexError:
        print('Usage: python parse_clustering_result_to_Pedigree_Dataset_and_genelist.py '
              '/datasetPath/datasetfilename output_path_name')
        sys.exit(1)
    try:
        with open(data_file_path, 'r') as data:
            attribute_line = data.readline().strip()
            attributes = attribute_line.split(',')
            orig_col = len(attributes) - 1
            data_dict = {}
            for dataline in data:
                elements = dataline.strip().split(',')
                data_dict[elements[1].strip().upper()] = ','.join(elements).upper()
    except FileNotFoundError:
        print('Input file does not exist')
        sys.exit(1)
    file_dict = {}
    for orf in data_dict:
        line = data_dict[orf].split(',')
        cluster_origin = line[int(orig_col) - 1].strip()
        cluster_identifier = cluster_origin.split(';')[0:-1]
        for identifier in cluster_identifier:
            identifier = identifier.strip()
            if identifier not in file_dict:
                file_dict[identifier] = line[1]
            else:
                file_dict[identifier] = f"{file_dict[identifier]},{line[1]}"
    input_file_identifier = os.path.basename(data_file_path).split('.csv')[0]
    output_dir = os.path.join(output_path, input_file_identifier)
    os.makedirs(output_dir, exist_ok=True)
    # Writing the extensive ori name finaltable
    writing_ext_final_table(attribute_line, data_dict, orig_col, output_dir, input_file_identifier)
    # Writing the genelist files and the cluster result files
    for cluster_name in file_dict:
        writing_cluster_orf_list(file_dict[cluster_name].split(','), output_dir, cluster_name)
        writing_cluster_results(attribute_line, file_dict[cluster_name].split(','), data_dict,
                                output_dir, cluster_name)

if __name__ == '__main__':
    main()