#!/usr/bin/env python
"""
Reads the REMc "-finalTable.csv" output file and makes a series of sub-datasets
that reflect the pedigree structure of the way clusters break up.
"""

import os
import sys
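
# A minimal sketch of the expected invocation, assuming the input layout that
# the parsing code below relies on (gene/ORF names in the second column and a
# ';'-separated cluster-origin field in the second-to-last column):
#
#   python parse_clustering_result_to_Pedigree_Dataset_and_genelist.py \
#       /datasetPath/datasetfilename output_path_name
#
# The column layout is inferred from the parsing code rather than from REMc
# documentation, so adjust if your final table differs.
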
def reading_single_file(file_path):
    """
    Reads a file and generates a list of gene names.
    """
    with open(file_path, 'r') as data:
        attribute_line = data.readline().strip()
        attributes = attribute_line.split(',')

        gene_list = []
        for dataline in data:
            dataline = dataline.strip()
            elements = dataline.split(',')
            gene_list.append(elements[1])

    return gene_list

def writing_cluster_orf_list(orf_list, output_dir, real_cluster_ori_name):
    """
    Writes a list of ORF names into a file in hierarchical series.
    """
    outfile_path = os.path.join(output_dir, f"{real_cluster_ori_name}.txt")
    with open(outfile_path, 'w') as outfile:
        for orf in orf_list:
            outfile.write(orf.strip())
            outfile.write('\n')

def writing_cluster_results(attributes, orf_list, dic, output_dir, real_cluster_ori_name):
    """
    Writes cluster information into a series of files.
    """
    outfile_path = os.path.join(output_dir, f"{real_cluster_ori_name}-finaltable.csv")
    with open(outfile_path, 'w') as outfile:
        outfile.write(attributes)
        outfile.write('\n')
        for orf in orf_list:
            outfile.write(dic[orf.strip()].strip())
            outfile.write('\n')

def writing_ext_final_table(attributes, dic, ori_name_col_num, output_dir, output_file_name):
    """
    Writes the cluster-name extended ("oriExt") final table, in which each
    ';'-separated cluster-origin name is appended as an extra column.
    """
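    # Illustrative sketch of the intended expansion, assuming a cluster-origin
    # field such as "0;0-1;0-1-2;" (the trailing ';' is assumed, mirroring how
    # main() strips the empty last piece):
    #
    #   GENE1,...,0;0-1;0-1-2;,...  ->  GENE1,...,0;0-1;0-1-2;,...,0,0-1,0-1-2
    #
    # The example values are placeholders, not taken from real REMc output.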
    outfile_path = os.path.join(output_dir, f"{output_file_name}-oriExtFinalTable.csv")
    with open(outfile_path, 'w') as outfile:
        outfile.write(attributes)
        outfile.write('\n')
        for orf in dic:
            elements = dic[orf].split(',')
            ori_name_list = elements[int(ori_name_col_num) - 1].strip().split(';')
            for ori_name in ori_name_list:
                # Skip the empty piece produced by a trailing ';' in the
                # cluster-origin field.
                if ori_name.strip():
                    elements.append(ori_name.strip())
            outfile.write(','.join(elements))
            outfile.write('\n')

def main():
    """
    Main function to parse the REMc -finalTable.csv output file.
    """
    try:
        data_file_path = sys.argv[1]
        output_path = sys.argv[2]
    except IndexError:
        print('Usage: python parse_clustering_result_to_Pedigree_Dataset_and_genelist.py '
              '/datasetPath/datasetfilename output_path_name')
        print('Error: missing command-line arguments')
        sys.exit(1)

    try:
        with open(data_file_path, 'r') as data:
            attribute_line = data.readline().strip()
            attributes = attribute_line.split(',')
            # 1-based column number of the cluster-origin column
            # (the second-to-last column of the header).
            orig_col = len(attributes) - 1

            # Map each upper-cased gene/ORF name (second column) to its full,
            # upper-cased data row.
            data_dict = {}
            for dataline in data:
                dataline = dataline.strip()
                elements = dataline.split(',')
                data_dict[elements[1].strip().upper()] = ','.join(elements).upper()

    except FileNotFoundError:
        print('Input file does not exist')
        sys.exit(1)

    # Map every cluster-origin name in a gene's pedigree to the comma-separated
    # list of genes that belong to it.
    file_dict = {}
    for orf in data_dict:
        line = data_dict[orf].split(',')
        cluster_origin = line[int(orig_col) - 1].strip()
        # Drop the empty piece left by the trailing ';'.
        cluster_identifier = cluster_origin.split(';')[0:-1]

        for identifier in cluster_identifier:
            identifier = identifier.strip()
            if identifier not in file_dict:
                file_dict[identifier] = line[1]
            else:
                file_dict[identifier] = f"{file_dict[identifier]},{line[1]}"

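    # Illustrative sketch (values assumed, not from real REMc output): a row
    # whose cluster-origin field is "0;0-1;0-1-2;" contributes its gene name to
    # file_dict["0"], file_dict["0-1"], and file_dict["0-1-2"], so each ancestor
    # cluster accumulates the genes of all of its descendant clusters.
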
    input_file_identifier = os.path.basename(data_file_path).split('.csv')[0]
    output_dir = os.path.join(output_path, input_file_identifier)
    os.makedirs(output_dir, exist_ok=True)

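    # Everything below is written into output_dir:
    #   <input_file_identifier>-oriExtFinalTable.csv  (extended final table)
    #   <cluster_name>.txt                            (gene list per cluster)
    #   <cluster_name>-finaltable.csv                 (per-cluster final table)
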
    # Writing the extensive ori name finaltable
    writing_ext_final_table(attribute_line, data_dict, orig_col, output_dir, input_file_identifier)

    # Writing the genelist files
    for cluster_name in file_dict:
        writing_cluster_orf_list(file_dict[cluster_name].split(','), output_dir, cluster_name)

        # Writing the cluster result files
        writing_cluster_results(attribute_line, file_dict[cluster_name].split(','), data_dict,
                                output_dir, cluster_name)

if __name__ == '__main__':
    main()