# 171 lines, 6.0 KiB, Python
#!/usr/bin/env python
|
|
"""
|
|
Improved code to determine the origin column (OligCol) without user input of the argument
|
|
and removed the former sys.argv[2]. JWR 22_0816
|
|
"""
|
|
"""
|
|
this code can be used for the parse of the REMc "-finalTable.csv" output file
|
|
to make a series of subdatasets, which reflect the pedigree structure
|
|
of the way clusters break up.
|
|
"""
|
|
|
|
import sys, os, string, glob
|
|
|
|
# define a function to read one comma-separated file and collect the
# identifiers found in its second column
def Reading_single_file(file_path):
    """Return a list of the second-column values of every data row.

    The first line of the file is treated as a header/label line and
    skipped.  Rows are split on ','.

    file_path -- path of the comma-separated input file.
    """
    gene_list = []
    # 'with' guarantees the handle is closed even if a row is malformed;
    # the original left the file open on any exception
    with open(file_path, 'r') as data:
        data.readline()  # skip the label line (its content is unused)
        for dataline in data:
            elements = dataline.strip().split(',')
            # column 2 holds the gene/ORF identifier
            gene_list.append(elements[1])
    return gene_list
|
|
|
|
# define a function to write the list into a file named in hierarchical series
def Writing_clusterORF_list(list, output_dir, real_cluster_ori_name):
    """Write one ORF identifier per line to <output_dir>/<name>.txt.

    list -- iterable of ORF name strings (parameter name kept for
            backward compatibility, although it shadows the builtin).
    output_dir -- destination directory.
    real_cluster_ori_name -- base name (cluster name) of the output file.
    """
    # os.path.join works on both Linux and Windows, replacing the two
    # hard-coded separator variants of the original code
    out_path = os.path.join(output_dir, str(real_cluster_ori_name) + '.txt')
    with open(out_path, 'w') as outfile:
        for orf in list:
            # BUG FIX: the original called strip('') which strips nothing
            # (empty character set); strip() removes the intended
            # surrounding whitespace
            outfile.write(orf.strip())
            outfile.write('\n')
|
|
|
|
|
|
# define a function to write the clusters information into a series of files
def Writing_cluster_results(attributes, orf_list, dic, output_dir, real_cluster_ori_name):
    """Write the header plus the full data row of each listed ORF to
    <output_dir>/<real_cluster_ori_name>-finaltable.csv.

    attributes -- header line text, written verbatim on the first line.
    orf_list -- ORF keys to look up in dic.
    dic -- maps ORF key -> full comma-joined data row.
    output_dir -- destination directory.
    real_cluster_ori_name -- cluster name used as the file-name stem.
    """
    # os.path.join replaces the hard-coded Linux/Windows separator variants
    out_path = os.path.join(output_dir, str(real_cluster_ori_name) + '-finaltable.csv')
    with open(out_path, 'w') as outfile:
        outfile.write(attributes)
        outfile.write('\n')
        for orf in orf_list:
            # BUG FIX: the original used strip('') (a no-op), so keys
            # carrying stray whitespace could never match the dictionary,
            # whose keys are stripped when it is built
            outfile.write(dic[orf.strip()].strip())
            outfile.write('\n')
|
|
|
|
# define a function to write the cluster-name-origin extended final table
def Writing_ext_final_table(attributes, dic, ori_name_col_num, output_dir, output_file_name):
    """Write <output_dir>/<output_file_name>-oriExtFinalTable.csv.

    For every row in dic the origin column (1-based index
    ori_name_col_num) is split on ';' and each origin name is appended
    to the row as an extra trailing column.

    attributes -- header line text, written verbatim on the first line.
    dic -- maps ORF key -> full comma-joined data row.
    ori_name_col_num -- 1-based column index (int or numeric string).
    """
    out_path = os.path.join(str(output_dir), str(output_file_name) + '-oriExtFinalTable.csv')
    # 'with' closes the file even if a row is malformed mid-loop
    with open(out_path, 'w') as outfile:
        outfile.write(attributes)
        outfile.write('\n')
        for orf in dic:
            elements = dic[orf].split(',')
            # the origin cell holds a ';'-separated pedigree of cluster names
            ori_name_list = elements[int(ori_name_col_num) - 1].strip().split(';')
            for ori_name in ori_name_list:
                elements.append(ori_name.strip())
            outfile.write(','.join(elements))
            outfile.write('\n')
|
|
|
|
#*************************************************************************************************
# Main
#
# Usage: python parse_clustering_result_to_Pedigree_Dataset_and_genelist.py \
#            <finalTable.csv path> <output path (must end with a separator)>
#*************************************************************************************************

try:
    data_file_Path = sys.argv[1]
    # the former sys.argv[2] (cluster origin column) is now derived from the
    # header, so the output path moved up to argument 2 (JWR 22_0816)
    output_path = sys.argv[2]
except IndexError:
    # BUG FIX: was a bare 'except:' (which also swallowed SystemExit etc.)
    # and the usage text still advertised the removed
    # cluster_origin_column_num argument
    print ('Usage: python parse_clustering_result_to_Pedigree_Dataset_and_genelist.py /datasetPath/datasetfilename output_path_name')
    print ('Data file not found')
    sys.exit(1)

# Reading the final table
try:
    data = open(data_file_Path, 'r')
except OSError:
    print ('input file does not exists')
    # BUG FIX: the original printed the message but fell through and then
    # crashed with a NameError on the undefined 'data' handle
    sys.exit(1)

# first the title line would be read and kept
attributeLine = data.readline().strip()
attributes = attributeLine.split(',')
print(attributes)
print(len(attributes))

# the origin column is taken to be the second-to-last header column
# (OrigCol is 1-based below: line[int(OrigCol) - 1])
OrigCol = len(attributes) - 1
print(OrigCol)

# then read the data lines into a dictionary keyed by the upper-cased ORF
# (the unique second column); the value is the whole upper-cased row
dataDic = {}
for dataline in data:
    line = dataline.strip().split(',')
    dataDic[line[1].strip().upper()] = ','.join(line).upper()
data.close()

print ("OrigCol is ", str(OrigCol))

# group ORFs under every ancestor cluster name found in the origin column
fileDic = {}
for orf in dataDic:
    line = dataDic[orf].split(',')
    clusterOrigin = line[int(OrigCol) - 1].strip()
    # drop the last ';'-separated element: it is the terminal cluster,
    # the remaining prefix is the pedigree of enclosing clusters
    clusterIdentifier = clusterOrigin.split(';')[0:-1]
    for ident in clusterIdentifier:
        key = ident.strip().upper()
        if key not in fileDic:
            fileDic[key] = line[1]
        else:
            fileDic[key] = fileDic[key] + ',' + line[1]

# parse the input file name (Linux '/' separators)
# NOTE(review): split('.csv')[-3] only resolves when the base name contains
# '.csv' at least twice (e.g. 'x.csv-...-finalTable.csv'); otherwise it
# raises IndexError -- confirm against the REMc naming convention
input_file_identifier = data_file_Path.strip().split('/')[-1].strip().split('.csv')[-3]

# make the output folder; output_path is concatenated directly with the
# identifier, so it must end with a path separator
try:
    os.mkdir(str(output_path) + str(input_file_identifier))
except OSError:
    # best-effort: reuse the directory if it already exists
    print ('dir exists')

# Writing the extensive ori name finaltable
Writing_ext_final_table(attributeLine, dataDic, str(OrigCol),
                        str(output_path) + str(input_file_identifier),
                        str(input_file_identifier))

# write the genelist files and the per-cluster result files
for cluster_name in fileDic:
    Writing_clusterORF_list(fileDic[cluster_name].split(','),
                            str(output_path) + str(input_file_identifier),
                            cluster_name)
    Writing_cluster_results(attributeLine, fileDic[cluster_name].split(','),
                            dataDic,
                            str(output_path) + str(input_file_identifier),
                            cluster_name)