# DconJG2.py
  1. #!/usr/bin/env python
  2. """
  3. Improved code to determine the origin column (OligCol) without user input of the argument
  4. and removed the former sys.argv[2]. JWR 22_0816
  5. """
  6. """
  7. this code can be used for the parse of the REMc "-finalTable.csv" output file
  8. to make a series of subdatasets, which reflect the pedigree structure
  9. of the way cluters breaking up.
  10. """
  11. import sys, os, string, glob
  12. try:
  13. data_file_Path = sys.argv[1]
  14. #print data_file_Path
  15. #cluster_ori_col_num = sys.argv[2]
  16. output_path = sys.argv[2]
  17. except:
  18. print ('Usage: python parse_clustering_result_to_Pedigree_Dataset_and_genelist.py /datasetPath/datasetfilename cluster_origin_column_num output_path_name')
  19. print ('Data file not found')
  20. sys.exit(1)
# define a function to read a final-table file and return the gene list
  22. def read_file(file_path):
  23. with open(file_path, 'r') as file:
  24. attributes = file.readline().strip().split(',')
  25. gene_list = [elements[1] for line in file for elements in [line.strip().split(',')]]
  26. return gene_list
  27. # define a function to write the list into a file named in hierarchical series
  28. def write_cluster_orf_list(orf_list, output_dir, cluster_name):
  29. cluster_file_path = os.path.join(output_dir, f"cluster_name, 'txt')
  30. with open(cluster_file_path, 'w') as outfile:
  31. for orf in orf_list:
  32. outfile.write(orf.strip() + '\n')
  33. # define a function to write the clusters information into a series of files
  34. def write_cluster_results(attributes, orf_list, data_dict, output_directory, cluster_name):
  35. file_path = os.path.join(output_directory, f"{cluster_name}-finaltable.csv")
  36. with open(file_path, 'w') as output_file:
  37. output_file.write(attributes)
  38. output_file.write('\n')
  39. for orf in orf_list:
  40. output_file.write(data_dict[orf.strip()].strip())
  41. output_file.write('\n')
  42. # define a function to write the cluster name origina extensive final table
  43. def write_extended_final_table(attributes, data, ori_name_column_number, output_directory, output_file_name):
  44. output_file_path = os.path.join(output_directory, f"{output_file_name}-oriExtFinalTable.csv")
  45. with open(output_file_path, 'w') as output_file:
  46. output_file.write(attributes)
  47. output_file.write('\n')
  48. for orf in data:
  49. elements = data[orf].split(',')
  50. ori_name_list = elements[int(ori_name_column_number)-1].strip().split(';')
  51. for ori_name in ori_name_list:
  52. elements.append(orii_name.strip())
  53. output_file.write(','.join(elements))
  54. output_file.write('\n')
  55. # Read the data file
  56. try:
  57. data = open(data_file_Path,'r')
  58. except OSError:
  59. print ('input file does not exists')
  60. # first the title line would be read and kept
  61. attributes = data.readline().strip().split(',')
  62. print(attributes)
  63. print(len(attributes))
  64. OrigCol= len(attributes) - 1
  65. print(OrigCol)
  66. # then the data
  67. data_dict = {}
  68. for data_line in data:
  69. data_line = data_line.strip()
  70. line_elements = data_line.split(',')
  71. orf_identifier = line_elements[1].strip().upper()
  72. data_dict[orf_identifier] = ','.join(line_elements).upper()
  73. data.close()
  74. #print dataDic
  75. print ("OrigCol is ", str(OrigCol))
  76. fileDic = {}
  77. for orf in dataDic:
  78. line = dataDic[orf].split(',')
  79. #read the cluster name len(attributes)
  80. clusterOrigin = line[int(OrigCol) - 1]
  81. #clusterOrigin = line[int(cluster_ori_col_num) - 1]
  82. #print clusterOrigin
  83. clusterOrigin = clusterOrigin.strip()
  84. #print clusterOrigin
  85. clusterIdentifier = clusterOrigin.split(';')[0:-1]
  86. #print clusterIdentifier
  87. for identifier in clusterIdentifier:
  88. identifier = identifier.strip()
  89. upper_identifier = identifier.upper()
  90. if upper_identifier not in fileDic:
  91. fileDic[upper_identifier] = line[1]
  92. else:
  93. fileDic[upper_identifier] += ',' + line[1]
  94. input_file_identifier = data_file_Path.strip().split('/')[-1].strip().split('.csv')[-3]
  95. #make the output folder
  96. try:
  97. os.mkdir(str(output_path)+str(input_file_identifier))
  98. except OSError:
  99. print ('dir exists')
  100. #Writing the extensive ori name finaltable
  101. Writing_ext_final_table(attributeLine, dataDic,str(OrigCol),str(output_path)+str(input_file_identifier), str(input_file_identifier))
  102. #Writing_ext_final_table(attributeLine, dataDic,cluster_ori_col_num,str(output_path)+str(input_file_identifier), str(input_file_identifier))
  103. #write the genelist files
  104. for cluster_name in fileDic:
  105. #print fileDic[cluster_name].split(',')
  106. Writing_clusterORF_list(fileDic[cluster_name].split(','), str(output_path)+str(input_file_identifier), cluster_name)
  107. #write the cluster result files
  108. Writing_cluster_results(attributeLine, fileDic[cluster_name].split(','), dataDic,str(output_path)+str(input_file_identifier),cluster_name)