/*
    This file is part of BioD.
    Copyright (C) 2012    Artem Tarasov <lomereiter@gmail.com>

    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
*/
module bio.bam.utils.samheadermerger;

import bio.sam.header;
import bio.bam.validation.samheader;

import std.array;
import std.range;
import std.algorithm;
import std.conv;
import std.typecons;
import std.exception;

import bio.bam.utils.graph;

/// Class encapsulating functionality of merging several SAM headers
/// into one. (In fact, its role is to just group several variables,
/// so it could be replaced by a function returning a struct.)
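///
/// A minimal usage sketch (assuming $(D h1) and $(D h2) are SamHeader
/// instances obtained elsewhere, and $(D old_ref_id) is a hypothetical
/// reference index valid in the first file):
/// ---
/// auto merger = new SamHeaderMerger([h1, h2]);
/// auto header = merger.merged_header;
/// // translate a reference ID from file 0 into the merged header
/// size_t new_ref_id = merger.ref_id_map[0][old_ref_id];
/// ---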
class SamHeaderMerger {

    /// Takes an array of SAM headers as input.
    this(SamHeader[] headers, bool validate_headers=false) {
        _headers = headers;
        _len = _headers.length;

        merged_header = new SamHeader();
        ref_id_map = new size_t[size_t][_len];
        ref_id_reverse_map = new size_t[size_t][_len];
        program_id_map = new string[string][_len];
        readgroup_id_map = new string[string][_len];

        if (validate_headers) {
            // TODO: make custom validator for producing better error message
            foreach (size_t i, header; _headers) {
                if (!isValid(header)) {
                    throw new Exception("header #" ~ to!string(i) ~
                                        " is invalid, can't merge");
                }
            }
        }

        auto expected = _headers[0].sorting_order;
        if (expected != SortingOrder.coordinate &&
            expected != SortingOrder.queryname)
        {
            throw new Exception("file headers indicate that some files are not sorted");
        }
        foreach (header; _headers) {
            if (header.sorting_order != expected) {
                throw new Exception("sorting orders of files don't agree, can't merge");
            }
        }
        merged_header.sorting_order = expected;

        mergeSequenceDictionaries();
        mergeReadGroups();
        mergeProgramRecords();
        mergeComments();
    }

    /// The main result of merging -- the new SamHeader.
    SamHeader merged_header;

    /// Map: index of SamHeader in input array of headers -> old refID -> new refID
    size_t[size_t][] ref_id_map;

    /// The same for read group identifiers.
    string[string][] readgroup_id_map;

    /// The same for program record identifiers.
    string[string][] program_id_map;

    /// Map: index of SamHeader in input array of headers -> new refID -> old refID
    size_t[size_t][] ref_id_reverse_map;
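
    // For example, if input file 2 lists reference "chr1" at index 3 while
    // the merged header lists it at index 5, then ref_id_map[2][3] == 5 and
    // ref_id_reverse_map[2][5] == 3. ("chr1" and the indices are purely
    // illustrative.)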

private:

    // NOTE FOR DEVELOPER:
    // for more info on what's going on here, read comments in sambamba/sambamba/merge.d

    SamHeader[] _headers;
    size_t _len; // number of headers

    static void addVerticeToDict(ref SqLine[string] dict, ref SqLine line) {
        if (line.name in dict) {
            if (line.length != dict[line.name].length) {
                // those two @SQ lines are highly unlikely to refer to the same
                // reference sequence if lengths are different
                throw new Exception("can't merge SAM headers: one of references with " ~
                                    "name " ~ line.name ~ " has length " ~
                                    to!string(dict[line.name].length) ~
                                    " while another one with the same name has length " ~
                                    to!string(line.length));
            }
            // TODO: merge individual tags?
        } else {
            dict[line.name] = line;
        }
    }

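    // For example, merging the dictionaries (A, B, C) and (D, B, E) produces
    // the edges A->B, B->C, D->B and B->E; the topological sort below then
    // yields an order such as A, D, B, E, C, preserving the relative order
    // of sequences within each input header.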
    void mergeSequenceDictionaries() {
        // make a directed graph out of reference sequences and do a topological sort

        SqLine[string] dict;

        // create a graph
        auto g = new DirectedGraph();
        foreach (header; _headers) {
            auto sequences = header.sequences.values;
            if (sequences.empty)
                continue; // skip headers without @SQ lines
            auto prev = sequences.front;
            addVerticeToDict(dict, prev);
            sequences.popFront();
            while (!sequences.empty) {
                auto cur = sequences.front;
                addVerticeToDict(dict, cur);
                g.addEdge(prev.name, cur.name);
                prev = cur;
                sequences.popFront();
            }
        }

        // get topologically sorted nodes
        foreach (v; g.topologicalSort()) {
            merged_header.sequences.add(dict[v]);
        }

        // make mappings
        foreach (size_t i, header; _headers) {
            foreach (size_t j, SqLine sq; header.sequences) {
                auto new_index = merged_header.sequences.getSequenceIndex(sq.name);
                ref_id_map[i][j] = to!size_t(new_index);
                ref_id_reverse_map[i][to!size_t(new_index)] = j;
            }
        }
    }

    // The reason to pass by reference is that when merging program records,
    // this function is called in a loop, and we need to keep some structures
    // between calls.
    //
    // $(D dict) is a collection of Line structs which will eventually become
    // part of the merged header;
    // $(D record_id_map) is an array of mappings (one per header) from old
    // record identifiers to new ones.
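    //
    // For example, if two files both contain a @PG record with identifier
    // "bwa" but the records differ in some other field, one of them keeps
    // "bwa" in the merged header while the other is renamed to "bwa.1"
    // ("bwa.2", and so on, until an unused identifier is found).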
    static void mergeHeaderLines(Line, R)(R records_with_file_ids, size_t file_count,
                                          ref HeaderLineDictionary!Line dict,
                                          ref string[string][] record_id_map)
        if (is(typeof(Line.identifier) == string) &&
            is(ElementType!R == Tuple!(Line, size_t)) &&
            (is(Line == RgLine) || is(Line == PgLine)))
    {
        // Map: record identifier -> record -> list of files
        size_t[][Line][string] id_to_record;

        foreach (record_and_file; records_with_file_ids) {
            auto rec = record_and_file[0];
            auto file_id = record_and_file[1];
            id_to_record[rec.identifier][rec] ~= file_id;
        }

        // Loop through all identifiers
        foreach (record_id, records_with_same_id; id_to_record) {

            // Several read groups/program records can share a common
            // identifier, and each of them can be present in several files.
            //
            // If read groups/program records are equal
            // (i.e. all fields are equal), they are treated
            // as a single read group/program record.
            //
            // Here we iterate over those read groups/program records
            // and the files where they were seen, renaming identifiers
            // where necessary to avoid collisions.
            foreach (rec, file_ids; records_with_same_id) {
                string new_id = record_id;
                if (record_id in dict) {
                    // if an already used ID is encountered,
                    // find an unused ID by appending ".N" to the old ID
                    for (int i = 1; ; ++i) {
                        new_id = record_id ~ "." ~ to!string(i);
                        if (new_id !in dict) {
                            break;
                        }
                    }
                }

                // save mapping
                foreach (file_id; file_ids) {
                    record_id_map[file_id][record_id] = new_id;
                }

                // update merged header
                rec.identifier = new_id;
                dict.add(rec);
            }
        }
    }

    void mergeReadGroups() {
        Tuple!(RgLine, size_t)[] readgroups_with_file_ids;
        for (size_t i = 0; i < _len; i++)
            foreach (rg; _headers[i].read_groups.values)
                readgroups_with_file_ids ~= tuple(rg, i);

        auto dict = new RgLineDictionary();

        mergeHeaderLines(readgroups_with_file_ids, _len,
                         dict, readgroup_id_map);

        merged_header.read_groups = dict;
    }

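    // Program records form chains through their PP (previous program) tags,
    // so they are merged generation by generation: first the records without
    // a PP tag, then their children (with PP tags remapped to the renamed
    // identifiers), and so on.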
    void mergeProgramRecords() {
        Tuple!(PgLine, size_t)[] programs_with_file_ids;
        for (size_t i = 0; i < _len; i++)
            foreach (pg; _headers[i].programs.values)
                programs_with_file_ids ~= tuple(pg, i);

        auto vertices = partition!"a[0].previous_program !is null"(programs_with_file_ids);
        programs_with_file_ids = programs_with_file_ids[0 .. $ - vertices.length];

        auto dict = new PgLineDictionary();

        while (!vertices.empty) {
            // populates dict and program_id_map
            mergeHeaderLines!PgLine(vertices, _len, dict, program_id_map);

            // find children of current vertices
            auto old_ids = map!"tuple(a[0].identifier, a[1])"(vertices);
            vertices = partition!((Tuple!(PgLine, size_t) a) {
                           return !canFind(old_ids, tuple(a[0].previous_program, a[1]));
                       })(programs_with_file_ids);
            programs_with_file_ids = programs_with_file_ids[0 .. $ - vertices.length];

            // update PP tags in children
            foreach (ref pg_with_file_id; vertices) {
                auto pg = pg_with_file_id[0];
                auto file_id = pg_with_file_id[1];

                if (pg.previous_program !is null &&
                    pg.previous_program in program_id_map[file_id])
                {
                    auto new_id = program_id_map[file_id][pg.previous_program];
                    if (new_id != pg.previous_program) {
                        pg.previous_program = new_id;
                    }
                }

                pg_with_file_id = tuple(pg, file_id);
            }
        }

        merged_header.programs = dict;
    }

    void mergeComments() {
        merged_header.comments = join(map!"a.comments"(_headers));
    }
}
unittest {
    import std.stdio;
    import std.algorithm;

    writeln("Testing SAM header merging...");

    auto h1 = new SamHeader();
    auto h2 = new SamHeader();
    auto h3 = new SamHeader();

    h1.sorting_order = SortingOrder.coordinate;
    h2.sorting_order = SortingOrder.coordinate;
    h3.sorting_order = SortingOrder.coordinate;

    // ---- fill reference sequence dictionaries -------------------------------
    h1.sequences.add(SqLine("A", 100));
    h1.sequences.add(SqLine("B", 200));
    h1.sequences.add(SqLine("C", 300));

    h2.sequences.add(SqLine("D", 100));
    h2.sequences.add(SqLine("B", 200));
    h2.sequences.add(SqLine("E", 300));

    h3.sequences.add(SqLine("A", 100));
    h3.sequences.add(SqLine("E", 300));
    h3.sequences.add(SqLine("C", 300));

    // expected:   A   B   C
    //               D   E

    // ---- add a few read group records ---------------------------------------
    h1.read_groups.add(RgLine("A", "CN1"));
    h1.read_groups.add(RgLine("C", "CN3"));

    h2.read_groups.add(RgLine("B", "CN2"));
    h2.read_groups.add(RgLine("C", "CN4"));

    h3.read_groups.add(RgLine("B", "CN2"));
    h3.read_groups.add(RgLine("A", "CN4"));

    // ---- add some program records with a lot of name clashes ----------------
    h1.programs.add(PgLine("A", "X"));             //          .> C
    h1.programs.add(PgLine("B", "Y", "", "A"));    //         /
    h1.programs.add(PgLine("C", "Z", "", "B"));    //  A -> B -> D
    h1.programs.add(PgLine("D", "T", "", "B"));    //

    h2.programs.add(PgLine("B", "Z"));             //  B -> A -> C
    h2.programs.add(PgLine("A", "Y", "", "B"));
    h2.programs.add(PgLine("C", "T", "", "A"));

    h3.programs.add(PgLine("D", "Y"));             //  D -> C -> B
    h3.programs.add(PgLine("C", "T", "", "D"));
    h3.programs.add(PgLine("B", "X", "", "C"));

    // expected result:
    //
    //            .> C.1
    //           /
    //  A -> B.1 -> D.1
    //
    //  B -> A.1 -> C.2
    //
    //  D -> C -> B.2

    // ---- add some comments - just for the sake of completeness --------------
    h1.comments ~= "abc";
    h2.comments ~= ["def", "ghi"];

    // ------------------ merge these three headers ----------------------------
    auto merger = new SamHeaderMerger([h1, h2, h3]);
    auto h = merger.merged_header;

    assert(h.sorting_order == SortingOrder.coordinate);

    assert(equal(h.sequences.values,
                 [SqLine("A", 100), SqLine("D", 100), SqLine("B", 200),
                  SqLine("E", 300), SqLine("C", 300)]));

    assert(h.comments == ["abc", "def", "ghi"]);

    assert(equal(sort(array(map!"a.identifier"(h.programs.values))),
                 ["A", "A.1", "B", "B.1", "B.2", "C", "C.1", "C.2", "D", "D.1"]));

    assert(equal(sort(array(map!"a.identifier"(h.read_groups.values))),
                 ["A", "A.1", "B", "C", "C.1"]));
}