This file is indexed.

/usr/share/pyshared/Bio/GenBank/Scanner.py is in python-biopython 1.58-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# Copyright 2007-2010 by Peter Cock.  All rights reserved.
# Revisions copyright 2010 by Uri Laserson.  All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license.  Please see the LICENSE file that should have been included
# as part of this package.
#
# This code is NOT intended for direct use.  It provides a basic scanner
# (for use with an event consumer such as Bio.GenBank._FeatureConsumer)
# to parse a GenBank or EMBL file (with their shared INSDC feature table).
#
# It is used by Bio.GenBank to parse GenBank files
# It is also used by Bio.SeqIO to parse GenBank and EMBL files
#
# Feature Table Documentation:
# http://www.insdc.org/files/feature_table.html
# http://www.ncbi.nlm.nih.gov/projects/collab/FT/index.html
# ftp://ftp.ncbi.nih.gov/genbank/docs/
#
# 17-MAR-2009: added wgs, wgs_scafld for GenBank whole genome shotgun master records.
# These are GenBank files that summarize the content of a project, and provide lists of
# scaffold and contig files in the project. These will be in annotations['wgs'] and
# annotations['wgs_scafld']. These GenBank files do not have sequences. See
# http://groups.google.com/group/bionet.molbio.genbank/browse_thread/thread/51fb88bf39e7dc36
# http://is.gd/nNgk
# for more details of this format, and an example.
# Added by Ying Huang & Iddo Friedberg
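#
# A minimal usage sketch (the file name is hypothetical; in normal use you
# would go via Bio.SeqIO or Bio.GenBank rather than calling a scanner class
# directly):
#
#     from Bio.GenBank.Scanner import GenBankScanner
#     record = GenBankScanner(debug=0).parse(open("example.gbk"))
#     print record.id, len(record.seq)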

import warnings
import os
import re
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_alphabet, generic_protein

class InsdcScanner(object):
    """Basic functions for breaking up a GenBank/EMBL file into sub sections.

    The International Nucleotide Sequence Database Collaboration (INSDC)
    is the collaboration between the DDBJ, EMBL, and GenBank.  These
    organisations all use the same "Feature Table" layout in their plain
    text flat file formats.

    However, the header and sequence sections of an EMBL file are very
    different in layout to those produced by GenBank/DDBJ."""

    #These constants get redefined with sensible values in the sub classes:
    RECORD_START = "XXX"  # "LOCUS       " or "ID   "
    HEADER_WIDTH = 3   # 12 or 5
    FEATURE_START_MARKERS = ["XXX***FEATURES***XXX"]
    FEATURE_END_MARKERS = ["XXX***END FEATURES***XXX"]
    FEATURE_QUALIFIER_INDENT = 0
    FEATURE_QUALIFIER_SPACER = ""
    SEQUENCE_HEADERS=["XXX"] #with right hand side spaces removed

    def __init__(self, debug=0):
        assert len(self.RECORD_START)==self.HEADER_WIDTH
        for marker in self.SEQUENCE_HEADERS:
            assert marker==marker.rstrip()
        assert len(self.FEATURE_QUALIFIER_SPACER)==self.FEATURE_QUALIFIER_INDENT
        self.debug = debug
        self.line = None

    def set_handle(self, handle):
        self.handle = handle
        self.line = ""

    def find_start(self):
        """Read in lines until find the ID/LOCUS line, which is returned.
        
        Any preamble (such as the header used by the NCBI on *.seq.gz archives)
        will we ignored."""
        while True:
            if self.line:
                line = self.line
                self.line = ""
            else:
                line = self.handle.readline()
            if not line:
                if self.debug : print "End of file"
                return None
            if line[:self.HEADER_WIDTH]==self.RECORD_START:
                if self.debug > 1: print "Found the start of a record:\n" + line
                break
            line = line.rstrip()
            if line == "//":
                if self.debug > 1: print "Skipping // marking end of last record"
            elif line == "":
                if self.debug > 1: print "Skipping blank line before record"
            else:
                #Ignore any header before the first ID/LOCUS line.
                if self.debug > 1:
                        print "Skipping header line before record:\n" + line
        self.line = line
        return line

    def parse_header(self):
        """Return list of strings making up the header

        New line characters are removed.

        Assumes you have just read in the ID/LOCUS line.
        """
        assert self.line[:self.HEADER_WIDTH]==self.RECORD_START, \
               "Not at start of record"
        
        header_lines = []
        while True:
            line = self.handle.readline()
            if not line:
                raise ValueError("Premature end of line during sequence data")
            line = line.rstrip()
            if line in self.FEATURE_START_MARKERS:
                if self.debug : print "Found header table"
                break
            #if line[:self.HEADER_WIDTH]==self.FEATURE_START_MARKER[:self.HEADER_WIDTH]:
            #    if self.debug : print "Found header table (?)"
            #    break
            if line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS:
                if self.debug : print "Found start of sequence"
                break
            if line == "//":
                raise ValueError("Premature end of sequence data marker '//' found")
            header_lines.append(line)
        self.line = line
        return header_lines

    def parse_features(self, skip=False):
        """Return list of tuples for the features (if present)

        Each feature is returned as a tuple (key, location, qualifiers)
        where key and location are strings (e.g. "CDS" and
        "complement(join(490883..490885,1..879))") while qualifiers
        is a list of two string tuples (feature qualifier keys and values).

        Assumes you have already read to the start of the features table.
        """
        if self.line.rstrip() not in self.FEATURE_START_MARKERS:
            if self.debug : print "Didn't find any feature table"
            return []
        
        while self.line.rstrip() in self.FEATURE_START_MARKERS:
            self.line = self.handle.readline()

        features = []
        line = self.line
        while True:
            if not line:
                raise ValueError("Premature end of line during features table")
            if line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS:
                if self.debug : print "Found start of sequence"
                break
            line = line.rstrip()
            if line == "//":
                raise ValueError("Premature end of features table, marker '//' found")
            if line in self.FEATURE_END_MARKERS:
                if self.debug : print "Found end of features"
                line = self.handle.readline()
                break
            if line[2:self.FEATURE_QUALIFIER_INDENT].strip() == "":
                #This is an empty feature line between qualifiers. Empty
                #feature lines within qualifiers are handled below (ignored).
                line = self.handle.readline()
                continue
            
            if skip:
                line = self.handle.readline()
                while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER:
                    line = self.handle.readline()
            else:
                #Build up a list of the lines making up this feature:
                if line[self.FEATURE_QUALIFIER_INDENT]!=" " \
                and " " in line[self.FEATURE_QUALIFIER_INDENT:]:
                    #The feature table design enforces a length limit on the feature keys.
                    #Some third party files (e.g. IMGT's EMBL like files) solve this by
                    #over indenting the location and qualifiers.
                    feature_key, line = line[2:].strip().split(None,1)
                    feature_lines = [line]
                    warnings.warn("Overindented %s feature?" % feature_key)
                else:
                    feature_key = line[2:self.FEATURE_QUALIFIER_INDENT].strip()
                    feature_lines = [line[self.FEATURE_QUALIFIER_INDENT:]]
                line = self.handle.readline()
                while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER \
                or line.rstrip() == "" : # cope with blank lines in the midst of a feature
                    #Use strip to remove any harmless trailing white space AND any leading
                    #white space (e.g. out of spec files with too much indentation)
                    feature_lines.append(line[self.FEATURE_QUALIFIER_INDENT:].strip())
                    line = self.handle.readline()
                features.append(self.parse_feature(feature_key, feature_lines))
        self.line = line
        return features

    def parse_feature(self, feature_key, lines):
        """Expects a feature as a list of strings, returns a tuple (key, location, qualifiers)

        For example given this GenBank feature:

             CDS             complement(join(490883..490885,1..879))
                             /locus_tag="NEQ001"
                             /note="conserved hypothetical [Methanococcus jannaschii];
                             COG1583:Uncharacterized ACR; IPR001472:Bipartite nuclear
                             localization signal; IPR002743: Protein of unknown
                             function DUF57"
                             /codon_start=1
                             /transl_table=11
                             /product="hypothetical protein"
                             /protein_id="NP_963295.1"
                             /db_xref="GI:41614797"
                             /db_xref="GeneID:2732620"
                             /translation="MRLLLELKALNSIDKKQLSNYLIQGFIYNILKNTEYSWLHNWKK
                             EKYFNFTLIPKKDIIENKRYYLIISSPDKRFIEVLHNKIKDLDIITIGLAQFQLRKTK
                             KFDPKLRFPWVTITPIVLREGKIVILKGDKYYKVFVKRLEELKKYNLIKKKEPILEEP
                             IEISLNQIKDGWKIIDVKDRYYDFRNKSFSAFSNWLRDLKEQSLRKYNNFCGKNFYFE
                             EAIFEGFTFYKTVSIRIRINRGEAVYIGTLWKELNVYRKLDKEEREFYKFLYDCGLGS
                             LNSMGFGFVNTKKNSAR"

        Then should give input key="CDS" and the rest of the data as a list of strings
        lines=["complement(join(490883..490885,1..879))", ..., "LNSMGFGFVNTKKNSAR"]
        where the leading spaces and trailing newlines have been removed.

        Returns tuple containing: (key as string, location string, qualifiers as list)
        as follows for this example:

        key = "CDS", string
        location = "complement(join(490883..490885,1..879))", string
        qualifiers = list of string tuples:

        [('locus_tag', '"NEQ001"'),
         ('note', '"conserved hypothetical [Methanococcus jannaschii];\nCOG1583:..."'),
         ('codon_start', '1'),
         ('transl_table', '11'),
         ('product', '"hypothetical protein"'),
         ('protein_id', '"NP_963295.1"'),
         ('db_xref', '"GI:41614797"'),
         ('db_xref', '"GeneID:2732620"'),
         ('translation', '"MRLLLELKALNSIDKKQLSNYLIQGFIYNILKNTEYSWLHNWKK\nEKYFNFT..."')]

        In the above example, the "note" and "translation" were edited for compactness,
        and they would contain multiple new line characters (displayed above as \n)

        If a qualifier is quoted (in this case, everything except codon_start and
        transl_table) then the quotes are NOT removed.

        Note that no whitespace is removed.
        """
        #Skip any blank lines
        iterator = iter(filter(None, lines))
        try:
            line = iterator.next()

            feature_location = line.strip()
            while feature_location[-1:]==",":
                #Multiline location, still more to come!
                line = iterator.next()
                feature_location += line.strip()

            qualifiers=[]

            for line in iterator:
                if line[0]=="/":
                    #New qualifier
                    i = line.find("=")
                    key = line[1:i] #does not work if i==-1
                    value = line[i+1:] #we ignore 'value' if i==-1
                    if i==-1:
                        #Qualifier with no key, e.g. /pseudo
                        key = line[1:]
                        qualifiers.append((key,None))
                    elif not value:
                        #ApE can output /note=
                        qualifiers.append((key,""))
                    elif value[0]=='"':
                        #Quoted...
                        if value[-1]!='"' or value!='"':
                            #No closing quote on the first line...
                            while value[-1] != '"':
                                value += "\n" + iterator.next()
                        else:
                            #One single line (quoted)
                            assert value == '"'
                            if self.debug : print "Quoted line %s:%s" % (key, value)
                        #DO NOT remove the quotes...
                        qualifiers.append((key,value))
                    else:
                        #Unquoted
                        #if debug : print "Unquoted line %s:%s" % (key,value)
                        qualifiers.append((key,value))
                else:
                    #Unquoted continuation
                    assert len(qualifiers) > 0
                    assert key==qualifiers[-1][0]
                    #if debug : print "Unquoted Cont %s:%s" % (key, line)
                    qualifiers[-1] = (key, qualifiers[-1][1] + "\n" + line)
            return (feature_key, feature_location, qualifiers)
        except StopIteration:
            #Bummer
            raise ValueError("Problem with '%s' feature:\n%s" \
                              % (feature_key, "\n".join(lines)))

    def parse_footer(self):
        """returns a tuple containing a list of any misc strings, and the sequence"""
        #This is a basic bit of code to scan and discard the sequence,
        #which was useful when developing the sub classes.
        if self.line in self.FEATURE_END_MARKERS:
            while self.line[:self.HEADER_WIDTH].rstrip() not in self.SEQUENCE_HEADERS:
                self.line = self.handle.readline()
                if not self.line:
                    raise ValueError("Premature end of file")
                self.line = self.line.rstrip()
            
        assert self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS, \
               "Not at start of sequence"
        while True:
            line = self.handle.readline()
            if not line : raise ValueError("Premature end of line during sequence data")
            line = line.rstrip()
            if line == "//" : break
        self.line = line
        return ([],"") #Dummy values!

    def _feed_first_line(self, consumer, line):
        """Handle the LOCUS/ID line, passing data to the comsumer
        
        This should be implemented by the EMBL / GenBank specific subclass
        
        Used by the parse_records() and parse() methods.
        """
        pass

    def _feed_header_lines(self, consumer, lines):
        """Handle the header lines (list of strings), passing data to the comsumer
        
        This should be implemented by the EMBL / GenBank specific subclass
        
        Used by the parse_records() and parse() methods.
        """
        pass


    def _feed_feature_table(self, consumer, feature_tuples):
        """Handle the feature table (list of tuples), passing data to the comsumer
        
        Used by the parse_records() and parse() methods.
        """
        consumer.start_feature_table()
        for feature_key, location_string, qualifiers in feature_tuples:
            consumer.feature_key(feature_key)
            consumer.location(location_string)
            for q_key, q_value in qualifiers:
                consumer.feature_qualifier_name([q_key])
                if q_value is not None:
                    consumer.feature_qualifier_description(q_value.replace("\n"," "))
                    
    def _feed_misc_lines(self, consumer, lines):
        """Handle any lines between features and sequence (list of strings), passing data to the consumer
        
        This should be implemented by the EMBL / GenBank specific subclass
        
        Used by the parse_records() and parse() methods.
        """
        pass

    def feed(self, handle, consumer, do_features=True):
        """Feed a set of data into the consumer.

        This method is intended for use with the "old" code in Bio.GenBank

        Arguments:
        handle - A handle with the information to parse.
        consumer - The consumer that should be informed of events.
        do_features - Boolean, should the features be parsed?
                      Skipping the features can be much faster.

        Return values:
        true  - Passed a record
        false - Did not find a record
        """        
        #Should work with both EMBL and GenBank files provided the
        #equivalent Bio.GenBank._FeatureConsumer methods are called...
        self.set_handle(handle)
        if not self.find_start():
            #Could not find (another) record
            consumer.data=None
            return False
                       
        #We use the above class methods to parse the file into a simplified format.
        #The first line, header lines and any misc lines after the features will be
        #dealt with by GenBank / EMBL specific derived classes.

        #First line and header:
        self._feed_first_line(consumer, self.line)
        self._feed_header_lines(consumer, self.parse_header())

        #Features (common to both EMBL and GenBank):
        if do_features:
            self._feed_feature_table(consumer, self.parse_features(skip=False))
        else:
            self.parse_features(skip=True) # ignore the data
        
        #Footer and sequence
        misc_lines, sequence_string = self.parse_footer()
        self._feed_misc_lines(consumer, misc_lines)

        consumer.sequence(sequence_string)
        #Calls to consumer.base_number() do nothing anyway
        consumer.record_end("//")

        assert self.line == "//"

        #And we are done
        return True
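
    # A minimal sketch of driving feed() directly with the "old" style
    # consumer (hypothetical file name; this is essentially what parse()
    # below does for you):
    #
    #     from Bio.GenBank import _FeatureConsumer
    #     from Bio.GenBank.utils import FeatureValueCleaner
    #     consumer = _FeatureConsumer(use_fuzziness=1,
    #                                 feature_cleaner=FeatureValueCleaner())
    #     if GenBankScanner().feed(open("example.gbk"), consumer):
    #         record = consumer.data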

    def parse(self, handle, do_features=True):
        """Returns a SeqRecord (with SeqFeatures if do_features=True)

        See also the method parse_records() for use on multi-record files.
        """
        from Bio.GenBank import _FeatureConsumer
        from Bio.GenBank.utils import FeatureValueCleaner

        consumer = _FeatureConsumer(use_fuzziness = 1, 
                    feature_cleaner = FeatureValueCleaner())

        if self.feed(handle, consumer, do_features):
            return consumer.data
        else:
            return None

    
    def parse_records(self, handle, do_features=True):
        """Returns a SeqRecord object iterator

        Each record (from the ID/LOCUS line to the // line) becomes a SeqRecord

        The SeqRecord objects include SeqFeatures if do_features=True
        
        This method is intended for use in Bio.SeqIO
        """
        #This is a generator function
        while True:
            record = self.parse(handle, do_features)
            if record is None : break
            assert record.id is not None
            assert record.name != "<unknown name>"
            assert record.description != "<unknown description>"
            yield record
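
    # A minimal usage sketch for parse_records() (hypothetical file name,
    # assuming a multi-record GenBank flat file):
    #
    #     scanner = GenBankScanner()
    #     for record in scanner.parse_records(open("multi.gbk")):
    #         print record.id, len(record.features)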

    def parse_cds_features(self, handle,
                           alphabet=generic_protein,
                           tags2id=('protein_id','locus_tag','product')):
        """Returns SeqRecord object iterator

        Each CDS feature becomes a SeqRecord.

        alphabet - Used for any sequence found in a translation field.
        tags2id  - Tuple of three strings, the feature keys to use
                   for the record id, name and description.

        This method is intended for use in Bio.SeqIO
        """
        self.set_handle(handle)
        while self.find_start():
            #Got an EMBL or GenBank record...
            self.parse_header() # ignore header lines!
            feature_tuples = self.parse_features()
            #self.parse_footer() # ignore footer lines!
            while True:
                line = self.handle.readline()
                if not line : break
                if line[:2]=="//" : break
            self.line = line.rstrip()

            #Now go though those features...
            for key, location_string, qualifiers in feature_tuples:
                if key=="CDS":
                    #Create SeqRecord
                    #================
                    #SeqRecord objects cannot be created with annotations, they
                    #must be added afterwards.  So create an empty record and
                    #then populate it:
                    record = SeqRecord(seq=None)
                    annotations = record.annotations

                    #Should we add a location object to the annotations?
                    #I *think* that only makes sense for SeqFeatures with their
                    #sub features...
                    annotations['raw_location'] = location_string.replace(' ','')

                    for (qualifier_name, qualifier_data) in qualifiers:
                        if qualifier_data is not None \
                        and qualifier_data[0]=='"' and qualifier_data[-1]=='"':
                            #Remove quotes
                            qualifier_data = qualifier_data[1:-1]
                        #Append the data to the annotation qualifier...
                        if qualifier_name == "translation":
                            assert record.seq is None, "Multiple translations!"
                            record.seq = Seq(qualifier_data.replace("\n",""), alphabet)
                        elif qualifier_name == "db_xref":
                            #it's a list, possibly empty.  It's safe to extend
                            record.dbxrefs.append(qualifier_data)
                        else:
                            if qualifier_data is not None:
                                qualifier_data = qualifier_data.replace("\n"," ").replace("  "," ")
                            try:
                                annotations[qualifier_name] += " " + qualifier_data
                            except KeyError:
                                #Not an addition to existing data, it's the first bit
                                annotations[qualifier_name]= qualifier_data
                        
                    #Fill in the ID, Name, Description
                    #=================================
                    try:
                        record.id = annotations[tags2id[0]]
                    except KeyError:
                        pass
                    try:
                        record.name = annotations[tags2id[1]]
                    except KeyError:
                        pass
                    try:
                        record.description = annotations[tags2id[2]]
                    except KeyError:
                        pass

                    yield record
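
    # A minimal usage sketch for parse_cds_features() (hypothetical file
    # name), giving one SeqRecord per CDS translation:
    #
    #     for protein in GenBankScanner().parse_cds_features(open("example.gbk")):
    #         print protein.id, protein.description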


class EmblScanner(InsdcScanner):
    """For extracting chunks of information in EMBL files"""

    RECORD_START = "ID   "
    HEADER_WIDTH = 5
    FEATURE_START_MARKERS = ["FH   Key             Location/Qualifiers","FH"]
    FEATURE_END_MARKERS = ["XX"] #XX can also mark the end of many things!
    FEATURE_QUALIFIER_INDENT = 21
    FEATURE_QUALIFIER_SPACER = "FT" + " " * (FEATURE_QUALIFIER_INDENT-2)
    SEQUENCE_HEADERS=["SQ", "CO"] #Remove trailing spaces

    def parse_footer(self):
        """returns a tuple containing a list of any misc strings, and the sequence"""
        assert self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS, \
            "Eh? '%s'" % self.line

        #Note that the SQ line can be split into several lines...
        misc_lines = []
        while self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS:
            misc_lines.append(self.line)
            self.line = self.handle.readline()
            if not self.line:
                raise ValueError("Premature end of file")
            self.line = self.line.rstrip()

        assert self.line[:self.HEADER_WIDTH] == " " * self.HEADER_WIDTH \
               or self.line.strip() == '//', repr(self.line)
        
        seq_lines = []
        line = self.line
        while True:
            if not line:
                raise ValueError("Premature end of file in sequence data")
            line = line.strip()
            if not line:
                raise ValueError("Blank line in sequence data")
            if line=='//':
                break
            assert self.line[:self.HEADER_WIDTH] == " " * self.HEADER_WIDTH, \
                   repr(self.line)
            #Remove trailing number now, remove spaces later
            seq_lines.append(line.rsplit(None,1)[0])
            line = self.handle.readline()
        self.line = line
        return (misc_lines, "".join(seq_lines).replace(" ", ""))

    def _feed_first_line(self, consumer, line):
        assert line[:self.HEADER_WIDTH].rstrip() == "ID"
        if line[self.HEADER_WIDTH:].count(";") == 6:
            #Looks like the semi colon separated style introduced in 2006
            self._feed_first_line_new(consumer, line)
        elif line[self.HEADER_WIDTH:].count(";") == 3:
            #Looks like the pre 2006 style
            self._feed_first_line_old(consumer, line)
        else:
            raise ValueError('Did not recognise the ID line layout:\n' + line)

    def _feed_first_line_old(self, consumer, line):
        #Expects an ID line in the style before 2006, e.g.
        #ID   SC10H5 standard; DNA; PRO; 4870 BP.
        #ID   BSUB9999   standard; circular DNA; PRO; 4214630 BP.
        assert line[:self.HEADER_WIDTH].rstrip() == "ID"
        fields = [line[self.HEADER_WIDTH:].split(None,1)[0]]
        fields.extend(line[self.HEADER_WIDTH:].split(None,1)[1].split(";"))
        fields = [entry.strip() for entry in fields]
        """
        The tokens represent:
           0. Primary accession number
           (space sep)
           1. ??? (e.g. standard)
           (semi-colon)
           2. Topology and/or Molecule type (e.g. 'circular DNA' or 'DNA')
           3. Taxonomic division (e.g. 'PRO')
           4. Sequence length (e.g. '4639675 BP.')
        """
        consumer.locus(fields[0]) #Should we also call the accession consumer?
        consumer.residue_type(fields[2])
        consumer.data_file_division(fields[3])
        self._feed_seq_length(consumer, fields[4])        

    def _feed_first_line_new(self, consumer, line):
        #Expects an ID line in the style introduced in 2006, e.g.
        #ID   X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP.
        #ID   CD789012; SV 4; linear; genomic DNA; HTG; MAM; 500 BP.
        assert line[:self.HEADER_WIDTH].rstrip() == "ID"
        fields = [data.strip() for data in line[self.HEADER_WIDTH:].strip().split(";")]
        assert len(fields) == 7
        """
        The tokens represent:
           0. Primary accession number
           1. Sequence version number
           2. Topology: 'circular' or 'linear'
           3. Molecule type (e.g. 'genomic DNA')
           4. Data class (e.g. 'STD')
           5. Taxonomic division (e.g. 'PRO')
           6. Sequence length (e.g. '4639675 BP.')
        """

        consumer.locus(fields[0])

        #Call the accession consumer now, to make sure we record
        #something as the record.id, in case there is no AC line
        consumer.accession(fields[0])

        #TODO - How to deal with the version field?  At the moment the consumer
        #will try and use this for the ID which isn't ideal for EMBL files.
        version_parts = fields[1].split()
        if len(version_parts)==2 \
        and version_parts[0]=="SV" \
        and version_parts[1].isdigit():
            consumer.version_suffix(version_parts[1])

        #Based on how the old GenBank parser worked, merge these two:
        consumer.residue_type(" ".join(fields[2:4])) #TODO - Store as two fields?

        #consumer.xxx(fields[4]) #TODO - What should we do with the data class?

        consumer.data_file_division(fields[5])

        self._feed_seq_length(consumer, fields[6])

    def _feed_seq_length(self, consumer, text):
        length_parts = text.split()
        assert len(length_parts) == 2
        assert length_parts[1].upper() in ["BP", "BP.", "AA."]
        consumer.size(length_parts[0])

    def _feed_header_lines(self, consumer, lines):
        EMBL_INDENT = self.HEADER_WIDTH
        EMBL_SPACER = " "  * EMBL_INDENT
        consumer_dict = {
            'AC' : 'accession',
            'SV' : 'version', # SV line removed in June 2006, now part of ID line
            'DE' : 'definition',
            #'RN' : 'reference_num',
            #'RC' : reference comment... TODO
            #'RP' : 'reference_bases',
            #'RX' : reference cross reference... DOI or Pubmed
            'RG' : 'consrtm', #optional consortium
            #'RA' : 'authors',
            #'RT' : 'title',
            'RL' : 'journal',
            'OS' : 'organism',
            'OC' : 'taxonomy',
            #'DR' : data reference
            'CC' : 'comment',
            #'XX' : splitter
        }
        #We have to handle the following specially:
        #RX (depending on reference type...)
        for line in lines:
            line_type = line[:EMBL_INDENT].strip()
            data = line[EMBL_INDENT:].strip()
            if line_type == 'XX':
                pass
            elif line_type == 'RN':
                # Reformat reference numbers for the GenBank based consumer
                # e.g. '[1]' becomes '1'
                if data[0] == "[" and data[-1] == "]" : data = data[1:-1]
                consumer.reference_num(data)
            elif line_type == 'RP':
                # Reformat reference numbers for the GenBank based consumer
                # e.g. '1-4639675' becomes '(bases 1 to 4639675)'
                # and '160-550, 904-1055' becomes '(bases 160 to 550; 904 to 1055)'
                parts = [bases.replace("-"," to ").strip() for bases in data.split(",")]
                consumer.reference_bases("(bases %s)" % "; ".join(parts))
            elif line_type == 'RT':
                #Remove the enclosing quotes and trailing semi colon.
                #Note the title can be split over multiple lines.
                if data.startswith('"'):
                    data = data[1:]
                if data.endswith('";'):
                    data = data[:-2]
                consumer.title(data)
            elif line_type == 'RX':
                # EMBL supports three reference types at the moment:
                # - PUBMED    PUBMED bibliographic database (NLM)
                # - DOI       Digital Object Identifier (International DOI Foundation)
                # - AGRICOLA  US National Agriculture Library (NAL) of the US Department
                #             of Agriculture (USDA)
                #
                # Format:
                # RX  resource_identifier; identifier.
                #
                # e.g.
                # RX   DOI; 10.1016/0024-3205(83)90010-3.
                # RX   PUBMED; 264242.
                #
                # Currently our reference object only supports PUBMED and MEDLINE
                # (as these were in GenBank files?).
                key, value = data.split(";",1)
                if value.endswith(".") : value = value[:-1]
                value = value.strip()
                if key == "PUBMED":
                    consumer.pubmed_id(value)
                #TODO - Handle other reference types (here and in BioSQL bindings)
            elif line_type == 'CC':
                # Have to pass a list of strings for this one (not just a string)
                consumer.comment([data])
            elif line_type == 'DR':
                # Database Cross-reference, format:
                # DR   database_identifier; primary_identifier; secondary_identifier.
                #
                # e.g.
                # DR   MGI; 98599; Tcrb-V4.
                #
                # TODO - How should we store any secondary identifier?
                parts = data.rstrip(".").split(";")
                #Turn it into "database_identifier:primary_identifier" to
                #mimic the GenBank parser. e.g. "MGI:98599"
                consumer.dblink("%s:%s" % (parts[0].strip(),
                                           parts[1].strip()))
            elif line_type == 'RA':
                # Remove trailing ; at end of authors list
                consumer.authors(data.rstrip(";"))
            elif line_type == 'PR':
                # Remove trailing ; at end of the project reference
                # In GenBank files this corresponds to the old PROJECT
                # line which is being replaced with the DBLINK line.
                consumer.project(data.rstrip(";"))
            elif line_type in consumer_dict:
                #It's a semi-automatic entry!
                getattr(consumer, consumer_dict[line_type])(data)
            else:
                if self.debug:
                    print "Ignoring EMBL header line:\n%s" % line
        
    def _feed_misc_lines(self, consumer, lines):
        #TODO - Should we do something with the information on the SQ line(s)?
        lines.append("")
        line_iter = iter(lines)
        try:
            for line in line_iter:
                if line.startswith("CO   "):
                    line = line[5:].strip()
                    contig_location = line
                    while True:
                        line = line_iter.next()
                        if not line:
                            break
                        elif line.startswith("CO   "):
                            #Don't need to preserve the whitespace here.
                            contig_location += line[5:].strip()
                        else:
                            raise ValueError('Expected CO (contig) continuation line, got:\n' + line)
                    consumer.contig_location(contig_location)
            return
        except StopIteration:
            raise ValueError("Problem in misc lines before sequence")


class _ImgtScanner(EmblScanner):
    """For extracting chunks of information in IMGT (EMBL like) files (PRIVATE).
    
    IMGT files are like EMBL files but in order to allow longer feature types
    the features should be indented by 25 characters not 21 characters. In
    practice the IMGT flat files tend to use either 21 or 25 characters, so we
    must cope with both.
    
    This is private to encourage use of Bio.SeqIO rather than Bio.GenBank.
    """

    FEATURE_START_MARKERS = ["FH   Key             Location/Qualifiers",
                             "FH   Key             Location/Qualifiers (from EMBL)",
                             "FH   Key                 Location/Qualifiers",
                             "FH"]

    def parse_features(self, skip=False):
        """Return list of tuples for the features (if present)

        Each feature is returned as a tuple (key, location, qualifiers)
        where key and location are strings (e.g. "CDS" and
        "complement(join(490883..490885,1..879))") while qualifiers
        is a list of two string tuples (feature qualifier keys and values).

        Assumes you have already read to the start of the features table.
        """
        if self.line.rstrip() not in self.FEATURE_START_MARKERS:
            if self.debug : print "Didn't find any feature table"
            return []
        
        while self.line.rstrip() in self.FEATURE_START_MARKERS:
            self.line = self.handle.readline()

        bad_position_re = re.compile(r'([0-9]+)>{1}')
        
        features = []
        line = self.line
        while True:
            if not line:
                raise ValueError("Premature end of line during features table")
            if line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS:
                if self.debug : print "Found start of sequence"
                break
            line = line.rstrip()
            if line == "//":
                raise ValueError("Premature end of features table, marker '//' found")
            if line in self.FEATURE_END_MARKERS:
                if self.debug : print "Found end of features"
                line = self.handle.readline()
                break
            if line[2:self.FEATURE_QUALIFIER_INDENT].strip() == "":
                #This is an empty feature line between qualifiers. Empty
                #feature lines within qualifiers are handled below (ignored).
                line = self.handle.readline()
                continue

            if skip:
                line = self.handle.readline()
                while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER:
                    line = self.handle.readline()
            else:
                assert line[:2] == "FT"
                try:
                    feature_key, location_start = line[2:].strip().split()
                except ValueError:
                    #e.g. "FT   TRANSMEMBRANE-REGION2163..2240\n"
                    #Assume indent of 25 as per IMGT spec, with the location
                    #start in column 26 (one-based).
                    feature_key = line[2:25].strip()
                    location_start = line[25:].strip()
                feature_lines = [location_start]
                line = self.handle.readline()
                while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER \
                or line.rstrip() == "" : # cope with blank lines in the midst of a feature
                    #Use strip to remove any harmless trailing white space AND any leading
                    #white space (copes with 21 or 26 indents and other variants)
                    assert line[:2] == "FT"
                    feature_lines.append(line[self.FEATURE_QUALIFIER_INDENT:].strip())
                    line = self.handle.readline()
                feature_key, location, qualifiers = \
                                self.parse_feature(feature_key, feature_lines)
                #Try to handle known problems with IMGT locations here:
                if ">" in location:
                    #Nasty hack for common IMGT bug, should be >123 not 123>
                    #in a location string. At least here the meaning is clear, 
                    #and since it is so common I don't want to issue a warning
                    #warnings.warn("Feature location %s is invalid, "
                    #              "moving greater than sign before position"
                    #              % location)
                    location = bad_position_re.sub(r'>\1',location)
                features.append((feature_key, location, qualifiers))
        self.line = line
        return features

class GenBankScanner(InsdcScanner):
    """For extracting chunks of information in GenBank files"""

    RECORD_START = "LOCUS       "
    HEADER_WIDTH = 12
    FEATURE_START_MARKERS = ["FEATURES             Location/Qualifiers","FEATURES"]
    FEATURE_END_MARKERS = []
    FEATURE_QUALIFIER_INDENT = 21
    FEATURE_QUALIFIER_SPACER = " " * FEATURE_QUALIFIER_INDENT
    SEQUENCE_HEADERS=["CONTIG", "ORIGIN", "BASE COUNT", "WGS"] # trailing spaces removed

    def parse_footer(self):
        """returns a tuple containing a list of any misc strings, and the sequence"""
        assert self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS, \
               "Eh? '%s'" % self.line

        misc_lines = []
        while self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS \
        or self.line[:self.HEADER_WIDTH] == " "*self.HEADER_WIDTH \
        or "WGS" == self.line[:3]:
            misc_lines.append(self.line.rstrip())
            self.line = self.handle.readline()
            if not self.line:
                raise ValueError("Premature end of file")
            self.line = self.line

        assert self.line[:self.HEADER_WIDTH].rstrip() not in self.SEQUENCE_HEADERS, \
               "Eh? '%s'" % self.line

        #Now just consume the sequence lines until reach the // marker
        #or a CONTIG line
        seq_lines = []
        line = self.line
        while True:
            if not line:
                raise ValueError("Premature end of file in sequence data")
            line = line.rstrip()
            if not line:
                import warnings
                warnings.warn("Blank line in sequence data")
                line = self.handle.readline()
                continue
            if line=='//':
                break
            if line.find('CONTIG')==0:
                break
            if len(line) > 9 and  line[9:10]!=' ':
                raise ValueError("Sequence line mal-formed, '%s'" % line)
            seq_lines.append(line[10:]) #remove spaces later
            line = self.handle.readline()

        self.line = line
        #Seq("".join(seq_lines), self.alphabet)
        return (misc_lines,"".join(seq_lines).replace(" ",""))

    def _feed_first_line(self, consumer, line):
        """Scan over and parse GenBank LOCUS line (PRIVATE).

        This must cope with several variants, primarily the old and new column
        based standards from GenBank. Additionally EnsEMBL produces GenBank
        files where the LOCUS line is space separated rather that following
        the column based layout.

        We also try to cope with GenBank like files with partial LOCUS lines.
        """
        #####################################
        # LOCUS line                        #
        #####################################
        GENBANK_INDENT = self.HEADER_WIDTH
        GENBANK_SPACER = " "*GENBANK_INDENT
        assert line[0:GENBANK_INDENT] == 'LOCUS       ', \
               'LOCUS line does not start correctly:\n' + line

        #Have to break up the locus line, and handle the different bits of it.
        #There are at least two different versions of the locus line...
        if line[29:33] in [' bp ', ' aa ',' rc '] and line[55:62] == '       ':
            #Old... note we insist on the 55:62 being empty to avoid trying
            #to parse space separated LOCUS lines from Ensembl etc, see below.
            #
            #    Positions  Contents
            #    ---------  --------
            #    00:06      LOCUS
            #    06:12      spaces
            #    12:??      Locus name
            #    ??:??      space
            #    ??:29      Length of sequence, right-justified
            #    29:33      space, bp, space
            #    33:41      strand type
            #    41:42      space
            #    42:51      Blank (implies linear), linear or circular
            #    51:52      space
            #    52:55      The division code (e.g. BCT, VRL, INV)
            #    55:62      space
            #    62:73      Date, in the form dd-MMM-yyyy (e.g., 15-MAR-1991)
            #
            #assert line[29:33] in [' bp ', ' aa ',' rc '] , \
            #       'LOCUS line does not contain size units at expected position:\n' + line
            assert line[41:42] == ' ', \
                   'LOCUS line does not contain space at position 42:\n' + line
            assert line[42:51].strip() in ['','linear','circular'], \
                   'LOCUS line does not contain valid entry (linear, circular, ...):\n' + line
            assert line[51:52] == ' ', \
                   'LOCUS line does not contain space at position 52:\n' + line
            #assert line[55:62] == '       ', \
            #      'LOCUS line does not contain spaces from position 56 to 62:\n' + line
            if line[62:73].strip():
                assert line[64:65] == '-', \
                       'LOCUS line does not contain - at position 65 in date:\n' + line
                assert line[68:69] == '-', \
                       'LOCUS line does not contain - at position 69 in date:\n' + line

            name_and_length_str = line[GENBANK_INDENT:29]
            while name_and_length_str.find('  ')!=-1:
                name_and_length_str = name_and_length_str.replace('  ',' ')
            name_and_length = name_and_length_str.split(' ')
            assert len(name_and_length)<=2, \
                   'Cannot parse the name and length in the LOCUS line:\n' + line
            assert len(name_and_length)!=1, \
                   'Name and length collide in the LOCUS line:\n' + line
                   #Should be possible to split them based on position, if
                   #a clear definition of the standard exists THAT AGREES with
                   #existing files.
            consumer.locus(name_and_length[0])
            consumer.size(name_and_length[1])
            #consumer.residue_type(line[33:41].strip())

            if line[33:51].strip() == "" and line[29:33] == ' aa ':
                #Amino acids -> protein (even if there is no residue type given)
                #We want to use a protein alphabet in this case, rather than a
                #generic one. Not sure if this is the best way to achieve this,
                #but it works because the scanner checks for this:
                consumer.residue_type("PROTEIN")
            else:
                consumer.residue_type(line[33:51].strip())

            consumer.data_file_division(line[52:55])
            if line[62:73].strip():
                consumer.date(line[62:73])
        elif line[40:44] in [' bp ', ' aa ',' rc '] \
        and line[54:64].strip() in ['','linear','circular']:
            #New... linear/circular/big blank test should avoid EnsEMBL style
            #LOCUS line being treated like a proper column based LOCUS line.
            #
            #    Positions  Contents
            #    ---------  --------
            #    00:06      LOCUS
            #    06:12      spaces
            #    12:??      Locus name
            #    ??:??      space
            #    ??:40      Length of sequence, right-justified
            #    40:44      space, bp, space
            #    44:47      Blank, ss-, ds-, ms-
            #    47:54      Blank, DNA, RNA, tRNA, mRNA, uRNA, snRNA, cDNA
            #    54:55      space
            #    55:63      Blank (implies linear), linear or circular
            #    63:64      space
            #    64:67      The division code (e.g. BCT, VRL, INV)
            #    67:68      space
            #    68:79      Date, in the form dd-MMM-yyyy (e.g., 15-MAR-1991)
            #
            assert line[40:44] in [' bp ', ' aa ',' rc '] , \
                   'LOCUS line does not contain size units at expected position:\n' + line
            assert line[44:47] in ['   ', 'ss-', 'ds-', 'ms-'], \
                   'LOCUS line does not have valid strand type (Single stranded, ...):\n' + line
            assert line[47:54].strip() == "" \
            or line[47:54].strip().find('DNA') != -1 \
            or line[47:54].strip().find('RNA') != -1, \
                   'LOCUS line does not contain valid sequence type (DNA, RNA, ...):\n' + line
            assert line[54:55] == ' ', \
                   'LOCUS line does not contain space at position 55:\n' + line
            assert line[55:63].strip() in ['','linear','circular'], \
                   'LOCUS line does not contain valid entry (linear, circular, ...):\n' + line
            assert line[63:64] == ' ', \
                   'LOCUS line does not contain space at position 64:\n' + line
            assert line[67:68] == ' ', \
                   'LOCUS line does not contain space at position 68:\n' + line
            if line[68:79].strip():
                assert line[70:71] == '-', \
                       'LOCUS line does not contain - at position 71 in date:\n' + line
                assert line[74:75] == '-', \
                       'LOCUS line does not contain - at position 75 in date:\n' + line

            name_and_length_str = line[GENBANK_INDENT:40]
            while name_and_length_str.find('  ')!=-1:
                name_and_length_str = name_and_length_str.replace('  ',' ')
            name_and_length = name_and_length_str.split(' ')
            assert len(name_and_length)<=2, \
                   'Cannot parse the name and length in the LOCUS line:\n' + line
            assert len(name_and_length)!=1, \
                   'Name and length collide in the LOCUS line:\n' + line
                   #Should be possible to split them based on position, if
                   #a clear definition of the standard exists THAT AGREES with
                   #existing files.
            consumer.locus(name_and_length[0])
            consumer.size(name_and_length[1])

            if line[44:54].strip() == "" and line[40:44] == ' aa ':
                #Amino acids -> protein (even if there is no residue type given)
                #We want to use a protein alphabet in this case, rather than a
                #generic one. Not sure if this is the best way to achieve this,
                #but it works because the scanner checks for this:
                consumer.residue_type(("PROTEIN " + line[54:63]).strip())
            else:
                consumer.residue_type(line[44:63].strip())

            consumer.data_file_division(line[64:67])
            if line[68:79].strip():
                consumer.date(line[68:79])
        elif line[GENBANK_INDENT:].strip().count(" ")==0:
            #Truncated LOCUS line, as produced by some EMBOSS tools - see bug 1762
            #
            #e.g.
            #
            #    "LOCUS       U00096"
            #
            #rather than:
            #
            #    "LOCUS       U00096               4639675 bp    DNA     circular BCT"
            #
            #    Positions  Contents
            #    ---------  --------
            #    00:06      LOCUS
            #    06:12      spaces
            #    12:??      Locus name
            if line[GENBANK_INDENT:].strip() != "":
                consumer.locus(line[GENBANK_INDENT:].strip())
            else:
                #Must be just "LOCUS       " - is this even legitimate?
                #We should be able to continue parsing... we need real-world test cases!
                warnings.warn("Minimal LOCUS line found - is this correct?\n:%r" % line)
        elif len(line.split())==7 and line.split()[3] in ["aa","bp"]:
            #Cope with EnsEMBL GenBank files which use space separation rather
            #than the expected column-based layout. e.g.
            #LOCUS       HG531_PATCH 1000000 bp DNA HTG 18-JUN-2011
            #LOCUS       HG531_PATCH 759984 bp DNA HTG 18-JUN-2011
            #LOCUS       HG506_HG1000_1_PATCH 814959 bp DNA HTG 18-JUN-2011
            #LOCUS       HG506_HG1000_1_PATCH 1219964 bp DNA HTG 18-JUN-2011
            #Notice that the 'bp' can occur in the position expected by either
            #the old or the new fixed column standards (parsed above).
            splitline = line.split()
            consumer.locus(splitline[1])
            consumer.size(splitline[2])
            consumer.residue_type(splitline[4])
            consumer.data_file_division(splitline[5])
            consumer.date(splitline[6])
        elif len(line.split())>=4 and line.split()[3] in ["aa","bp"]:
            #Cope with EMBOSS seqret output where it seems the locus id can cause
            #the other fields to overflow.  We just IGNORE the other fields!
            warnings.warn("Malformed LOCUS line found - is this correct?\n:%r" % line)
            consumer.locus(line.split()[1])
            consumer.size(line.split()[2])
        elif len(line.split())>=4 and line.split()[-1] in ["aa","bp"]:
            #Cope with pseudo-GenBank files like this:
            #   "LOCUS       RNA5 complete       1718 bp"
            #Treat everything between LOCUS and the size as the identifier.
            warnings.warn("Malformed LOCUS line found - is this correct?\n:%r" % line)
            consumer.locus(line[5:].rsplit(None,2)[0].strip())
            consumer.size(line.split()[-2])
        else:
            raise ValueError('Did not recognise the LOCUS line layout:\n' + line)


    def _feed_header_lines(self, consumer, lines):
        #Following dictionary maps GenBank lines to the associated
        #consumer methods - the special cases like LOCUS where one
        #GenBank line triggers several consumer calls have to be
        #handled individually.
        GENBANK_INDENT = self.HEADER_WIDTH
        GENBANK_SPACER = " "*GENBANK_INDENT
        consumer_dict = {
            'DEFINITION' : 'definition',
            'ACCESSION'  : 'accession',
            'NID'        : 'nid',
            'PID'        : 'pid',
            'DBSOURCE'   : 'db_source',
            'KEYWORDS'   : 'keywords',
            'SEGMENT'    : 'segment',
            'SOURCE'     : 'source',
            'AUTHORS'    : 'authors',
            'CONSRTM'    : 'consrtm',
            'PROJECT'    : 'project',
            'DBLINK'     : 'dblink',
            'TITLE'      : 'title',
            'JOURNAL'    : 'journal',
            'MEDLINE'    : 'medline_id',
            'PUBMED'     : 'pubmed_id',
            'REMARK'     : 'remark'}
        #We have to handle the following specially:
        #COMMENT (comment)
        #VERSION (version and gi)
        #REFERENCE (reference_num and reference_bases)
        #ORGANISM (organism and taxonomy)
        #The LOCUS line (locus, size, residue_type, data_file_division and
        #date) is dealt with separately when the record's first line is fed.
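        #For the simple keywords in consumer_dict, one header line (plus any
        #continuation lines indented by GENBANK_SPACER) results in a single
        #consumer call.  For example, from the first test record at the bottom
        #of this file:
        #
        #    DEFINITION  Saccharomyces cerevisiae TCP1-beta gene, partial cds, and Axl2p
        #                (AXL2) and Rev7p (REV7) genes, complete cds.
        #
        #becomes one call to consumer.definition(...) with the two lines joined
        #by a single space.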
        lines = filter(None,lines)
        lines.append("") #helps avoid getting StopIteration all the time
        line_iter = iter(lines)
        try:
            line = line_iter.next()
            while True:
                if not line : break
                line_type = line[:GENBANK_INDENT].strip()
                data = line[GENBANK_INDENT:].strip()

                if line_type == 'VERSION':
                    #Need to call consumer.version(), and maybe also consumer.gi() as well.
                    #e.g.
                    # VERSION     AC007323.5  GI:6587720
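                    #which results in consumer.version("AC007323.5") and
                    #consumer.gi("6587720").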
                    while data.find('  ')!=-1:
                        data = data.replace('  ',' ')
                    if data.find(' GI:')==-1:
                        consumer.version(data)
                    else:
                        if self.debug : print "Version [" + data.split(' GI:')[0] + "], gi [" + data.split(' GI:')[1] + "]"
                        consumer.version(data.split(' GI:')[0])
                        consumer.gi(data.split(' GI:')[1])
                    #Read in the next line!
                    line = line_iter.next()
                elif line_type == 'REFERENCE':
                    if self.debug >1 : print "Found reference [" + data + "]"
                    #Need to call consumer.reference_num() and consumer.reference_bases()
                    #e.g.
                    # REFERENCE   1  (bases 1 to 86436)
                    #
                    #Note that this can be multiline, see Bug 1968, e.g.
                    #
                    # REFERENCE   42 (bases 1517 to 1696; 3932 to 4112; 17880 to 17975; 21142 to
                    #             28259)
                    #
                    #For such cases we will call the consumer once only.
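                    #For the single line example above that means
                    #consumer.reference_num("1") followed by
                    #consumer.reference_bases("(bases 1 to 86436)").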
                    data = data.strip()

                    #Read in the next line, and see if its more of the reference:
                    while True:
                        line = line_iter.next()
                        if line[:GENBANK_INDENT] == GENBANK_SPACER:
                            #Add this continuation to the data string
                            data += " " + line[GENBANK_INDENT:]
                            if self.debug >1 : print "Extended reference text [" + data + "]"
                        else:
                            #End of the reference, leave this text in the variable "line"
                            break

                    #We now have all the reference line(s) stored in a string, data,
                    #which we pass to the consumer
                    while data.find('  ')!=-1:
                        data = data.replace('  ',' ')
                    if data.find(' ')==-1:
                        if self.debug >2 : print 'Reference number \"' + data + '\"'
                        consumer.reference_num(data)
                    else:
                        if self.debug >2 : print 'Reference number \"' + data[:data.find(' ')] + '\", \"' + data[data.find(' ')+1:] + '\"'
                        consumer.reference_num(data[:data.find(' ')])
                        consumer.reference_bases(data[data.find(' ')+1:])
                elif line_type == 'ORGANISM':
                    #Typically the first line is the organism, and subsequent lines
                    #are the taxonomy lineage.  However, given longer and longer
                    #species names (as more and more strains and sub strains get
                    #sequenced) the organism name can now get wrapped onto multiple
                    #lines.  The NCBI say we have to recognise the lineage line by
                    #the presence of semi-colon delimited entries.  In the long term,
                    #they are considering adding a new keyword (e.g. LINEAGE).
                    #See Bug 2591 for details.
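                    #For example, from the first test record at the bottom of
                    #this file:
                    #
                    #  ORGANISM  Saccharomyces cerevisiae
                    #            Eukaryota; Fungi; Ascomycota; Saccharomycotina; Saccharomycetes;
                    #            Saccharomycetales; Saccharomycetaceae; Saccharomyces.
                    #
                    #The first continuation line contains semi-colons, so it and
                    #the following line are treated as the lineage rather than
                    #as a wrapped organism name.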
                    organism_data = data
                    lineage_data = ""
                    while True:
                        line = line_iter.next()
                        if line[0:GENBANK_INDENT] == GENBANK_SPACER:
                            if lineage_data or ";" in line:
                                lineage_data += " " + line[GENBANK_INDENT:]
                            else:
                                organism_data += " " + line[GENBANK_INDENT:].strip()
                        else:
                            #End of organism and taxonomy
                            break
                    consumer.organism(organism_data)
                    if lineage_data.strip() == "" and self.debug > 1:
                        print "Taxonomy line(s) missing or blank"
                    consumer.taxonomy(lineage_data.strip())
                    del organism_data, lineage_data
                elif line_type == 'COMMENT':
                    if self.debug > 1 : print "Found comment"
                    #This can be multiline, and should call consumer.comment() once
                    #with a list where each entry is a line.
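                    #For example, the GenPept test record at the bottom of this
                    #file has the single line
                    #"COMMENT     Method: conceptual translation.", which becomes
                    #consumer.comment(["Method: conceptual translation."]).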
                    comment_list=[]
                    comment_list.append(data)
                    while True:
                        line = line_iter.next()
                        if line[0:GENBANK_INDENT] == GENBANK_SPACER:
                            data = line[GENBANK_INDENT:]
                            comment_list.append(data)
                            if self.debug > 2 : print "Comment continuation [" + data + "]"
                        else:
                            #End of the comment
                            break
                    consumer.comment(comment_list)
                    del comment_list
                elif line_type in consumer_dict:
                    #It's a semi-automatic entry!
                    #Now, this may be a multi line entry...
                    while True:
                        line = line_iter.next()
                        if line[0:GENBANK_INDENT] == GENBANK_SPACER:
                            data += ' ' + line[GENBANK_INDENT:]
                        else:
                            #We now have all the data for this entry:
                            getattr(consumer, consumer_dict[line_type])(data)
                            #End of continuation - return to top of loop!
                            break
                else:
                    if self.debug:
                        print "Ignoring GenBank header line:\n" % line
                    #Read in next line
                    line = line_iter.next()
        except StopIteration:
            raise ValueError("Problem in header")
        
    def _feed_misc_lines(self, consumer, lines):
        #Deals with a few misc lines between the features and the sequence
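        #These include the optional BASE COUNT, ORIGIN (with an optional name
        #after it), WGS, WGS_SCAFLD and CONTIG lines.  Illustrative examples
        #only - the counts and accessions below are made up, it is the leading
        #keyword that matters here:
        #
        #    BASE COUNT     1510 a   1074 c    835 g   1609 t
        #    ORIGIN
        #    WGS         AAAA01000001-AAAA01000099
        #    CONTIG      join(AE003590.1:1..10000,gap(1000),AE003591.1:1..5000)
        #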
        GENBANK_INDENT = self.HEADER_WIDTH
        GENBANK_SPACER = " "*GENBANK_INDENT
        lines.append("")
        line_iter = iter(lines)
        try:
            for line in line_iter:
                if line.find('BASE COUNT')==0:
                    line = line[10:].strip()
                    if line:
                        if self.debug : print "base_count = " + line
                        consumer.base_count(line)
                if line.find("ORIGIN")==0:
                    line = line[6:].strip()
                    if line:
                        if self.debug : print "origin_name = " + line
                        consumer.origin_name(line)
                if line.find("WGS ")==0 :                        
                    line = line[3:].strip()
                    consumer.wgs(line)
                if line.find("WGS_SCAFLD")==0 :                        
                    line = line[10:].strip()
                    consumer.add_wgs_scafld(line)
                if line.find("CONTIG")==0:
                    line = line[6:].strip()
                    contig_location = line
                    while True:
                        line = line_iter.next()
                        if not line:
                            break
                        elif line[:GENBANK_INDENT]==GENBANK_SPACER:
                            #Don't need to preserve the whitespace here.
                            contig_location += line[GENBANK_INDENT:].rstrip()
                        else:
                            raise ValueError('Expected CONTIG continuation line, got:\n' + line)
                    consumer.contig_location(contig_location)
            return
        except StopIteration:
            raise ValueError("Problem in misc lines before sequence")
        
if __name__ == "__main__":
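    #Self test: parse the example GenBank, GenPept and EMBL records defined
    #below, first as iterators over their CDS features (the tags2id argument
    #selects which feature qualifiers supply each record's id, name and
    #description) and then as whole records, with and without feature parsing.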
    from StringIO import StringIO

    gbk_example = \
"""LOCUS       SCU49845     5028 bp    DNA             PLN       21-JUN-1999
DEFINITION  Saccharomyces cerevisiae TCP1-beta gene, partial cds, and Axl2p
            (AXL2) and Rev7p (REV7) genes, complete cds.
ACCESSION   U49845
VERSION     U49845.1  GI:1293613
KEYWORDS    .
SOURCE      Saccharomyces cerevisiae (baker's yeast)
  ORGANISM  Saccharomyces cerevisiae
            Eukaryota; Fungi; Ascomycota; Saccharomycotina; Saccharomycetes;
            Saccharomycetales; Saccharomycetaceae; Saccharomyces.
REFERENCE   1  (bases 1 to 5028)
  AUTHORS   Torpey,L.E., Gibbs,P.E., Nelson,J. and Lawrence,C.W.
  TITLE     Cloning and sequence of REV7, a gene whose function is required for
            DNA damage-induced mutagenesis in Saccharomyces cerevisiae
  JOURNAL   Yeast 10 (11), 1503-1509 (1994)
  PUBMED    7871890
REFERENCE   2  (bases 1 to 5028)
  AUTHORS   Roemer,T., Madden,K., Chang,J. and Snyder,M.
  TITLE     Selection of axial growth sites in yeast requires Axl2p, a novel
            plasma membrane glycoprotein
  JOURNAL   Genes Dev. 10 (7), 777-793 (1996)
  PUBMED    8846915
REFERENCE   3  (bases 1 to 5028)
  AUTHORS   Roemer,T.
  TITLE     Direct Submission
  JOURNAL   Submitted (22-FEB-1996) Terry Roemer, Biology, Yale University, New
            Haven, CT, USA
FEATURES             Location/Qualifiers
     source          1..5028
                     /organism="Saccharomyces cerevisiae"
                     /db_xref="taxon:4932"
                     /chromosome="IX"
                     /map="9"
     CDS             <1..206
                     /codon_start=3
                     /product="TCP1-beta"
                     /protein_id="AAA98665.1"
                     /db_xref="GI:1293614"
                     /translation="SSIYNGISTSGLDLNNGTIADMRQLGIVESYKLKRAVVSSASEA
                     AEVLLRVDNIIRARPRTANRQHM"
     gene            687..3158
                     /gene="AXL2"
     CDS             687..3158
                     /gene="AXL2"
                     /note="plasma membrane glycoprotein"
                     /codon_start=1
                     /function="required for axial budding pattern of S.
                     cerevisiae"
                     /product="Axl2p"
                     /protein_id="AAA98666.1"
                     /db_xref="GI:1293615"
                     /translation="MTQLQISLLLTATISLLHLVVATPYEAYPIGKQYPPVARVNESF
                     TFQISNDTYKSSVDKTAQITYNCFDLPSWLSFDSSSRTFSGEPSSDLLSDANTTLYFN
                     VILEGTDSADSTSLNNTYQFVVTNRPSISLSSDFNLLALLKNYGYTNGKNALKLDPNE
                     VFNVTFDRSMFTNEESIVSYYGRSQLYNAPLPNWLFFDSGELKFTGTAPVINSAIAPE
                     TSYSFVIIATDIEGFSAVEVEFELVIGAHQLTTSIQNSLIINVTDTGNVSYDLPLNYV
                     YLDDDPISSDKLGSINLLDAPDWVALDNATISGSVPDELLGKNSNPANFSVSIYDTYG
                     DVIYFNFEVVSTTDLFAISSLPNINATRGEWFSYYFLPSQFTDYVNTNVSLEFTNSSQ
                     DHDWVKFQSSNLTLAGEVPKNFDKLSLGLKANQGSQSQELYFNIIGMDSKITHSNHSA
                     NATSTRSSHHSTSTSSYTSSTYTAKISSTSAAATSSAPAALPAANKTSSHNKKAVAIA
                     CGVAIPLGVILVALICFLIFWRRRRENPDDENLPHAISGPDLNNPANKPNQENATPLN
                     NPFDDDASSYDDTSIARRLAALNTLKLDNHSATESDISSVDEKRDSLSGMNTYNDQFQ
                     SQSKEELLAKPPVQPPESPFFDPQNRSSSVYMDSEPAVNKSWRYTGNLSPVSDIVRDS
                     YGSQKTVDTEKLFDLEAPEKEKRTSRDVTMSSLDPWNSNISPSPVRKSVTPSPYNVTK
                     HRNRHLQNIQDSQSGKNGITPTTMSTSSSDDFVPVKDGENFCWVHSMEPDRRPSKKRL
                     VDFSNKSNVNVGQVKDIHGRIPEML"
     gene            complement(3300..4037)
                     /gene="REV7"
     CDS             complement(3300..4037)
                     /gene="REV7"
                     /codon_start=1
                     /product="Rev7p"
                     /protein_id="AAA98667.1"
                     /db_xref="GI:1293616"
                     /translation="MNRWVEKWLRVYLKCYINLILFYRNVYPPQSFDYTTYQSFNLPQ
                     FVPINRHPALIDYIEELILDVLSKLTHVYRFSICIINKKNDLCIEKYVLDFSELQHVD
                     KDDQIITETEVFDEFRSSLNSLIMHLEKLPKVNDDTITFEAVINAIELELGHKLDRNR
                     RVDSLEEKAEIERDSNWVKCQEDENLPDNNGFQPPKIKLTSLVGSDVGPLIIHQFSEK
                     LISGDDKILNGVYSQYEEGESIFGSLF"
ORIGIN
        1 gatcctccat atacaacggt atctccacct caggtttaga tctcaacaac ggaaccattg
       61 ccgacatgag acagttaggt atcgtcgaga gttacaagct aaaacgagca gtagtcagct
      121 ctgcatctga agccgctgaa gttctactaa gggtggataa catcatccgt gcaagaccaa
      181 gaaccgccaa tagacaacat atgtaacata tttaggatat acctcgaaaa taataaaccg
      241 ccacactgtc attattataa ttagaaacag aacgcaaaaa ttatccacta tataattcaa
      301 agacgcgaaa aaaaaagaac aacgcgtcat agaacttttg gcaattcgcg tcacaaataa
      361 attttggcaa cttatgtttc ctcttcgagc agtactcgag ccctgtctca agaatgtaat
      421 aatacccatc gtaggtatgg ttaaagatag catctccaca acctcaaagc tccttgccga
      481 gagtcgccct cctttgtcga gtaattttca cttttcatat gagaacttat tttcttattc
      541 tttactctca catcctgtag tgattgacac tgcaacagcc accatcacta gaagaacaga
      601 acaattactt aatagaaaaa ttatatcttc ctcgaaacga tttcctgctt ccaacatcta
      661 cgtatatcaa gaagcattca cttaccatga cacagcttca gatttcatta ttgctgacag
      721 ctactatatc actactccat ctagtagtgg ccacgcccta tgaggcatat cctatcggaa
      781 aacaataccc cccagtggca agagtcaatg aatcgtttac atttcaaatt tccaatgata
      841 cctataaatc gtctgtagac aagacagctc aaataacata caattgcttc gacttaccga
      901 gctggctttc gtttgactct agttctagaa cgttctcagg tgaaccttct tctgacttac
      961 tatctgatgc gaacaccacg ttgtatttca atgtaatact cgagggtacg gactctgccg
     1021 acagcacgtc tttgaacaat acataccaat ttgttgttac aaaccgtcca tccatctcgc
     1081 tatcgtcaga tttcaatcta ttggcgttgt taaaaaacta tggttatact aacggcaaaa
     1141 acgctctgaa actagatcct aatgaagtct tcaacgtgac ttttgaccgt tcaatgttca
     1201 ctaacgaaga atccattgtg tcgtattacg gacgttctca gttgtataat gcgccgttac
     1261 ccaattggct gttcttcgat tctggcgagt tgaagtttac tgggacggca ccggtgataa
     1321 actcggcgat tgctccagaa acaagctaca gttttgtcat catcgctaca gacattgaag
     1381 gattttctgc cgttgaggta gaattcgaat tagtcatcgg ggctcaccag ttaactacct
     1441 ctattcaaaa tagtttgata atcaacgtta ctgacacagg taacgtttca tatgacttac
     1501 ctctaaacta tgtttatctc gatgacgatc ctatttcttc tgataaattg ggttctataa
     1561 acttattgga tgctccagac tgggtggcat tagataatgc taccatttcc gggtctgtcc
     1621 cagatgaatt actcggtaag aactccaatc ctgccaattt ttctgtgtcc atttatgata
     1681 cttatggtga tgtgatttat ttcaacttcg aagttgtctc cacaacggat ttgtttgcca
     1741 ttagttctct tcccaatatt aacgctacaa ggggtgaatg gttctcctac tattttttgc
     1801 cttctcagtt tacagactac gtgaatacaa acgtttcatt agagtttact aattcaagcc
     1861 aagaccatga ctgggtgaaa ttccaatcat ctaatttaac attagctgga gaagtgccca
     1921 agaatttcga caagctttca ttaggtttga aagcgaacca aggttcacaa tctcaagagc
     1981 tatattttaa catcattggc atggattcaa agataactca ctcaaaccac agtgcgaatg
     2041 caacgtccac aagaagttct caccactcca cctcaacaag ttcttacaca tcttctactt
     2101 acactgcaaa aatttcttct acctccgctg ctgctacttc ttctgctcca gcagcgctgc
     2161 cagcagccaa taaaacttca tctcacaata aaaaagcagt agcaattgcg tgcggtgttg
     2221 ctatcccatt aggcgttatc ctagtagctc tcatttgctt cctaatattc tggagacgca
     2281 gaagggaaaa tccagacgat gaaaacttac cgcatgctat tagtggacct gatttgaata
     2341 atcctgcaaa taaaccaaat caagaaaacg ctacaccttt gaacaacccc tttgatgatg
     2401 atgcttcctc gtacgatgat acttcaatag caagaagatt ggctgctttg aacactttga
     2461 aattggataa ccactctgcc actgaatctg atatttccag cgtggatgaa aagagagatt
     2521 ctctatcagg tatgaataca tacaatgatc agttccaatc ccaaagtaaa gaagaattat
     2581 tagcaaaacc cccagtacag cctccagaga gcccgttctt tgacccacag aataggtctt
     2641 cttctgtgta tatggatagt gaaccagcag taaataaatc ctggcgatat actggcaacc
     2701 tgtcaccagt ctctgatatt gtcagagaca gttacggatc acaaaaaact gttgatacag
     2761 aaaaactttt cgatttagaa gcaccagaga aggaaaaacg tacgtcaagg gatgtcacta
     2821 tgtcttcact ggacccttgg aacagcaata ttagcccttc tcccgtaaga aaatcagtaa
     2881 caccatcacc atataacgta acgaagcatc gtaaccgcca cttacaaaat attcaagact
     2941 ctcaaagcgg taaaaacgga atcactccca caacaatgtc aacttcatct tctgacgatt
     3001 ttgttccggt taaagatggt gaaaattttt gctgggtcca tagcatggaa ccagacagaa
     3061 gaccaagtaa gaaaaggtta gtagattttt caaataagag taatgtcaat gttggtcaag
     3121 ttaaggacat tcacggacgc atcccagaaa tgctgtgatt atacgcaacg atattttgct
     3181 taattttatt ttcctgtttt attttttatt agtggtttac agatacccta tattttattt
     3241 agtttttata cttagagaca tttaatttta attccattct tcaaatttca tttttgcact
     3301 taaaacaaag atccaaaaat gctctcgccc tcttcatatt gagaatacac tccattcaaa
     3361 attttgtcgt caccgctgat taatttttca ctaaactgat gaataatcaa aggccccacg
     3421 tcagaaccga ctaaagaagt gagttttatt ttaggaggtt gaaaaccatt attgtctggt
     3481 aaattttcat cttcttgaca tttaacccag tttgaatccc tttcaatttc tgctttttcc
     3541 tccaaactat cgaccctcct gtttctgtcc aacttatgtc ctagttccaa ttcgatcgca
     3601 ttaataactg cttcaaatgt tattgtgtca tcgttgactt taggtaattt ctccaaatgc
     3661 ataatcaaac tatttaagga agatcggaat tcgtcgaaca cttcagtttc cgtaatgatc
     3721 tgatcgtctt tatccacatg ttgtaattca ctaaaatcta aaacgtattt ttcaatgcat
     3781 aaatcgttct ttttattaat aatgcagatg gaaaatctgt aaacgtgcgt taatttagaa
     3841 agaacatcca gtataagttc ttctatatag tcaattaaag caggatgcct attaatggga
     3901 acgaactgcg gcaagttgaa tgactggtaa gtagtgtagt cgaatgactg aggtgggtat
     3961 acatttctat aaaataaaat caaattaatg tagcatttta agtataccct cagccacttc
     4021 tctacccatc tattcataaa gctgacgcaa cgattactat tttttttttc ttcttggatc
     4081 tcagtcgtcg caaaaacgta taccttcttt ttccgacctt ttttttagct ttctggaaaa
     4141 gtttatatta gttaaacagg gtctagtctt agtgtgaaag ctagtggttt cgattgactg
     4201 atattaagaa agtggaaatt aaattagtag tgtagacgta tatgcatatg tatttctcgc
     4261 ctgtttatgt ttctacgtac ttttgattta tagcaagggg aaaagaaata catactattt
     4321 tttggtaaag gtgaaagcat aatgtaaaag ctagaataaa atggacgaaa taaagagagg
     4381 cttagttcat cttttttcca aaaagcaccc aatgataata actaaaatga aaaggatttg
     4441 ccatctgtca gcaacatcag ttgtgtgagc aataataaaa tcatcacctc cgttgccttt
     4501 agcgcgtttg tcgtttgtat cttccgtaat tttagtctta tcaatgggaa tcataaattt
     4561 tccaatgaat tagcaatttc gtccaattct ttttgagctt cttcatattt gctttggaat
     4621 tcttcgcact tcttttccca ttcatctctt tcttcttcca aagcaacgat ccttctaccc
     4681 atttgctcag agttcaaatc ggcctctttc agtttatcca ttgcttcctt cagtttggct
     4741 tcactgtctt ctagctgttg ttctagatcc tggtttttct tggtgtagtt ctcattatta
     4801 gatctcaagt tattggagtc ttcagccaat tgctttgtat cagacaattg actctctaac
     4861 ttctccactt cactgtcgag ttgctcgttt ttagcggaca aagatttaat ctcgttttct
     4921 ttttcagtgt tagattgctc taattctttg agctgttctc tcagctcctc atatttttct
     4981 tgccatgact cagattctaa ttttaagcta ttcaatttct ctttgatc
//"""

    # GenBank format protein (aka GenPept) file from:
    # http://www.molecularevolution.org/resources/fileformats/
    gbk_example2 = \
"""LOCUS       AAD51968                 143 aa            linear   BCT 21-AUG-2001
DEFINITION  transcriptional regulator RovA [Yersinia enterocolitica].
ACCESSION   AAD51968
VERSION     AAD51968.1  GI:5805369
DBSOURCE    locus AF171097 accession AF171097.1
KEYWORDS    .
SOURCE      Yersinia enterocolitica
  ORGANISM  Yersinia enterocolitica
            Bacteria; Proteobacteria; Gammaproteobacteria; Enterobacteriales;
            Enterobacteriaceae; Yersinia.
REFERENCE   1  (residues 1 to 143)
  AUTHORS   Revell,P.A. and Miller,V.L.
  TITLE     A chromosomally encoded regulator is required for expression of the
            Yersinia enterocolitica inv gene and for virulence
  JOURNAL   Mol. Microbiol. 35 (3), 677-685 (2000)
  MEDLINE   20138369
   PUBMED   10672189
REFERENCE   2  (residues 1 to 143)
  AUTHORS   Revell,P.A. and Miller,V.L.
  TITLE     Direct Submission
  JOURNAL   Submitted (22-JUL-1999) Molecular Microbiology, Washington
            University School of Medicine, Campus Box 8230, 660 South Euclid,
            St. Louis, MO 63110, USA
COMMENT     Method: conceptual translation.
FEATURES             Location/Qualifiers
     source          1..143
                     /organism="Yersinia enterocolitica"
                     /mol_type="unassigned DNA"
                     /strain="JB580v"
                     /serotype="O:8"
                     /db_xref="taxon:630"
     Protein         1..143
                     /product="transcriptional regulator RovA"
                     /name="regulates inv expression"
     CDS             1..143
                     /gene="rovA"
                     /coded_by="AF171097.1:380..811"
                     /note="regulator of virulence"
                     /transl_table=11
ORIGIN      
        1 mestlgsdla rlvrvwrali dhrlkplelt qthwvtlhni nrlppeqsqi qlakaigieq
       61 pslvrtldql eekglitrht candrrakri klteqsspii eqvdgvicst rkeilggisp
      121 deiellsgli dklerniiql qsk
//
"""
    
    embl_example="""ID   X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP.
XX
AC   X56734; S46826;
XX
DT   12-SEP-1991 (Rel. 29, Created)
DT   25-NOV-2005 (Rel. 85, Last updated, Version 11)
XX
DE   Trifolium repens mRNA for non-cyanogenic beta-glucosidase
XX
KW   beta-glucosidase.
XX
OS   Trifolium repens (white clover)
OC   Eukaryota; Viridiplantae; Streptophyta; Embryophyta; Tracheophyta;
OC   Spermatophyta; Magnoliophyta; eudicotyledons; core eudicotyledons; rosids;
OC   eurosids I; Fabales; Fabaceae; Papilionoideae; Trifolieae; Trifolium.
XX
RN   [5]
RP   1-1859
RX   PUBMED; 1907511.
RA   Oxtoby E., Dunn M.A., Pancoro A., Hughes M.A.;
RT   "Nucleotide and derived amino acid sequence of the cyanogenic
RT   beta-glucosidase (linamarase) from white clover (Trifolium repens L.)";
RL   Plant Mol. Biol. 17(2):209-219(1991).
XX
RN   [6]
RP   1-1859
RA   Hughes M.A.;
RT   ;
RL   Submitted (19-NOV-1990) to the EMBL/GenBank/DDBJ databases.
RL   Hughes M.A., University of Newcastle Upon Tyne, Medical School, Newcastle
RL   Upon Tyne, NE2 4HH, UK
XX
FH   Key             Location/Qualifiers
FH
FT   source          1..1859
FT                   /organism="Trifolium repens"
FT                   /mol_type="mRNA"
FT                   /clone_lib="lambda gt10"
FT                   /clone="TRE361"
FT                   /tissue_type="leaves"
FT                   /db_xref="taxon:3899"
FT   CDS             14..1495
FT                   /product="beta-glucosidase"
FT                   /EC_number="3.2.1.21"
FT                   /note="non-cyanogenic"
FT                   /db_xref="GOA:P26204"
FT                   /db_xref="InterPro:IPR001360"
FT                   /db_xref="InterPro:IPR013781"
FT                   /db_xref="UniProtKB/Swiss-Prot:P26204"
FT                   /protein_id="CAA40058.1"
FT                   /translation="MDFIVAIFALFVISSFTITSTNAVEASTLLDIGNLSRSSFPRGFI
FT                   FGAGSSAYQFEGAVNEGGRGPSIWDTFTHKYPEKIRDGSNADITVDQYHRYKEDVGIMK
FT                   DQNMDSYRFSISWPRILPKGKLSGGINHEGIKYYNNLINELLANGIQPFVTLFHWDLPQ
FT                   VLEDEYGGFLNSGVINDFRDYTDLCFKEFGDRVRYWSTLNEPWVFSNSGYALGTNAPGR
FT                   CSASNVAKPGDSGTGPYIVTHNQILAHAEAVHVYKTKYQAYQKGKIGITLVSNWLMPLD
FT                   DNSIPDIKAAERSLDFQFGLFMEQLTTGDYSKSMRRIVKNRLPKFSKFESSLVNGSFDF
FT                   IGINYYSSSYISNAPSHGNAKPSYSTNPMTNISFEKHGIPLGPRAASIWIYVYPYMFIQ
FT                   EDFEIFCYILKINITILQFSITENGMNEFNDATLPVEEALLNTYRIDYYYRHLYYIRSA
FT                   IRAGSNVKGFYAWSFLDCNEWFAGFTVRFGLNFVD"
FT   mRNA            1..1859
FT                   /experiment="experimental evidence, no additional details
FT                   recorded"
XX
SQ   Sequence 1859 BP; 609 A; 314 C; 355 G; 581 T; 0 other;
     aaacaaacca aatatggatt ttattgtagc catatttgct ctgtttgtta ttagctcatt        60
     cacaattact tccacaaatg cagttgaagc ttctactctt cttgacatag gtaacctgag       120
     tcggagcagt tttcctcgtg gcttcatctt tggtgctgga tcttcagcat accaatttga       180
     aggtgcagta aacgaaggcg gtagaggacc aagtatttgg gataccttca cccataaata       240
     tccagaaaaa ataagggatg gaagcaatgc agacatcacg gttgaccaat atcaccgcta       300
     caaggaagat gttgggatta tgaaggatca aaatatggat tcgtatagat tctcaatctc       360
     ttggccaaga atactcccaa agggaaagtt gagcggaggc ataaatcacg aaggaatcaa       420
     atattacaac aaccttatca acgaactatt ggctaacggt atacaaccat ttgtaactct       480
     ttttcattgg gatcttcccc aagtcttaga agatgagtat ggtggtttct taaactccgg       540
     tgtaataaat gattttcgag actatacgga tctttgcttc aaggaatttg gagatagagt       600
     gaggtattgg agtactctaa atgagccatg ggtgtttagc aattctggat atgcactagg       660
     aacaaatgca ccaggtcgat gttcggcctc caacgtggcc aagcctggtg attctggaac       720
     aggaccttat atagttacac acaatcaaat tcttgctcat gcagaagctg tacatgtgta       780
     taagactaaa taccaggcat atcaaaaggg aaagataggc ataacgttgg tatctaactg       840
     gttaatgcca cttgatgata atagcatacc agatataaag gctgccgaga gatcacttga       900
     cttccaattt ggattgttta tggaacaatt aacaacagga gattattcta agagcatgcg       960
     gcgtatagtt aaaaaccgat tacctaagtt ctcaaaattc gaatcaagcc tagtgaatgg      1020
     ttcatttgat tttattggta taaactatta ctcttctagt tatattagca atgccccttc      1080
     acatggcaat gccaaaccca gttactcaac aaatcctatg accaatattt catttgaaaa      1140
     acatgggata cccttaggtc caagggctgc ttcaatttgg atatatgttt atccatatat      1200
     gtttatccaa gaggacttcg agatcttttg ttacatatta aaaataaata taacaatcct      1260
     gcaattttca atcactgaaa atggtatgaa tgaattcaac gatgcaacac ttccagtaga      1320
     agaagctctt ttgaatactt acagaattga ttactattac cgtcacttat actacattcg      1380
     ttctgcaatc agggctggct caaatgtgaa gggtttttac gcatggtcat ttttggactg      1440
     taatgaatgg tttgcaggct ttactgttcg ttttggatta aactttgtag attagaaaga      1500
     tggattaaaa aggtacccta agctttctgc ccaatggtac aagaactttc tcaaaagaaa      1560
     ctagctagta ttattaaaag aactttgtag tagattacag tacatcgttt gaagttgagt      1620
     tggtgcacct aattaaataa aagaggttac tcttaacata tttttaggcc attcgttgtg      1680
     aagttgttag gctgttattt ctattatact atgttgtagt aataagtgca ttgttgtacc      1740
     agaagctatg atcataacta taggttgatc cttcatgtat cagtttgatg ttgagaatac      1800
     tttgaattaa aagtcttttt ttattttttt aaaaaaaaaa aaaaaaaaaa aaaaaaaaa       1859
//
"""

    print "GenBank CDS Iteration"
    print "====================="

    g = GenBankScanner()
    for record in g.parse_cds_features(StringIO(gbk_example)):
        print record
        
    g = GenBankScanner()
    for record in g.parse_cds_features(StringIO(gbk_example2),
                  tags2id=('gene','locus_tag','product')):
        print record

    g = GenBankScanner()
    for record in g.parse_cds_features(StringIO(gbk_example + "\n" + gbk_example2),
                                       tags2id=('gene','locus_tag','product')):
        print record

    print
    print "GenBank Iteration"
    print "================="
    g = GenBankScanner()
    for record in g.parse_records(StringIO(gbk_example),do_features=False):
        print record.id, record.name, record.description
        print record.seq

    g = GenBankScanner()
    for record in g.parse_records(StringIO(gbk_example),do_features=True):
        print record.id, record.name, record.description
        print record.seq

    g = GenBankScanner()
    for record in g.parse_records(StringIO(gbk_example2),do_features=False):
        print record.id, record.name, record.description
        print record.seq

    g = GenBankScanner()
    for record in g.parse_records(StringIO(gbk_example2),do_features=True):
        print record.id, record.name, record.description
        print record.seq

    print
    print "EMBL CDS Iteration"
    print "=================="

    e = EmblScanner()
    for record in e.parse_cds_features(StringIO(embl_example)):
        print record
        
    print
    print "EMBL Iteration"
    print "=============="
    e = EmblScanner()
    for record in e.parse_records(StringIO(embl_example),do_features=True):
        print record.id, record.name, record.description
        print record.seq