source: pyyaml/trunk/lib/yaml/scanner.py @ 188

Revision 188, 51.6 KB checked in by xi, 9 years ago (diff)

Fix several problems caused by ill-formed documents.

The line number is not calculated correctly for DOS-style line breaks.

Fix error reporting in remove_possible_simple_key. The problem is caused by the document:

+foo: &A bar
+*A ]

Raise an error for a complex key which is not indented correctly, for instance:

? "foo"
 : "bar"
RevLine 
[39]1
[55]2# Scanner produces tokens of the following types:
[118]3# STREAM-START
4# STREAM-END
[55]5# DIRECTIVE(name, value)
6# DOCUMENT-START
7# DOCUMENT-END
8# BLOCK-SEQUENCE-START
9# BLOCK-MAPPING-START
10# BLOCK-END
11# FLOW-SEQUENCE-START
12# FLOW-MAPPING-START
13# FLOW-SEQUENCE-END
14# FLOW-MAPPING-END
15# BLOCK-ENTRY
16# FLOW-ENTRY
17# KEY
18# VALUE
19# ALIAS(value)
20# ANCHOR(value)
21# TAG(value)
22# SCALAR(value, plain)
[57]23#
24# Read comments in the Scanner code for more details.
25#
[43]26
[46]27__all__ = ['Scanner', 'ScannerError']
[43]28
[52]29from error import MarkedYAMLError
[46]30from tokens import *
[39]31
class ScannerError(MarkedYAMLError):
    # Error raised by the Scanner for malformed input. Raise sites pass a
    # context string, a context mark, a problem string, and a problem mark
    # (see MarkedYAMLError for how these are rendered).
    pass
[51]34
class SimpleKey:
    # A record describing a potential simple key (a key not introduced by
    # the '?' indicator). See the simple keys treatment in Scanner below.

    def __init__(self, token_number, required, index, line, column, mark):
        # Position the KEY token would occupy in the token stream.
        self.token_number = token_number
        # Whether a simple key is mandatory at this position (block
        # context, first token on the line — see save_possible_simple_key).
        self.required = required
        # Stream position (index/line/column) where the candidate starts.
        self.index = index
        self.line = line
        self.column = column
        # Saved mark, used for error reporting.
        self.mark = mark
[43]45
[39]46class Scanner:
47
    def __init__(self):
        """Initialize the scanner."""
        # It is assumed that Scanner and Reader will have a common descendant.
        # Reader does the dirty work of checking for BOM and converting the
        # input data to Unicode. It also adds NUL to the end.
        #
        # Reader supports the following methods
        #   self.peek(i=0)       # peek the next i-th character
        #   self.prefix(l=1)     # peek the next l characters
        #   self.forward(l=1)    # read the next l characters and move the pointer.

        # Have we reached the end of the stream?
        self.done = False

        # The number of unclosed '{' and '['. `flow_level == 0` means block
        # context.
        self.flow_level = 0

        # List of processed tokens that are not yet emitted.
        self.tokens = []

        # Add the STREAM-START token. (This must come after `self.tokens`
        # is created, since fetch_stream_start appends to it.)
        self.fetch_stream_start()

        # Number of tokens that were emitted through the `get_token` method.
        self.tokens_taken = 0

        # The current indentation level.
        self.indent = -1

        # Past indentation levels.
        self.indents = []

        # Variables related to simple keys treatment.

        # A simple key is a key that is not denoted by the '?' indicator.
        # Example of simple keys:
        #   ---
        #   block simple key: value
        #   ? not a simple key:
        #   : { flow simple key: value }
        # We emit the KEY token before all keys, so when we find a potential
        # simple key, we try to locate the corresponding ':' indicator.
        # Simple keys should be limited to a single line and 1024 characters.

        # Can a simple key start at the current position? A simple key may
        # start:
        # - at the beginning of the line, not counting indentation spaces
        #       (in block context),
        # - after '{', '[', ',' (in the flow context),
        # - after '?', ':', '-' (in the block context).
        # In the block context, this flag also signifies if a block collection
        # may start at the current position.
        self.allow_simple_key = True

        # Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more than one possible simple key
        # for each level. The value is a SimpleKey record:
        #   (token_number, required, index, line, column, mark)
        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
        # '[', or '{' tokens.
        self.possible_simple_keys = {}
110
[51]111    # Public methods.
[39]112
[136]113    def check_token(self, *choices):
[51]114        # Check if the next token is one of the given types.
[43]115        while self.need_more_tokens():
[39]116            self.fetch_more_tokens()
117        if self.tokens:
[136]118            if not choices:
119                return True
[51]120            for choice in choices:
121                if isinstance(self.tokens[0], choice):
122                    return True
123        return False
124
[136]125    def peek_token(self):
[51]126        # Return the next token, but do not delete if from the queue.
127        while self.need_more_tokens():
128            self.fetch_more_tokens()
129        if self.tokens:
[39]130            return self.tokens[0]
131
[136]132    def get_token(self):
[51]133        # Return the next token.
[39]134        while self.need_more_tokens():
135            self.fetch_more_tokens()
136        if self.tokens:
137            self.tokens_taken += 1
138            return self.tokens.pop(0)
139
[51]140    def __iter__(self):
141        # Iterator protocol.
142        while self.need_more_tokens():
143            self.fetch_more_tokens()
144        while self.tokens:
145            self.tokens_taken += 1
146            yield self.tokens.pop(0)
147            while self.need_more_tokens():
148                self.fetch_more_tokens()
149
[43]150    # Private methods.
[39]151
152    def need_more_tokens(self):
153        if self.done:
154            return False
155        if not self.tokens:
156            return True
157        # The current token may be a potential simple key, so we
158        # need to look further.
[43]159        self.stale_possible_simple_keys()
[39]160        if self.next_possible_simple_key() == self.tokens_taken:
161            return True
162
    def fetch_more_tokens(self):
        # Scan the input for the next token and append it (together with
        # any implied tokens such as BLOCK-END, KEY or the collection
        # start tokens) to `self.tokens`.

        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()

        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()

        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)

        # Peek the next character.
        ch = self.peek()

        # Is it the end of stream?
        if ch == u'\0':
            return self.fetch_stream_end()

        # Is it a directive?
        if ch == u'%' and self.check_directive():
            return self.fetch_directive()

        # Is it the document start?
        if ch == u'-' and self.check_document_start():
            return self.fetch_document_start()

        # Is it the document end?
        if ch == u'.' and self.check_document_end():
            return self.fetch_document_end()

        # TODO: support for BOM within a stream.
        #if ch == u'\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken

        # Note: the order of the following checks is NOT significant.

        # Is it the flow sequence start indicator?
        if ch == u'[':
            return self.fetch_flow_sequence_start()

        # Is it the flow mapping start indicator?
        if ch == u'{':
            return self.fetch_flow_mapping_start()

        # Is it the flow sequence end indicator?
        if ch == u']':
            return self.fetch_flow_sequence_end()

        # Is it the flow mapping end indicator?
        if ch == u'}':
            return self.fetch_flow_mapping_end()

        # Is it the flow entry indicator?
        if ch == u',':
            return self.fetch_flow_entry()

        # Is it the block entry indicator?
        if ch == u'-' and self.check_block_entry():
            return self.fetch_block_entry()

        # Is it the key indicator?
        if ch == u'?' and self.check_key():
            return self.fetch_key()

        # Is it the value indicator?
        if ch == u':' and self.check_value():
            return self.fetch_value()

        # Is it an alias?
        if ch == u'*':
            return self.fetch_alias()

        # Is it an anchor?
        if ch == u'&':
            return self.fetch_anchor()

        # Is it a tag?
        if ch == u'!':
            return self.fetch_tag()

        # Is it a literal scalar?
        if ch == u'|' and not self.flow_level:
            return self.fetch_literal()

        # Is it a folded scalar?
        if ch == u'>' and not self.flow_level:
            return self.fetch_folded()

        # Is it a single quoted scalar?
        if ch == u'\'':
            return self.fetch_single()

        # Is it a double quoted scalar?
        if ch == u'\"':
            return self.fetch_double()

        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()

        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token"
                % ch.encode('utf-8'), self.get_mark())
[39]268
[43]269    # Simple keys treatment.
270
271    def next_possible_simple_key(self):
272        # Return the number of the nearest possible simple key. Actually we
273        # don't need to loop through the whole dictionary. We may replace it
274        # with the following code:
275        #   if not self.possible_simple_keys:
276        #       return None
277        #   return self.possible_simple_keys[
278        #           min(self.possible_simple_keys.keys())].token_number
279        min_token_number = None
280        for level in self.possible_simple_keys:
281            key = self.possible_simple_keys[level]
282            if min_token_number is None or key.token_number < min_token_number:
283                min_token_number = key.token_number
284        return min_token_number
285
286    def stale_possible_simple_keys(self):
287        # Remove entries that are no longer possible simple keys. According to
288        # the YAML specification, simple keys
289        # - should be limited to a single line,
290        # - should be no longer than 1024 characters.
291        # Disabling this procedure will allow simple keys of any length and
292        # height (may cause problems if indentation is broken though).
293        for level in self.possible_simple_keys.keys():
294            key = self.possible_simple_keys[level]
[136]295            if key.line != self.line  \
296                    or self.index-key.index > 1024:
[43]297                if key.required:
[116]298                    raise ScannerError("while scanning a simple key", key.mark,
[136]299                            "could not found expected ':'", self.get_mark())
[43]300                del self.possible_simple_keys[level]
301
302    def save_possible_simple_key(self):
303        # The next token may start a simple key. We check if it's possible
304        # and save its position. This function is called for
305        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
306
307        # Check if a simple key is required at the current position.
[136]308        required = not self.flow_level and self.indent == self.column
[43]309
[47]310        # A simple key is required only if it is the first token in the current
311        # line. Therefore it is always allowed.
312        assert self.allow_simple_key or not required
313
[43]314        # The next token might be a simple key. Let's save it's number and
315        # position.
316        if self.allow_simple_key:
317            self.remove_possible_simple_key()
318            token_number = self.tokens_taken+len(self.tokens)
319            key = SimpleKey(token_number, required,
[136]320                    self.index, self.line, self.column, self.get_mark())
[43]321            self.possible_simple_keys[self.flow_level] = key
322
323    def remove_possible_simple_key(self):
324        # Remove the saved possible key position at the current flow level.
325        if self.flow_level in self.possible_simple_keys:
326            key = self.possible_simple_keys[self.flow_level]
[47]327           
[188]328            if key.required:
329                raise ScannerError("while scanning a simple key", key.mark,
330                        "could not found expected ':'", self.get_mark())
[43]331
[188]332            del self.possible_simple_keys[self.flow_level]
333
[43]334    # Indentation functions.
335
336    def unwind_indent(self, column):
337
[117]338        ## In flow context, tokens should respect indentation.
339        ## Actually the condition should be `self.indent >= column` according to
340        ## the spec. But this condition will prohibit intuitively correct
341        ## constructions such as
342        ## key : {
343        ## }
344        #if self.flow_level and self.indent > column:
345        #    raise ScannerError(None, None,
346        #            "invalid intendation or unclosed '[' or '{'",
[136]347        #            self.get_mark())
[43]348
[117]349        # In the flow context, indentation is ignored. We make the scanner less
350        # restrictive then specification requires.
351        if self.flow_level:
352            return
353
[43]354        # In block context, we may need to issue the BLOCK-END tokens.
355        while self.indent > column:
[136]356            mark = self.get_mark()
[43]357            self.indent = self.indents.pop()
[116]358            self.tokens.append(BlockEndToken(mark, mark))
[43]359
360    def add_indent(self, column):
361        # Check if we need to increase indentation.
362        if self.indent < column:
363            self.indents.append(self.indent)
364            self.indent = column
365            return True
366        return False
367
368    # Fetchers.
369
    def fetch_stream_start(self):
        # We always add STREAM-START as the first token and STREAM-END as the
        # last token. Both carry the same zero-width mark.

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-START, recording the encoding detected by the Reader.
        self.tokens.append(StreamStartToken(mark, mark,
            encoding=self.encoding))
[118]380       
381
    def fetch_stream_end(self):
        # Emit the final STREAM-END token and mark scanning as finished.

        # Set the current indentation to -1 (closes all open blocks).
        self.unwind_indent(-1)

        # Reset everything (not really needed).
        self.allow_simple_key = False
        self.possible_simple_keys = {}

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-END.
        self.tokens.append(StreamEndToken(mark, mark))

        # The stream is finished.
        self.done = True
399
    def fetch_directive(self):
        # Handle a '%' directive line.

        # Set the current indentation to -1 (closes all open blocks).
        self.unwind_indent(-1)

        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Scan and add DIRECTIVE.
        self.tokens.append(self.scan_directive())
[39]411
    def fetch_document_start(self):
        # '---' at the start of a line: emit DOCUMENT-START.
        self.fetch_document_indicator(DocumentStartToken)
[39]414
    def fetch_document_end(self):
        # '...' at the start of a line: emit DOCUMENT-END.
        self.fetch_document_indicator(DocumentEndToken)
[43]417
    def fetch_document_indicator(self, TokenClass):
        # Common code for '---' and '...': both are three characters long
        # and terminate the current block context.

        # Set the current indentation to -1 (closes all open blocks).
        self.unwind_indent(-1)

        # Reset simple keys. Note that there could not be a block collection
        # after '---'.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Add DOCUMENT-START or DOCUMENT-END.
        start_mark = self.get_mark()
        self.forward(3)
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
[39]433
    def fetch_flow_sequence_start(self):
        # '[': emit FLOW-SEQUENCE-START.
        self.fetch_flow_collection_start(FlowSequenceStartToken)
[39]436
    def fetch_flow_mapping_start(self):
        # '{': emit FLOW-MAPPING-START.
        self.fetch_flow_collection_start(FlowMappingStartToken)
[43]439
    def fetch_flow_collection_start(self, TokenClass):
        # Common code for '[' and '{'.

        # '[' and '{' may start a simple key.
        self.save_possible_simple_key()

        # Increase the flow level.
        self.flow_level += 1

        # Simple keys are allowed after '[' and '{'.
        self.allow_simple_key = True

        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
[39]456
    def fetch_flow_sequence_end(self):
        # ']': emit FLOW-SEQUENCE-END.
        self.fetch_flow_collection_end(FlowSequenceEndToken)
[39]459
    def fetch_flow_mapping_end(self):
        # '}': emit FLOW-MAPPING-END.
        self.fetch_flow_collection_end(FlowMappingEndToken)
[43]462
    def fetch_flow_collection_end(self, TokenClass):
        # Common code for ']' and '}'.

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Decrease the flow level.
        self.flow_level -= 1

        # No simple keys after ']' or '}'.
        self.allow_simple_key = False

        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
[39]479
    def fetch_flow_entry(self):
        # Handle ',' inside a flow collection.

        # Simple keys are allowed after ','.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add FLOW-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(FlowEntryToken(start_mark, end_mark))
[51]493
494    def fetch_block_entry(self):
495
[43]496        # Block context needs additional checks.
497        if not self.flow_level:
[39]498
[43]499            # Are we allowed to start a new entry?
500            if not self.allow_simple_key:
[47]501                raise ScannerError(None, None,
502                        "sequence entries are not allowed here",
[136]503                        self.get_mark())
[39]504
[43]505            # We may need to add BLOCK-SEQUENCE-START.
[136]506            if self.add_indent(self.column):
507                mark = self.get_mark()
[116]508                self.tokens.append(BlockSequenceStartToken(mark, mark))
[39]509
[51]510        # It's an error for the block entry to occur in the flow context,
511        # but we let the parser detect this.
512        else:
513            pass
514
515        # Simple keys are allowed after '-'.
[43]516        self.allow_simple_key = True
[39]517
[43]518        # Reset possible simple key on the current level.
519        self.remove_possible_simple_key()
[39]520
[51]521        # Add BLOCK-ENTRY.
[136]522        start_mark = self.get_mark()
523        self.forward()
524        end_mark = self.get_mark()
[116]525        self.tokens.append(BlockEntryToken(start_mark, end_mark))
[39]526
    def fetch_key(self):
        # Handle the '?' key indicator.

        # Block context needs additional checks.
        if not self.flow_level:

            # Are we allowed to start a key (not necessarily a simple one)?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "mapping keys are not allowed here",
                        self.get_mark())

            # We may need to add BLOCK-MAPPING-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockMappingStartToken(mark, mark))

        # Simple keys are allowed after '?' in the block context.
        self.allow_simple_key = not self.flow_level

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add KEY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(KeyToken(start_mark, end_mark))
[39]554
    def fetch_value(self):
        # Handle the ':' value indicator.

        # Do we determine a simple key?
        if self.flow_level in self.possible_simple_keys:

            # Add KEY at the recorded position of the key candidate.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(key.token_number-self.tokens_taken,
                    KeyToken(key.mark, key.mark))

            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(key.token_number-self.tokens_taken,
                            BlockMappingStartToken(key.mark, key.mark))

            # There cannot be two simple keys one after another.
            self.allow_simple_key = False

        # It must be a part of a complex key.
        else:

            # Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
            # anyway.)
            if not self.flow_level:

                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(None, None,
                            "mapping values are not allowed here",
                            self.get_mark())

            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.  It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.column):
                    mark = self.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))

            # Simple keys are allowed after ':' in the block context.
            self.allow_simple_key = not self.flow_level

            # Reset possible simple key on the current level.
            self.remove_possible_simple_key()

        # Add VALUE.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(ValueToken(start_mark, end_mark))
[37]610
    def fetch_alias(self):
        # Handle '*alias'.

        # ALIAS could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after ALIAS.
        self.allow_simple_key = False

        # Scan and add ALIAS.
        self.tokens.append(self.scan_anchor(AliasToken))
[37]621
    def fetch_anchor(self):
        # Handle '&anchor'.

        # ANCHOR could start a simple key.
        self.save_possible_simple_key()

        # No simple keys after ANCHOR.
        self.allow_simple_key = False

        # Scan and add ANCHOR.
        self.tokens.append(self.scan_anchor(AnchorToken))
[37]632
    def fetch_tag(self):
        # Handle '!tag'.

        # TAG could start a simple key.
        self.save_possible_simple_key()

        # No simple keys after TAG.
        self.allow_simple_key = False

        # Scan and add TAG.
        self.tokens.append(self.scan_tag())
[37]643
    def fetch_literal(self):
        # '|' in block context: a literal block scalar.
        self.fetch_block_scalar(style='|')
[37]646
    def fetch_folded(self):
        # '>' in block context: a folded block scalar.
        self.fetch_block_scalar(style='>')
[37]649
    def fetch_block_scalar(self, style):
        # Common code for literal ('|') and folded ('>') block scalars.

        # A simple key may follow a block scalar.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Scan and add SCALAR.
        self.tokens.append(self.scan_block_scalar(style))
[37]660
    def fetch_single(self):
        # A single-quoted flow scalar.
        self.fetch_flow_scalar(style='\'')
[37]663
    def fetch_double(self):
        # A double-quoted flow scalar.
        self.fetch_flow_scalar(style='"')
[37]666
    def fetch_flow_scalar(self, style):
        # Common code for single- and double-quoted scalars.

        # A flow scalar could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after flow scalars.
        self.allow_simple_key = False

        # Scan and add SCALAR.
        self.tokens.append(self.scan_flow_scalar(style))
[37]677
    def fetch_plain(self):
        # Handle a plain (unquoted) scalar.

        # A plain scalar could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after plain scalars. But note that `scan_plain` will
        # change this flag if the scan is finished at the beginning of the
        # line.
        self.allow_simple_key = False

        # Scan and add SCALAR. May change `allow_simple_key`.
        self.tokens.append(self.scan_plain())
[37]690
[43]691    # Checkers.
[37]692
[43]693    def check_directive(self):
[37]694
[43]695        # DIRECTIVE:        ^ '%' ...
696        # The '%' indicator is already checked.
[136]697        if self.column == 0:
[43]698            return True
[37]699
[43]700    def check_document_start(self):
[37]701
[43]702        # DOCUMENT-START:   ^ '---' (' '|'\n')
[136]703        if self.column == 0:
704            if self.prefix(3) == u'---'  \
705                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
[43]706                return True
[37]707
[43]708    def check_document_end(self):
[37]709
[43]710        # DOCUMENT-END:     ^ '...' (' '|'\n')
[136]711        if self.column == 0:
712            if self.prefix(3) == u'...'  \
713                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
[43]714                return True
[37]715
[51]716    def check_block_entry(self):
[43]717
[51]718        # BLOCK-ENTRY:      '-' (' '|'\n')
[136]719        return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
[43]720
721    def check_key(self):
722
723        # KEY(flow context):    '?'
724        if self.flow_level:
[37]725            return True
[43]726
727        # KEY(block context):   '?' (' '|'\n')
[37]728        else:
[136]729            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
[37]730
[43]731    def check_value(self):
732
733        # VALUE(flow context):  ':'
734        if self.flow_level:
[37]735            return True
[43]736
737        # VALUE(block context): ':' (' '|'\n')
[37]738        else:
[136]739            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
[37]740
[43]741    def check_plain(self):
[37]742
[48]743        # A plain scalar may start with any non-space character except:
744        #   '-', '?', ':', ',', '[', ']', '{', '}',
745        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
746        #   '%', '@', '`'.
747        #
748        # It may also start with
749        #   '-', '?', ':'
750        # if it is followed by a non-space character.
751        #
752        # Note that we limit the last rule to the block context (except the
753        # '-' character) because we want the flow context to be space
754        # independent.
[136]755        ch = self.peek()
[48]756        return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
[136]757                or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
[132]758                        and (ch == u'-' or (not self.flow_level and ch in u'?:')))
[48]759
[43]760    # Scanners.
761
    def scan_to_next_token(self):
        # We ignore spaces, line breaks and comments.
        # If we find a line break in the block context, we set the flag
        # `allow_simple_key` on.
        # The byte order mark is stripped if it's the first character in the
        # stream. We do not yet support BOM inside the stream as the
        # specification requires. Any such mark will be considered as a part
        # of the document.
        #
        # TODO: We need to make tab handling rules more sane. A good rule is
        #   Tabs cannot precede tokens
        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
        #   KEY(block), VALUE(block), BLOCK-ENTRY
        # So the checking code is
        #   if <TAB>:
        #       self.allow_simple_keys = False
        # We also need to add the check for `allow_simple_keys == True` to
        # `unwind_indent` before issuing BLOCK-END.
        # Scanners for block, flow, and plain scalars need to be modified.

        # Strip a leading BOM.
        if self.index == 0 and self.peek() == u'\uFEFF':
            self.forward()
        found = False
        while not found:
            # Skip spaces.
            while self.peek() == u' ':
                self.forward()
            # Skip a comment up to (but not including) the line break.
            if self.peek() == u'#':
                while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                    self.forward()
            # A line break in the block context re-enables simple keys.
            if self.scan_line_break():
                if not self.flow_level:
                    self.allow_simple_key = True
            else:
                found = True
[37]796
    def scan_directive(self):
        # Scan a '%YAML ...' or '%TAG ...' directive line (unknown
        # directives are skipped). See the specification for details.
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == u'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == u'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            # Unknown directive: skip the rest of the line, value stays None.
            end_mark = self.get_mark()
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)
[48]815
    def scan_directive_name(self, start_mark):
        # Scan the directive name: one or more alphanumeric, '-' or '_'
        # characters, terminated by a space, a break, or the stream end.
        # See the specification for details.
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
                or ch in u'-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        return value
836
[116]837    def scan_yaml_directive_value(self, start_mark):
[48]838        # See the specification for details.
[136]839        while self.peek() == u' ':
840            self.forward()
[116]841        major = self.scan_yaml_directive_number(start_mark)
[136]842        if self.peek() != '.':
[116]843            raise ScannerError("while scanning a directive", start_mark,
[52]844                    "expected a digit or '.', but found %r"
[136]845                    % self.peek().encode('utf-8'),
846                    self.get_mark())
847        self.forward()
[116]848        minor = self.scan_yaml_directive_number(start_mark)
[136]849        if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
[116]850            raise ScannerError("while scanning a directive", start_mark,
[52]851                    "expected a digit or ' ', but found %r"
[136]852                    % self.peek().encode('utf-8'),
853                    self.get_mark())
[48]854        return (major, minor)
[37]855
[116]856    def scan_yaml_directive_number(self, start_mark):
[48]857        # See the specification for details.
[136]858        ch = self.peek()
[48]859        if not (u'0' <= ch <= '9'):
[116]860            raise ScannerError("while scanning a directive", start_mark,
[48]861                    "expected a digit, but found %r" % ch.encode('utf-8'),
[136]862                    self.get_mark())
[48]863        length = 0
[136]864        while u'0' <= self.peek(length) <= u'9':
[48]865            length += 1
[136]866        value = int(self.prefix(length))
867        self.forward(length)
[48]868        return value
869
[116]870    def scan_tag_directive_value(self, start_mark):
[48]871        # See the specification for details.
[136]872        while self.peek() == u' ':
873            self.forward()
[116]874        handle = self.scan_tag_directive_handle(start_mark)
[136]875        while self.peek() == u' ':
876            self.forward()
[116]877        prefix = self.scan_tag_directive_prefix(start_mark)
[48]878        return (handle, prefix)
879
[116]880    def scan_tag_directive_handle(self, start_mark):
[48]881        # See the specification for details.
[116]882        value = self.scan_tag_handle('directive', start_mark)
[136]883        ch = self.peek()
[52]884        if ch != u' ':
[116]885            raise ScannerError("while scanning a directive", start_mark,
[48]886                    "expected ' ', but found %r" % ch.encode('utf-8'),
[136]887                    self.get_mark())
[48]888        return value
889
[116]890    def scan_tag_directive_prefix(self, start_mark):
[48]891        # See the specification for details.
[116]892        value = self.scan_tag_uri('directive', start_mark)
[136]893        ch = self.peek()
[48]894        if ch not in u'\0 \r\n\x85\u2028\u2029':
[116]895            raise ScannerError("while scanning a directive", start_mark,
[48]896                    "expected ' ', but found %r" % ch.encode('utf-8'),
[136]897                    self.get_mark())
[48]898        return value
899
[116]900    def scan_directive_ignored_line(self, start_mark):
[48]901        # See the specification for details.
[136]902        while self.peek() == u' ':
903            self.forward()
904        if self.peek() == u'#':
905            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
906                self.forward()
907        ch = self.peek()
[48]908        if ch not in u'\0\r\n\x85\u2028\u2029':
[116]909            raise ScannerError("while scanning a directive", start_mark,
[48]910                    "expected a comment or a line break, but found %r"
[136]911                        % ch.encode('utf-8'), self.get_mark())
[48]912        self.scan_line_break()
913
[43]914    def scan_anchor(self, TokenClass):
[48]915        # The specification does not restrict characters for anchors and
916        # aliases. This may lead to problems, for instance, the document:
917        #   [ *alias, value ]
918        # can be interpteted in two ways, as
919        #   [ "value" ]
920        # and
921        #   [ *alias , "value" ]
922        # Therefore we restrict aliases to numbers and ASCII letters.
[136]923        start_mark = self.get_mark()
924        indicator = self.peek()
[48]925        if indicator == '*':
926            name = 'alias'
927        else:
928            name = 'anchor'
[136]929        self.forward()
[48]930        length = 0
[136]931        ch = self.peek(length)
[48]932        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
933                or ch in u'-_':
934            length += 1
[136]935            ch = self.peek(length)
[48]936        if not length:
[116]937            raise ScannerError("while scanning an %s" % name, start_mark,
[52]938                    "expected alphabetic or numeric character, but found %r"
[136]939                    % ch.encode('utf-8'), self.get_mark())
940        value = self.prefix(length)
941        self.forward(length)
942        ch = self.peek()
[48]943        if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
[116]944            raise ScannerError("while scanning an %s" % name, start_mark,
[48]945                    "expected alphabetic or numeric character, but found %r"
[136]946                    % ch.encode('utf-8'), self.get_mark())
947        end_mark = self.get_mark()
[116]948        return TokenClass(value, start_mark, end_mark)
[37]949
[43]950    def scan_tag(self):
[48]951        # See the specification for details.
[136]952        start_mark = self.get_mark()
953        ch = self.peek(1)
[48]954        if ch == u'<':
955            handle = None
[136]956            self.forward(2)
[116]957            suffix = self.scan_tag_uri('tag', start_mark)
[136]958            if self.peek() != u'>':
[116]959                raise ScannerError("while parsing a tag", start_mark,
[136]960                        "expected '>', but found %r" % self.peek().encode('utf-8'),
961                        self.get_mark())
962            self.forward()
[48]963        elif ch in u'\0 \t\r\n\x85\u2028\u2029':
964            handle = None
965            suffix = u'!'
[136]966            self.forward()
[48]967        else:
968            length = 1
969            use_handle = False
970            while ch not in u'\0 \r\n\x85\u2028\u2029':
971                if ch == u'!':
972                    use_handle = True
973                    break
974                length += 1
[136]975                ch = self.peek(length)
[48]976            handle = u'!'
977            if use_handle:
[116]978                handle = self.scan_tag_handle('tag', start_mark)
[48]979            else:
980                handle = u'!'
[136]981                self.forward()
[116]982            suffix = self.scan_tag_uri('tag', start_mark)
[136]983        ch = self.peek()
[48]984        if ch not in u'\0 \r\n\x85\u2028\u2029':
[116]985            raise ScannerError("while scanning a tag", start_mark,
[48]986                    "expected ' ', but found %r" % ch.encode('utf-8'),
[136]987                    self.get_mark())
[48]988        value = (handle, suffix)
[136]989        end_mark = self.get_mark()
[116]990        return TagToken(value, start_mark, end_mark)
[43]991
    def scan_block_scalar(self, style):
        # See the specification for details.
        # `style` is '|' (literal) or '>' (folded).  Returns a non-plain
        # ScalarToken whose value has the header, indentation and chomping
        # rules already applied.

        if style == '>':
            folded = True
        else:
            folded = False

        chunks = []
        start_mark = self.get_mark()

        # Scan the header.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)

        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            # No explicit indentation indicator: take the indentation of the
            # first non-empty line (but never less than min_indent).
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            # Explicit indicator: indentation is relative to the parent node.
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = u''

        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != u'\0':
            chunks.extend(breaks)
            # `leading_non_space` decides whether the preceding break may be
            # folded into a single space (folded style only).
            leading_non_space = self.peek() not in u' \t'
            length = 0
            while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != u'\0':

                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:

                if folded and line_break == u'\n'   \
                        and leading_non_space and self.peek() not in u' \t':
                    if not breaks:
                        chunks.append(u' ')
                else:
                    chunks.append(line_break)

                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == u'\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(u' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break

        # Chomp the tail: 'clip' (None) keeps the final break, 'keep' (True)
        # also keeps trailing empty lines, 'strip' (False) drops both.
        if chomping is not False:
            chunks.append(line_break)
        if chomping is True:
            chunks.extend(breaks)

        # We are done.
        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
                style)
[48]1067
[116]1068    def scan_block_scalar_indicators(self, start_mark):
[48]1069        # See the specification for details.
1070        chomping = None
1071        increment = None
[136]1072        ch = self.peek()
[48]1073        if ch in u'+-':
1074            if ch == '+':
1075                chomping = True
1076            else:
1077                chomping = False
[136]1078            self.forward()
1079            ch = self.peek()
[48]1080            if ch in u'0123456789':
1081                increment = int(ch)
1082                if increment == 0:
[116]1083                    raise ScannerError("while scanning a block scalar", start_mark,
[48]1084                            "expected indentation indicator in the range 1-9, but found 0",
[136]1085                            self.get_mark())
1086                self.forward()
[48]1087        elif ch in u'0123456789':
1088            increment = int(ch)
1089            if increment == 0:
[116]1090                raise ScannerError("while scanning a block scalar", start_mark,
[48]1091                        "expected indentation indicator in the range 1-9, but found 0",
[136]1092                        self.get_mark())
1093            self.forward()
1094            ch = self.peek()
[48]1095            if ch in u'+-':
1096                if ch == '+':
1097                    chomping = True
1098                else:
1099                    chomping = False
[136]1100                self.forward()
1101        ch = self.peek()
[48]1102        if ch not in u'\0 \r\n\x85\u2028\u2029':
[116]1103            raise ScannerError("while scanning a block scalar", start_mark,
[48]1104                    "expected chomping or indentation indicators, but found %r"
[136]1105                        % ch.encode('utf-8'), self.get_mark())
[48]1106        return chomping, increment
1107
[116]1108    def scan_block_scalar_ignored_line(self, start_mark):
[48]1109        # See the specification for details.
[136]1110        while self.peek() == u' ':
1111            self.forward()
1112        if self.peek() == u'#':
1113            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
1114                self.forward()
1115        ch = self.peek()
[48]1116        if ch not in u'\0\r\n\x85\u2028\u2029':
[116]1117            raise ScannerError("while scanning a block scalar", start_mark,
[48]1118                    "expected a comment or a line break, but found %r"
[136]1119                        % ch.encode('utf-8'), self.get_mark())
[48]1120        self.scan_line_break()
[43]1121
[48]1122    def scan_block_scalar_indentation(self):
1123        # See the specification for details.
1124        chunks = []
1125        max_indent = 0
[136]1126        end_mark = self.get_mark()
1127        while self.peek() in u' \r\n\x85\u2028\u2029':
1128            if self.peek() != u' ':
[48]1129                chunks.append(self.scan_line_break())
[136]1130                end_mark = self.get_mark()
[48]1131            else:
[136]1132                self.forward()
1133                if self.column > max_indent:
1134                    max_indent = self.column
[116]1135        return chunks, max_indent, end_mark
[48]1136
1137    def scan_block_scalar_breaks(self, indent):
1138        # See the specification for details.
1139        chunks = []
[136]1140        end_mark = self.get_mark()
1141        while self.column < indent and self.peek() == u' ':
1142            self.forward()
1143        while self.peek() in u'\r\n\x85\u2028\u2029':
[48]1144            chunks.append(self.scan_line_break())
[136]1145            end_mark = self.get_mark()
1146            while self.column < indent and self.peek() == u' ':
1147                self.forward()
[116]1148        return chunks, end_mark
[48]1149
[130]1150    def scan_flow_scalar(self, style):
[48]1151        # See the specification for details.
[117]1152        # Note that we loose indentation rules for quoted scalars. Quoted
1153        # scalars don't need to adhere indentation because " and ' clearly
1154        # mark the beginning and the end of them. Therefore we are less
1155        # restrictive then the specification requires. We only need to check
1156        # that document separators are not included in scalars.
[130]1157        if style == '"':
1158            double = True
1159        else:
1160            double = False
[48]1161        chunks = []
[136]1162        start_mark = self.get_mark()
1163        quote = self.peek()
1164        self.forward()
[117]1165        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
[136]1166        while self.peek() != quote:
[117]1167            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
1168            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
[136]1169        self.forward()
1170        end_mark = self.get_mark()
[130]1171        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
1172                style)
[48]1173
    # Translation table for single-character escape sequences that may
    # appear after a backslash in a double-quoted scalar (see the YAML
    # specification, "Escaped Characters").
    ESCAPE_REPLACEMENTS = {
        u'0':   u'\0',      # null
        u'a':   u'\x07',    # bell
        u'b':   u'\x08',    # backspace
        u't':   u'\x09',    # horizontal tab
        u'\t':  u'\x09',    # a literal tab after '\' also means tab
        u'n':   u'\x0A',    # line feed
        u'v':   u'\x0B',    # vertical tab
        u'f':   u'\x0C',    # form feed
        u'r':   u'\x0D',    # carriage return
        u'e':   u'\x1B',    # escape
        u' ':   u'\x20',    # space
        u'\"':  u'\"',      # double quote
        u'\\':  u'\\',      # backslash
        u'N':   u'\x85',    # Unicode next line (NEL)
        u'_':   u'\xA0',    # non-breaking space
        u'L':   u'\u2028',  # Unicode line separator
        u'P':   u'\u2029',  # Unicode paragraph separator
    }

    # Numeric escapes: '\x', '\u' and '\U' must be followed by this many
    # hexadecimal digits.
    ESCAPE_CODES = {
        u'x':   2,
        u'u':   4,
        u'U':   8,
    }
1199
    def scan_flow_scalar_non_spaces(self, double, start_mark):
        # See the specification for details.
        # Scan the non-blank content of a quoted scalar: ordinary characters,
        # the doubled quote ('') in single-quoted scalars, and backslash
        # escapes in double-quoted ones.  Returns the collected chunks; stops
        # (without consuming) at a blank, a line break, the closing quote or
        # the end of the stream.
        chunks = []
        while True:
            # Copy a maximal run of ordinary characters in one slice.
            length = 0
            while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
                length += 1
            if length:
                chunks.append(self.prefix(length))
                self.forward(length)
            ch = self.peek()
            if not double and ch == u'\'' and self.peek(1) == u'\'':
                # '' inside a single-quoted scalar denotes a single quote.
                chunks.append(u'\'')
                self.forward(2)
            elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
                # The other style's quote (or backslash) is plain data here.
                chunks.append(ch)
                self.forward()
            elif double and ch == u'\\':
                self.forward()
                ch = self.peek()
                if ch in self.ESCAPE_REPLACEMENTS:
                    # Single-character escape, e.g. \n, \t, \0.
                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                    self.forward()
                elif ch in self.ESCAPE_CODES:
                    # Numeric escape: \xXX, \uXXXX or \UXXXXXXXX.
                    length = self.ESCAPE_CODES[ch]
                    self.forward()
                    for k in range(length):
                        if self.peek(k) not in u'0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
                                        (length, self.peek(k).encode('utf-8')), self.get_mark())
                    code = int(self.prefix(length), 16)
                    chunks.append(unichr(code))
                    self.forward(length)
                elif ch in u'\r\n\x85\u2028\u2029':
                    # An escaped line break is removed together with any
                    # following blank lines.
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
            else:
                return chunks
[37]1242
[117]1243    def scan_flow_scalar_spaces(self, double, start_mark):
[48]1244        # See the specification for details.
1245        chunks = []
1246        length = 0
[136]1247        while self.peek(length) in u' \t':
[48]1248            length += 1
[136]1249        whitespaces = self.prefix(length)
1250        self.forward(length)
1251        ch = self.peek()
[48]1252        if ch == u'\0':
[116]1253            raise ScannerError("while scanning a quoted scalar", start_mark,
[136]1254                    "found unexpected end of stream", self.get_mark())
[48]1255        elif ch in u'\r\n\x85\u2028\u2029':
1256            line_break = self.scan_line_break()
[117]1257            breaks = self.scan_flow_scalar_breaks(double, start_mark)
[48]1258            if line_break != u'\n':
1259                chunks.append(line_break)
1260            elif not breaks:
1261                chunks.append(u' ')
1262            chunks.extend(breaks)
1263        else:
1264            chunks.append(whitespaces)
1265        return chunks
1266
[117]1267    def scan_flow_scalar_breaks(self, double, start_mark):
[48]1268        # See the specification for details.
1269        chunks = []
1270        while True:
[117]1271            # Instead of checking indentation, we check for document
1272            # separators.
[136]1273            prefix = self.prefix(3)
[117]1274            if (prefix == u'---' or prefix == u'...')   \
[136]1275                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
[116]1276                raise ScannerError("while scanning a quoted scalar", start_mark,
[136]1277                        "found unexpected document separator", self.get_mark())
1278            while self.peek() in u' \t':
1279                self.forward()
1280            if self.peek() in u'\r\n\x85\u2028\u2029':
[48]1281                chunks.append(self.scan_line_break())
1282            else:
1283                return chunks
1284
[43]1285    def scan_plain(self):
[48]1286        # See the specification for details.
1287        # We add an additional restriction for the flow context:
[117]1288        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
[48]1289        # We also keep track of the `allow_simple_key` flag here.
[117]1290        # Indentation rules are loosed for the flow context.
[48]1291        chunks = []
[136]1292        start_mark = self.get_mark()
[116]1293        end_mark = start_mark
[43]1294        indent = self.indent+1
[117]1295        # We allow zero indentation for scalars, but then we need to check for
1296        # document separators at the beginning of the line.
1297        #if indent == 0:
1298        #    indent = 1
[48]1299        spaces = []
[43]1300        while True:
[48]1301            length = 0
[136]1302            if self.peek() == u'#':
[43]1303                break
[48]1304            while True:
[136]1305                ch = self.peek(length)
[48]1306                if ch in u'\0 \t\r\n\x85\u2028\u2029'   \
1307                        or (not self.flow_level and ch == u':' and
[136]1308                                self.peek(length+1) in u'\0 \t\r\n\x28\u2028\u2029') \
[48]1309                        or (self.flow_level and ch in u',:?[]{}'):
1310                    break
1311                length += 1
[149]1312            # It's not clear what we should do with ':' in the flow context.
1313            if (self.flow_level and ch == u':'
1314                    and self.peek(length+1) not in u'\0 \t\r\n\x28\u2028\u2029,[]{}'):
1315                self.forward(length)
1316                raise ScannerError("while scanning a plain scalar", start_mark,
1317                    "found unexpected ':'", self.get_mark(),
1318                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
[48]1319            if length == 0:
[43]1320                break
[48]1321            self.allow_simple_key = False
1322            chunks.extend(spaces)
[136]1323            chunks.append(self.prefix(length))
1324            self.forward(length)
1325            end_mark = self.get_mark()
[117]1326            spaces = self.scan_plain_spaces(indent, start_mark)
[136]1327            if not spaces or self.peek() == u'#' \
1328                    or (not self.flow_level and self.column < indent):
[48]1329                break
[116]1330        return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
[37]1331
    def scan_plain_spaces(self, indent, start_mark):
        # See the specification for details.
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        #
        # Returns the list of whitespace/folding chunks to insert before the
        # next plain fragment, or None when a document separator ('---' or
        # '...') is found — the caller treats None as end of the scalar.
        chunks = []
        length = 0
        while self.peek(length) in u' ':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch in u'\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            # A line break inside a plain scalar re-enables simple keys.
            self.allow_simple_key = True
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...')   \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return
            breaks = []
            while self.peek() in u' \r\n\x85\u2028\u2029':
                if self.peek() == ' ':
                    self.forward()
                else:
                    breaks.append(self.scan_line_break())
                    # Check for a document separator after every break.
                    prefix = self.prefix(3)
                    if (prefix == u'---' or prefix == u'...')   \
                            and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                        return
            # Folding: a single '\n' becomes one space; extra breaks are
            # kept; a non-'\n' break (e.g. u'\u2028') is kept literally.
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(u' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks
1368
[116]1369    def scan_tag_handle(self, name, start_mark):
[48]1370        # See the specification for details.
1371        # For some strange reasons, the specification does not allow '_' in
1372        # tag handles. I have allowed it anyway.
[136]1373        ch = self.peek()
[52]1374        if ch != u'!':
[116]1375            raise ScannerError("while scanning a %s" % name, start_mark,
[48]1376                    "expected '!', but found %r" % ch.encode('utf-8'),
[136]1377                    self.get_mark())
[48]1378        length = 1
[136]1379        ch = self.peek(length)
[48]1380        if ch != u' ':
1381            while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
1382                    or ch in u'-_':
1383                length += 1
[136]1384                ch = self.peek(length)
[48]1385            if ch != u'!':
[136]1386                self.forward(length)
[116]1387                raise ScannerError("while scanning a %s" % name, start_mark,
[48]1388                        "expected '!', but found %r" % ch.encode('utf-8'),
[136]1389                        self.get_mark())
[48]1390            length += 1
[136]1391        value = self.prefix(length)
1392        self.forward(length)
[48]1393        return value
1394
[116]1395    def scan_tag_uri(self, name, start_mark):
[48]1396        # See the specification for details.
1397        # Note: we do not check if URI is well-formed.
1398        chunks = []
1399        length = 0
[136]1400        ch = self.peek(length)
[48]1401        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
1402                or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
1403            if ch == u'%':
[136]1404                chunks.append(self.prefix(length))
1405                self.forward(length)
[48]1406                length = 0
[116]1407                chunks.append(self.scan_uri_escapes(name, start_mark))
[48]1408            else:
1409                length += 1
[136]1410            ch = self.peek(length)
[48]1411        if length:
[136]1412            chunks.append(self.prefix(length))
1413            self.forward(length)
[48]1414            length = 0
1415        if not chunks:
[116]1416            raise ScannerError("while parsing a %s" % name, start_mark,
[48]1417                    "expected URI, but found %r" % ch.encode('utf-8'),
[136]1418                    self.get_mark())
[48]1419        return u''.join(chunks)
1420
    def scan_uri_escapes(self, name, start_mark):
        # See the specification for details.
        # Decode a run of %XX sequences into UTF-8 bytes and then into a
        # unicode string; a malformed UTF-8 sequence is reported at the
        # position where the escapes started.
        bytes = []
        mark = self.get_mark()
        while self.peek() == u'%':
            self.forward()
            # Each escape must supply exactly two hexadecimal digits.
            for k in range(2):
                if self.peek(k) not in u'0123456789ABCDEFabcdef':
                    raise ScannerError("while scanning a %s" % name, start_mark,
                            "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
                                (self.peek(k).encode('utf-8')), self.get_mark())
            bytes.append(chr(int(self.prefix(2), 16)))
            self.forward(2)
        try:
            value = unicode(''.join(bytes), 'utf-8')
        except UnicodeDecodeError, exc:
            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
        return value
1439
[47]1440    def scan_line_break(self):
1441        # Transforms:
1442        #   '\r\n'      :   '\n'
1443        #   '\r'        :   '\n'
1444        #   '\n'        :   '\n'
1445        #   '\x85'      :   '\n'
1446        #   '\u2028'    :   '\u2028'
1447        #   '\u2029     :   '\u2029'
1448        #   default     :   ''
[136]1449        ch = self.peek()
[47]1450        if ch in u'\r\n\x85':
[136]1451            if self.prefix(2) == u'\r\n':
1452                self.forward(2)
[47]1453            else:
[136]1454                self.forward()
[47]1455            return u'\n'
1456        elif ch in u'\u2028\u2029':
[136]1457            self.forward()
[47]1458            return ch
1459        return u''
1460
[45]1461#try:
1462#    import psyco
1463#    psyco.bind(Scanner)
1464#except ImportError:
1465#    pass
1466
Note: See TracBrowser for help on using the repository browser.