source: pyyaml/trunk/lib3/yaml/scanner.py @ 328

Revision 328, 50.6 KB checked in by xi, 5 years ago (diff)

Added basic support for Python 3 (Thanks idadesub(at)users(dot)sourceforge(dot)net).

RevLine 
[39]1
[55]2# Scanner produces tokens of the following types:
[118]3# STREAM-START
4# STREAM-END
[55]5# DIRECTIVE(name, value)
6# DOCUMENT-START
7# DOCUMENT-END
8# BLOCK-SEQUENCE-START
9# BLOCK-MAPPING-START
10# BLOCK-END
11# FLOW-SEQUENCE-START
12# FLOW-MAPPING-START
13# FLOW-SEQUENCE-END
14# FLOW-MAPPING-END
15# BLOCK-ENTRY
16# FLOW-ENTRY
17# KEY
18# VALUE
19# ALIAS(value)
20# ANCHOR(value)
21# TAG(value)
[222]22# SCALAR(value, plain, style)
[57]23#
24# Read comments in the Scanner code for more details.
25#
[43]26
[46]27__all__ = ['Scanner', 'ScannerError']
[43]28
[328]29from .error import MarkedYAMLError
30from .tokens import *
[39]31
class ScannerError(MarkedYAMLError):
    """Error raised by the Scanner when the input cannot be tokenized."""
    pass
[51]34
class SimpleKey:
    """Record of a potential simple key (see simple keys treatment below)."""

    def __init__(self, token_number, required, index, line, column, mark):
        # Number of the token that would become the KEY token.
        self.token_number = token_number
        # True if a ':' must follow (block context at the indentation column).
        self.required = required
        # Stream position where the potential key starts.
        self.index = index
        self.line = line
        self.column = column
        # Mark for error reporting.
        self.mark = mark
[43]45
[328]46class Scanner:
[39]47
    def __init__(self):
        """Initialize the scanner."""
        # It is assumed that Scanner and Reader will have a common descendant.
        # Reader does the dirty work of checking for BOM and converting the
        # input data to Unicode. It also adds NUL to the end.
        #
        # Reader supports the following methods
        #   self.peek(i=0)       # peek the next i-th character
        #   self.prefix(l=1)     # peek the next l characters
        #   self.forward(l=1)    # read the next l characters and move the pointer.

        # Have we reached the end of the stream?
        self.done = False

        # The number of unclosed '{' and '['. `flow_level == 0` means block
        # context.
        self.flow_level = 0

        # List of processed tokens that are not yet emitted.
        self.tokens = []

        # Add the STREAM-START token.
        self.fetch_stream_start()

        # Number of tokens that were emitted through the `get_token` method.
        self.tokens_taken = 0

        # The current indentation level.
        self.indent = -1

        # Past indentation levels.
        self.indents = []

        # Variables related to simple keys treatment.

        # A simple key is a key that is not denoted by the '?' indicator.
        # Example of simple keys:
        #   ---
        #   block simple key: value
        #   ? not a simple key:
        #   : { flow simple key: value }
        # We emit the KEY token before all keys, so when we find a potential
        # simple key, we try to locate the corresponding ':' indicator.
        # Simple keys should be limited to a single line and 1024 characters.

        # Can a simple key start at the current position? A simple key may
        # start:
        # - at the beginning of the line, not counting indentation spaces
        #       (in block context),
        # - after '{', '[', ',' (in the flow context),
        # - after '?', ':', '-' (in the block context).
        # In the block context, this flag also signifies if a block collection
        # may start at the current position.
        self.allow_simple_key = True

        # Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more than one possible simple key
        # for each level. The value is a SimpleKey record:
        #   (token_number, required, index, line, column, mark)
        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
        # '[', or '{' tokens.
        self.possible_simple_keys = {}
110
[51]111    # Public methods.
[39]112
[136]113    def check_token(self, *choices):
[51]114        # Check if the next token is one of the given types.
[43]115        while self.need_more_tokens():
[39]116            self.fetch_more_tokens()
117        if self.tokens:
[136]118            if not choices:
119                return True
[51]120            for choice in choices:
121                if isinstance(self.tokens[0], choice):
122                    return True
123        return False
124
[136]125    def peek_token(self):
[51]126        # Return the next token, but do not delete if from the queue.
127        while self.need_more_tokens():
128            self.fetch_more_tokens()
129        if self.tokens:
[39]130            return self.tokens[0]
131
[136]132    def get_token(self):
[51]133        # Return the next token.
[39]134        while self.need_more_tokens():
135            self.fetch_more_tokens()
136        if self.tokens:
137            self.tokens_taken += 1
138            return self.tokens.pop(0)
139
[43]140    # Private methods.
[39]141
142    def need_more_tokens(self):
143        if self.done:
144            return False
145        if not self.tokens:
146            return True
147        # The current token may be a potential simple key, so we
148        # need to look further.
[43]149        self.stale_possible_simple_keys()
[39]150        if self.next_possible_simple_key() == self.tokens_taken:
151            return True
152
    def fetch_more_tokens(self):
        """Scan forward and append the next token(s) to `self.tokens`.

        Skips whitespace/comments, expires stale simple keys, unwinds
        indentation, then dispatches on the next character to the matching
        fetch_* method.  Raises ScannerError when no token can start here.
        """

        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()

        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()

        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)

        # Peek the next character.
        ch = self.peek()

        # Is it the end of stream?
        if ch == '\0':
            return self.fetch_stream_end()

        # Is it a directive?
        if ch == '%' and self.check_directive():
            return self.fetch_directive()

        # Is it the document start?
        if ch == '-' and self.check_document_start():
            return self.fetch_document_start()

        # Is it the document end?
        if ch == '.' and self.check_document_end():
            return self.fetch_document_end()

        # TODO: support for BOM within a stream.
        #if ch == '\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken

        # Note: the order of the following checks is NOT significant.

        # Is it the flow sequence start indicator?
        if ch == '[':
            return self.fetch_flow_sequence_start()

        # Is it the flow mapping start indicator?
        if ch == '{':
            return self.fetch_flow_mapping_start()

        # Is it the flow sequence end indicator?
        if ch == ']':
            return self.fetch_flow_sequence_end()

        # Is it the flow mapping end indicator?
        if ch == '}':
            return self.fetch_flow_mapping_end()

        # Is it the flow entry indicator?
        if ch == ',':
            return self.fetch_flow_entry()

        # Is it the block entry indicator?
        if ch == '-' and self.check_block_entry():
            return self.fetch_block_entry()

        # Is it the key indicator?
        if ch == '?' and self.check_key():
            return self.fetch_key()

        # Is it the value indicator?
        if ch == ':' and self.check_value():
            return self.fetch_value()

        # Is it an alias?
        if ch == '*':
            return self.fetch_alias()

        # Is it an anchor?
        if ch == '&':
            return self.fetch_anchor()

        # Is it a tag?
        if ch == '!':
            return self.fetch_tag()

        # Is it a literal scalar?  (Block scalars exist only in block context.)
        if ch == '|' and not self.flow_level:
            return self.fetch_literal()

        # Is it a folded scalar?
        if ch == '>' and not self.flow_level:
            return self.fetch_folded()

        # Is it a single quoted scalar?
        if ch == '\'':
            return self.fetch_single()

        # Is it a double quoted scalar?
        if ch == '\"':
            return self.fetch_double()

        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()

        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token" % ch,
                self.get_mark())
[39]258
[43]259    # Simple keys treatment.
260
261    def next_possible_simple_key(self):
262        # Return the number of the nearest possible simple key. Actually we
263        # don't need to loop through the whole dictionary. We may replace it
264        # with the following code:
265        #   if not self.possible_simple_keys:
266        #       return None
267        #   return self.possible_simple_keys[
268        #           min(self.possible_simple_keys.keys())].token_number
269        min_token_number = None
270        for level in self.possible_simple_keys:
271            key = self.possible_simple_keys[level]
272            if min_token_number is None or key.token_number < min_token_number:
273                min_token_number = key.token_number
274        return min_token_number
275
276    def stale_possible_simple_keys(self):
277        # Remove entries that are no longer possible simple keys. According to
278        # the YAML specification, simple keys
279        # - should be limited to a single line,
280        # - should be no longer than 1024 characters.
281        # Disabling this procedure will allow simple keys of any length and
282        # height (may cause problems if indentation is broken though).
[328]283        for level in list(self.possible_simple_keys):
[43]284            key = self.possible_simple_keys[level]
[136]285            if key.line != self.line  \
286                    or self.index-key.index > 1024:
[43]287                if key.required:
[116]288                    raise ScannerError("while scanning a simple key", key.mark,
[136]289                            "could not found expected ':'", self.get_mark())
[43]290                del self.possible_simple_keys[level]
291
    def save_possible_simple_key(self):
        """Record the current position as a potential simple-key start.

        Called before fetching ALIAS, ANCHOR, TAG, SCALAR(flow), '[' or '{'.
        """

        # Check if a simple key is required at the current position.
        required = not self.flow_level and self.indent == self.column

        # A simple key is required only if it is the first token in the current
        # line. Therefore it is always allowed.
        assert self.allow_simple_key or not required

        # The next token might be a simple key. Let's save its number and
        # position.
        if self.allow_simple_key:
            self.remove_possible_simple_key()
            token_number = self.tokens_taken+len(self.tokens)
            key = SimpleKey(token_number, required,
                    self.index, self.line, self.column, self.get_mark())
            self.possible_simple_keys[self.flow_level] = key
312
313    def remove_possible_simple_key(self):
314        # Remove the saved possible key position at the current flow level.
315        if self.flow_level in self.possible_simple_keys:
316            key = self.possible_simple_keys[self.flow_level]
[47]317           
[188]318            if key.required:
319                raise ScannerError("while scanning a simple key", key.mark,
320                        "could not found expected ':'", self.get_mark())
[43]321
[188]322            del self.possible_simple_keys[self.flow_level]
323
[43]324    # Indentation functions.
325
    def unwind_indent(self, column):
        """Pop indentation levels above `column`, emitting a BLOCK-END
        token for each level closed (block context only)."""

        ## In flow context, tokens should respect indentation.
        ## Actually the condition should be `self.indent >= column` according to
        ## the spec. But this condition will prohibit intuitively correct
        ## constructions such as
        ## key : {
        ## }
        #if self.flow_level and self.indent > column:
        #    raise ScannerError(None, None,
        #            "invalid indentation or unclosed '[' or '{'",
        #            self.get_mark())

        # In the flow context, indentation is ignored. We make the scanner less
        # restrictive than the specification requires.
        if self.flow_level:
            return

        # In block context, we may need to issue the BLOCK-END tokens.
        while self.indent > column:
            mark = self.get_mark()
            self.indent = self.indents.pop()
            self.tokens.append(BlockEndToken(mark, mark))
[43]349
350    def add_indent(self, column):
351        # Check if we need to increase indentation.
352        if self.indent < column:
353            self.indents.append(self.indent)
354            self.indent = column
355            return True
356        return False
357
358    # Fetchers.
359
    def fetch_stream_start(self):
        """Emit the STREAM-START token (always the very first token)."""
        # We always add STREAM-START as the first token and STREAM-END as the
        # last token.

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-START.  NOTE(review): `self.encoding` is presumably set
        # by the Reader mixin during stream setup — confirm against reader.py.
        self.tokens.append(StreamStartToken(mark, mark,
            encoding=self.encoding))
371
    def fetch_stream_end(self):
        """Emit the STREAM-END token and mark the scanner as done."""

        # Set the current indentation to -1.
        self.unwind_indent(-1)

        # Reset everything (not really needed).
        self.allow_simple_key = False
        self.possible_simple_keys = {}

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-END.
        self.tokens.append(StreamEndToken(mark, mark))

        # The stream is finished.
        self.done = True
389
    def fetch_directive(self):
        """Scan a '%' directive and emit the DIRECTIVE token."""

        # Set the current indentation to -1.
        self.unwind_indent(-1)

        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Scan and add DIRECTIVE.
        self.tokens.append(self.scan_directive())
[39]401
    def fetch_document_start(self):
        # Emit DOCUMENT-START for a '---' marker.
        self.fetch_document_indicator(DocumentStartToken)
[39]404
    def fetch_document_end(self):
        # Emit DOCUMENT-END for a '...' marker.
        self.fetch_document_indicator(DocumentEndToken)
[43]407
    def fetch_document_indicator(self, TokenClass):
        """Emit a document boundary token ('---' or '...')."""

        # Set the current indentation to -1.
        self.unwind_indent(-1)

        # Reset simple keys. Note that there could not be a block collection
        # after '---'.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Add DOCUMENT-START or DOCUMENT-END.
        start_mark = self.get_mark()
        self.forward(3)  # skip the three marker characters
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
[39]423
    def fetch_flow_sequence_start(self):
        # Emit FLOW-SEQUENCE-START for '['.
        self.fetch_flow_collection_start(FlowSequenceStartToken)
[39]426
    def fetch_flow_mapping_start(self):
        # Emit FLOW-MAPPING-START for '{'.
        self.fetch_flow_collection_start(FlowMappingStartToken)
[43]429
    def fetch_flow_collection_start(self, TokenClass):
        """Emit a flow collection start token and enter flow context."""

        # '[' and '{' may start a simple key.
        self.save_possible_simple_key()

        # Increase the flow level.
        self.flow_level += 1

        # Simple keys are allowed after '[' and '{'.
        self.allow_simple_key = True

        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
[39]446
    def fetch_flow_sequence_end(self):
        # Emit FLOW-SEQUENCE-END for ']'.
        self.fetch_flow_collection_end(FlowSequenceEndToken)
[39]449
    def fetch_flow_mapping_end(self):
        # Emit FLOW-MAPPING-END for '}'.
        self.fetch_flow_collection_end(FlowMappingEndToken)
[43]452
    def fetch_flow_collection_end(self, TokenClass):
        """Emit a flow collection end token and leave one flow level."""

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Decrease the flow level.
        self.flow_level -= 1

        # No simple keys after ']' or '}'.
        self.allow_simple_key = False

        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
[39]469
    def fetch_flow_entry(self):
        """Emit the FLOW-ENTRY token for ','."""

        # Simple keys are allowed after ','.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add FLOW-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(FlowEntryToken(start_mark, end_mark))
[51]483
484    def fetch_block_entry(self):
485
[43]486        # Block context needs additional checks.
487        if not self.flow_level:
[39]488
[43]489            # Are we allowed to start a new entry?
490            if not self.allow_simple_key:
[47]491                raise ScannerError(None, None,
492                        "sequence entries are not allowed here",
[136]493                        self.get_mark())
[39]494
[43]495            # We may need to add BLOCK-SEQUENCE-START.
[136]496            if self.add_indent(self.column):
497                mark = self.get_mark()
[116]498                self.tokens.append(BlockSequenceStartToken(mark, mark))
[39]499
[51]500        # It's an error for the block entry to occur in the flow context,
501        # but we let the parser detect this.
502        else:
503            pass
504
505        # Simple keys are allowed after '-'.
[43]506        self.allow_simple_key = True
[39]507
[43]508        # Reset possible simple key on the current level.
509        self.remove_possible_simple_key()
[39]510
[51]511        # Add BLOCK-ENTRY.
[136]512        start_mark = self.get_mark()
513        self.forward()
514        end_mark = self.get_mark()
[116]515        self.tokens.append(BlockEntryToken(start_mark, end_mark))
[39]516
    def fetch_key(self):
        """Emit the KEY token for an explicit '?' key indicator."""

        # Block context needs additional checks.
        if not self.flow_level:

            # Are we allowed to start a key (not necessarily a simple one)?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "mapping keys are not allowed here",
                        self.get_mark())

            # We may need to add BLOCK-MAPPING-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockMappingStartToken(mark, mark))

        # Simple keys are allowed after '?' in the block context.
        self.allow_simple_key = not self.flow_level

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add KEY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(KeyToken(start_mark, end_mark))
[39]544
    def fetch_value(self):
        """Emit the VALUE token for ':'.

        If a simple key is pending at the current flow level, retroactively
        insert the matching KEY token (and possibly BLOCK-MAPPING-START)
        at the saved position in the token queue.
        """

        # Do we determine a simple key?
        if self.flow_level in self.possible_simple_keys:

            # Add KEY.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(key.token_number-self.tokens_taken,
                    KeyToken(key.mark, key.mark))

            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(key.token_number-self.tokens_taken,
                            BlockMappingStartToken(key.mark, key.mark))

            # There cannot be two simple keys one after another.
            self.allow_simple_key = False

        # It must be a part of a complex key.
        else:

            # Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
            # anyway.)
            if not self.flow_level:

                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(None, None,
                            "mapping values are not allowed here",
                            self.get_mark())

            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.  It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.column):
                    mark = self.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))

            # Simple keys are allowed after ':' in the block context.
            self.allow_simple_key = not self.flow_level

            # Reset possible simple key on the current level.
            self.remove_possible_simple_key()

        # Add VALUE.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(ValueToken(start_mark, end_mark))
[37]600
    def fetch_alias(self):
        """Scan a '*' alias and emit the ALIAS token."""

        # ALIAS could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after ALIAS.
        self.allow_simple_key = False

        # Scan and add ALIAS.
        self.tokens.append(self.scan_anchor(AliasToken))
[37]611
    def fetch_anchor(self):
        """Scan a '&' anchor and emit the ANCHOR token."""

        # ANCHOR could start a simple key.
        self.save_possible_simple_key()

        # No simple keys after ANCHOR.
        self.allow_simple_key = False

        # Scan and add ANCHOR.
        self.tokens.append(self.scan_anchor(AnchorToken))
[37]622
    def fetch_tag(self):
        """Scan a '!' tag and emit the TAG token."""

        # TAG could start a simple key.
        self.save_possible_simple_key()

        # No simple keys after TAG.
        self.allow_simple_key = False

        # Scan and add TAG.
        self.tokens.append(self.scan_tag())
[37]633
    def fetch_literal(self):
        # '|' introduces a literal block scalar.
        self.fetch_block_scalar(style='|')
[37]636
    def fetch_folded(self):
        # '>' introduces a folded block scalar.
        self.fetch_block_scalar(style='>')
[37]639
    def fetch_block_scalar(self, style):
        """Scan a block scalar ('|' or '>') and emit the SCALAR token."""

        # A simple key may follow a block scalar.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Scan and add SCALAR.
        self.tokens.append(self.scan_block_scalar(style))
[37]650
    def fetch_single(self):
        # Single-quoted flow scalar.
        self.fetch_flow_scalar(style='\'')
[37]653
    def fetch_double(self):
        # Double-quoted flow scalar.
        self.fetch_flow_scalar(style='"')
[37]656
    def fetch_flow_scalar(self, style):
        """Scan a quoted scalar and emit the SCALAR token."""

        # A flow scalar could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after flow scalars.
        self.allow_simple_key = False

        # Scan and add SCALAR.
        self.tokens.append(self.scan_flow_scalar(style))
[37]667
    def fetch_plain(self):
        """Scan a plain (unquoted) scalar and emit the SCALAR token."""

        # A plain scalar could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after plain scalars. But note that `scan_plain` will
        # change this flag if the scan is finished at the beginning of the
        # line.
        self.allow_simple_key = False

        # Scan and add SCALAR. May change `allow_simple_key`.
        self.tokens.append(self.scan_plain())
[37]680
[43]681    # Checkers.
[37]682
[43]683    def check_directive(self):
[37]684
[43]685        # DIRECTIVE:        ^ '%' ...
686        # The '%' indicator is already checked.
[136]687        if self.column == 0:
[43]688            return True
[37]689
[43]690    def check_document_start(self):
[37]691
[43]692        # DOCUMENT-START:   ^ '---' (' '|'\n')
[136]693        if self.column == 0:
[328]694            if self.prefix(3) == '---'  \
695                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
[43]696                return True
[37]697
[43]698    def check_document_end(self):
[37]699
[43]700        # DOCUMENT-END:     ^ '...' (' '|'\n')
[136]701        if self.column == 0:
[328]702            if self.prefix(3) == '...'  \
703                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
[43]704                return True
[37]705
    def check_block_entry(self):
        """Return True if '-' here is a block entry (followed by a space,
        tab, break, or end of stream)."""
        # BLOCK-ENTRY:      '-' (' '|'\n')
        return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
[43]710
711    def check_key(self):
712
713        # KEY(flow context):    '?'
714        if self.flow_level:
[37]715            return True
[43]716
717        # KEY(block context):   '?' (' '|'\n')
[37]718        else:
[328]719            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
[37]720
[43]721    def check_value(self):
722
723        # VALUE(flow context):  ':'
724        if self.flow_level:
[37]725            return True
[43]726
727        # VALUE(block context): ':' (' '|'\n')
[37]728        else:
[328]729            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
[37]730
    def check_plain(self):
        """Return True if a plain (unquoted) scalar may start here."""
        # A plain scalar may start with any non-space character except:
        #   '-', '?', ':', ',', '[', ']', '{', '}',
        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
        #   '%', '@', '`'.
        #
        # It may also start with
        #   '-', '?', ':'
        # if it is followed by a non-space character.
        #
        # Note that we limit the last rule to the block context (except the
        # '-' character) because we want the flow context to be space
        # independent.
        ch = self.peek()
        return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
                or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
                        and (ch == '-' or (not self.flow_level and ch in '?:')))
[48]749
[43]750    # Scanners.
751
    def scan_to_next_token(self):
        """Skip spaces, line breaks and '#' comments up to the next token;
        set `allow_simple_key` after a break in block context."""
        # We ignore spaces, line breaks and comments.
        # If we find a line break in the block context, we set the flag
        # `allow_simple_key` on.
        # The byte order mark is stripped if it's the first character in the
        # stream. We do not yet support BOM inside the stream as the
        # specification requires. Any such mark will be considered as a part
        # of the document.
        #
        # TODO: We need to make tab handling rules more sane. A good rule is
        #   Tabs cannot precede tokens
        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
        #   KEY(block), VALUE(block), BLOCK-ENTRY
        # So the checking code is
        #   if <TAB>:
        #       self.allow_simple_keys = False
        # We also need to add the check for `allow_simple_keys == True` to
        # `unwind_indent` before issuing BLOCK-END.
        # Scanners for block, flow, and plain scalars need to be modified.

        if self.index == 0 and self.peek() == '\uFEFF':
            self.forward()
        found = False
        while not found:
            while self.peek() == ' ':
                self.forward()
            if self.peek() == '#':
                # Comments run to the end of the line.
                while self.peek() not in '\0\r\n\x85\u2028\u2029':
                    self.forward()
            if self.scan_line_break():
                if not self.flow_level:
                    self.allow_simple_key = True
            else:
                found = True
[37]786
    def scan_directive(self):
        """Scan a '%' directive line and return a DirectiveToken.

        Known directives ('YAML', 'TAG') get a parsed value; the rest of
        any unknown directive line is skipped with value None.
        """
        # See the specification for details.
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == 'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == 'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            end_mark = self.get_mark()
            # Unknown directive: skip the rest of the line.
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)
[48]805
[116]806    def scan_directive_name(self, start_mark):
[48]807        # See the specification for details.
808        length = 0
[136]809        ch = self.peek(length)
[328]810        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
811                or ch in '-_':
[48]812            length += 1
[136]813            ch = self.peek(length)
[48]814        if not length:
[116]815            raise ScannerError("while scanning a directive", start_mark,
[52]816                    "expected alphabetic or numeric character, but found %r"
[328]817                    % ch, self.get_mark())
[136]818        value = self.prefix(length)
819        self.forward(length)
820        ch = self.peek()
[328]821        if ch not in '\0 \r\n\x85\u2028\u2029':
[116]822            raise ScannerError("while scanning a directive", start_mark,
[48]823                    "expected alphabetic or numeric character, but found %r"
[328]824                    % ch, self.get_mark())
[48]825        return value
826
    def scan_yaml_directive_value(self, start_mark):
        """Scan the '%YAML' version value and return a (major, minor) pair."""
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        major = self.scan_yaml_directive_number(start_mark)
        if self.peek() != '.':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or '.', but found %r" % self.peek(),
                    self.get_mark())
        self.forward()
        minor = self.scan_yaml_directive_number(start_mark)
        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or ' ', but found %r" % self.peek(),
                    self.get_mark())
        return (major, minor)
[37]843
[116]844    def scan_yaml_directive_number(self, start_mark):
[48]845        # See the specification for details.
[136]846        ch = self.peek()
[328]847        if not ('0' <= ch <= '9'):
[116]848            raise ScannerError("while scanning a directive", start_mark,
[328]849                    "expected a digit, but found %r" % ch, self.get_mark())
[48]850        length = 0
[328]851        while '0' <= self.peek(length) <= '9':
[48]852            length += 1
[136]853        value = int(self.prefix(length))
854        self.forward(length)
[48]855        return value
856
[116]857    def scan_tag_directive_value(self, start_mark):
[48]858        # See the specification for details.
[328]859        while self.peek() == ' ':
[136]860            self.forward()
[116]861        handle = self.scan_tag_directive_handle(start_mark)
[328]862        while self.peek() == ' ':
[136]863            self.forward()
[116]864        prefix = self.scan_tag_directive_prefix(start_mark)
[48]865        return (handle, prefix)
866
[116]867    def scan_tag_directive_handle(self, start_mark):
[48]868        # See the specification for details.
[116]869        value = self.scan_tag_handle('directive', start_mark)
[136]870        ch = self.peek()
[328]871        if ch != ' ':
[116]872            raise ScannerError("while scanning a directive", start_mark,
[328]873                    "expected ' ', but found %r" % ch, self.get_mark())
[48]874        return value
875
[116]876    def scan_tag_directive_prefix(self, start_mark):
[48]877        # See the specification for details.
[116]878        value = self.scan_tag_uri('directive', start_mark)
[136]879        ch = self.peek()
[328]880        if ch not in '\0 \r\n\x85\u2028\u2029':
[116]881            raise ScannerError("while scanning a directive", start_mark,
[328]882                    "expected ' ', but found %r" % ch, self.get_mark())
[48]883        return value
884
[116]885    def scan_directive_ignored_line(self, start_mark):
[48]886        # See the specification for details.
[328]887        while self.peek() == ' ':
[136]888            self.forward()
[328]889        if self.peek() == '#':
890            while self.peek() not in '\0\r\n\x85\u2028\u2029':
[136]891                self.forward()
892        ch = self.peek()
[328]893        if ch not in '\0\r\n\x85\u2028\u2029':
[116]894            raise ScannerError("while scanning a directive", start_mark,
[48]895                    "expected a comment or a line break, but found %r"
[328]896                        % ch, self.get_mark())
[48]897        self.scan_line_break()
898
[43]899    def scan_anchor(self, TokenClass):
[48]900        # The specification does not restrict characters for anchors and
901        # aliases. This may lead to problems, for instance, the document:
902        #   [ *alias, value ]
903        # can be interpteted in two ways, as
904        #   [ "value" ]
905        # and
906        #   [ *alias , "value" ]
907        # Therefore we restrict aliases to numbers and ASCII letters.
[136]908        start_mark = self.get_mark()
909        indicator = self.peek()
[48]910        if indicator == '*':
911            name = 'alias'
912        else:
913            name = 'anchor'
[136]914        self.forward()
[48]915        length = 0
[136]916        ch = self.peek(length)
[328]917        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
918                or ch in '-_':
[48]919            length += 1
[136]920            ch = self.peek(length)
[48]921        if not length:
[116]922            raise ScannerError("while scanning an %s" % name, start_mark,
[52]923                    "expected alphabetic or numeric character, but found %r"
[328]924                    % ch, self.get_mark())
[136]925        value = self.prefix(length)
926        self.forward(length)
927        ch = self.peek()
[328]928        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
[116]929            raise ScannerError("while scanning an %s" % name, start_mark,
[48]930                    "expected alphabetic or numeric character, but found %r"
[328]931                    % ch, self.get_mark())
[136]932        end_mark = self.get_mark()
[116]933        return TokenClass(value, start_mark, end_mark)
[37]934
[43]935    def scan_tag(self):
[48]936        # See the specification for details.
[136]937        start_mark = self.get_mark()
938        ch = self.peek(1)
[328]939        if ch == '<':
[48]940            handle = None
[136]941            self.forward(2)
[116]942            suffix = self.scan_tag_uri('tag', start_mark)
[328]943            if self.peek() != '>':
[116]944                raise ScannerError("while parsing a tag", start_mark,
[328]945                        "expected '>', but found %r" % self.peek(),
[136]946                        self.get_mark())
947            self.forward()
[328]948        elif ch in '\0 \t\r\n\x85\u2028\u2029':
[48]949            handle = None
[328]950            suffix = '!'
[136]951            self.forward()
[48]952        else:
953            length = 1
954            use_handle = False
[328]955            while ch not in '\0 \r\n\x85\u2028\u2029':
956                if ch == '!':
[48]957                    use_handle = True
958                    break
959                length += 1
[136]960                ch = self.peek(length)
[328]961            handle = '!'
[48]962            if use_handle:
[116]963                handle = self.scan_tag_handle('tag', start_mark)
[48]964            else:
[328]965                handle = '!'
[136]966                self.forward()
[116]967            suffix = self.scan_tag_uri('tag', start_mark)
[136]968        ch = self.peek()
[328]969        if ch not in '\0 \r\n\x85\u2028\u2029':
[116]970            raise ScannerError("while scanning a tag", start_mark,
[328]971                    "expected ' ', but found %r" % ch, self.get_mark())
[48]972        value = (handle, suffix)
[136]973        end_mark = self.get_mark()
[116]974        return TagToken(value, start_mark, end_mark)
[43]975
    def scan_block_scalar(self, style):
        # Scan a literal ('|') or folded ('>') block scalar and return a
        # ScalarToken with the assembled text.
        # See the specification for details.

        # '>' folds single line breaks into spaces; '|' keeps them as-is.
        if style == '>':
            folded = True
        else:
            folded = False

        chunks = []
        start_mark = self.get_mark()

        # Scan the header: optional chomping ('+'/'-') and explicit
        # indentation (1-9) indicators, plus the rest of the header line.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)

        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            # No explicit indicator: auto-detect the indentation from the
            # most indented of the leading lines.
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = ''

        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != '\0':
            chunks.extend(breaks)
            # Folding only joins lines whose content starts with a
            # non-blank character.
            leading_non_space = self.peek() not in ' \t'
            length = 0
            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != '\0':

                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:

                if folded and line_break == '\n'    \
                        and leading_non_space and self.peek() not in ' \t':
                    if not breaks:
                        chunks.append(' ')
                else:
                    chunks.append(line_break)

                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == '\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break

        # Chomp the tail: keep the final line break unless chomping is
        # '-' (False), keep trailing empty lines only when it is '+' (True).
        if chomping is not False:
            chunks.append(line_break)
        if chomping is True:
            chunks.extend(breaks)

        # We are done.
        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
                style)
[48]1051
[116]1052    def scan_block_scalar_indicators(self, start_mark):
[48]1053        # See the specification for details.
1054        chomping = None
1055        increment = None
[136]1056        ch = self.peek()
[328]1057        if ch in '+-':
[48]1058            if ch == '+':
1059                chomping = True
1060            else:
1061                chomping = False
[136]1062            self.forward()
1063            ch = self.peek()
[328]1064            if ch in '0123456789':
[48]1065                increment = int(ch)
1066                if increment == 0:
[116]1067                    raise ScannerError("while scanning a block scalar", start_mark,
[48]1068                            "expected indentation indicator in the range 1-9, but found 0",
[136]1069                            self.get_mark())
1070                self.forward()
[328]1071        elif ch in '0123456789':
[48]1072            increment = int(ch)
1073            if increment == 0:
[116]1074                raise ScannerError("while scanning a block scalar", start_mark,
[48]1075                        "expected indentation indicator in the range 1-9, but found 0",
[136]1076                        self.get_mark())
1077            self.forward()
1078            ch = self.peek()
[328]1079            if ch in '+-':
[48]1080                if ch == '+':
1081                    chomping = True
1082                else:
1083                    chomping = False
[136]1084                self.forward()
1085        ch = self.peek()
[328]1086        if ch not in '\0 \r\n\x85\u2028\u2029':
[116]1087            raise ScannerError("while scanning a block scalar", start_mark,
[48]1088                    "expected chomping or indentation indicators, but found %r"
[328]1089                    % ch, self.get_mark())
[48]1090        return chomping, increment
1091
[116]1092    def scan_block_scalar_ignored_line(self, start_mark):
[48]1093        # See the specification for details.
[328]1094        while self.peek() == ' ':
[136]1095            self.forward()
[328]1096        if self.peek() == '#':
1097            while self.peek() not in '\0\r\n\x85\u2028\u2029':
[136]1098                self.forward()
1099        ch = self.peek()
[328]1100        if ch not in '\0\r\n\x85\u2028\u2029':
[116]1101            raise ScannerError("while scanning a block scalar", start_mark,
[328]1102                    "expected a comment or a line break, but found %r" % ch,
1103                    self.get_mark())
[48]1104        self.scan_line_break()
[43]1105
[48]1106    def scan_block_scalar_indentation(self):
1107        # See the specification for details.
1108        chunks = []
1109        max_indent = 0
[136]1110        end_mark = self.get_mark()
[328]1111        while self.peek() in ' \r\n\x85\u2028\u2029':
1112            if self.peek() != ' ':
[48]1113                chunks.append(self.scan_line_break())
[136]1114                end_mark = self.get_mark()
[48]1115            else:
[136]1116                self.forward()
1117                if self.column > max_indent:
1118                    max_indent = self.column
[116]1119        return chunks, max_indent, end_mark
[48]1120
1121    def scan_block_scalar_breaks(self, indent):
1122        # See the specification for details.
1123        chunks = []
[136]1124        end_mark = self.get_mark()
[328]1125        while self.column < indent and self.peek() == ' ':
[136]1126            self.forward()
[328]1127        while self.peek() in '\r\n\x85\u2028\u2029':
[48]1128            chunks.append(self.scan_line_break())
[136]1129            end_mark = self.get_mark()
[328]1130            while self.column < indent and self.peek() == ' ':
[136]1131                self.forward()
[116]1132        return chunks, end_mark
[48]1133
[130]1134    def scan_flow_scalar(self, style):
[48]1135        # See the specification for details.
[117]1136        # Note that we loose indentation rules for quoted scalars. Quoted
1137        # scalars don't need to adhere indentation because " and ' clearly
1138        # mark the beginning and the end of them. Therefore we are less
1139        # restrictive then the specification requires. We only need to check
1140        # that document separators are not included in scalars.
[130]1141        if style == '"':
1142            double = True
1143        else:
1144            double = False
[48]1145        chunks = []
[136]1146        start_mark = self.get_mark()
1147        quote = self.peek()
1148        self.forward()
[117]1149        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
[136]1150        while self.peek() != quote:
[117]1151            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
1152            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
[136]1153        self.forward()
1154        end_mark = self.get_mark()
[328]1155        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
[130]1156                style)
[48]1157
    # Single-character backslash escapes recognized in double-quoted
    # scalars, mapped to their replacement characters.
    ESCAPE_REPLACEMENTS = {
        '0':    '\0',
        'a':    '\x07',
        'b':    '\x08',
        't':    '\x09',
        '\t':   '\x09',
        'n':    '\x0A',
        'v':    '\x0B',
        'f':    '\x0C',
        'r':    '\x0D',
        'e':    '\x1B',
        ' ':    '\x20',
        '\"':   '\"',
        '\\':   '\\',
        'N':    '\x85',
        '_':    '\xA0',
        'L':    '\u2028',
        'P':    '\u2029',
    }

    # Numeric escapes: '\x', '\u' and '\U' are followed by this many
    # hexadecimal digits.
    ESCAPE_CODES = {
        'x':    2,
        'u':    4,
        'U':    8,
    }
1183
    def scan_flow_scalar_non_spaces(self, double, start_mark):
        # Scan a maximal run of non-blank content inside a quoted scalar,
        # resolving '' quote doubling (single-quoted) and backslash
        # escapes (double-quoted).  Returns a list of decoded chunks;
        # stops when the next character is a blank, a line break, the
        # closing quote or NUL.
        chunks = []
        while True:
            # Copy ordinary characters verbatim up to the next character
            # that needs special handling.
            length = 0
            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
                length += 1
            if length:
                chunks.append(self.prefix(length))
                self.forward(length)
            ch = self.peek()
            if not double and ch == '\'' and self.peek(1) == '\'':
                # In single-quoted scalars, '' is an escaped quote.
                chunks.append('\'')
                self.forward(2)
            elif (double and ch == '\'') or (not double and ch in '\"\\'):
                # Characters special only in the *other* quoting style
                # are literal here.
                chunks.append(ch)
                self.forward()
            elif double and ch == '\\':
                self.forward()
                ch = self.peek()
                if ch in self.ESCAPE_REPLACEMENTS:
                    # Single-character escape such as \n or \t.
                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                    self.forward()
                elif ch in self.ESCAPE_CODES:
                    # Numeric escape: \xXX, \uXXXX or \UXXXXXXXX.
                    length = self.ESCAPE_CODES[ch]
                    self.forward()
                    for k in range(length):
                        if self.peek(k) not in '0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
                                        (length, self.peek(k)), self.get_mark())
                    code = int(self.prefix(length), 16)
                    chunks.append(chr(code))
                    self.forward(length)
                elif ch in '\r\n\x85\u2028\u2029':
                    # An escaped line break is removed along with the
                    # following indentation and empty lines.
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch, self.get_mark())
            else:
                return chunks
[37]1226
[117]1227    def scan_flow_scalar_spaces(self, double, start_mark):
[48]1228        # See the specification for details.
1229        chunks = []
1230        length = 0
[328]1231        while self.peek(length) in ' \t':
[48]1232            length += 1
[136]1233        whitespaces = self.prefix(length)
1234        self.forward(length)
1235        ch = self.peek()
[328]1236        if ch == '\0':
[116]1237            raise ScannerError("while scanning a quoted scalar", start_mark,
[136]1238                    "found unexpected end of stream", self.get_mark())
[328]1239        elif ch in '\r\n\x85\u2028\u2029':
[48]1240            line_break = self.scan_line_break()
[117]1241            breaks = self.scan_flow_scalar_breaks(double, start_mark)
[328]1242            if line_break != '\n':
[48]1243                chunks.append(line_break)
1244            elif not breaks:
[328]1245                chunks.append(' ')
[48]1246            chunks.extend(breaks)
1247        else:
1248            chunks.append(whitespaces)
1249        return chunks
1250
[117]1251    def scan_flow_scalar_breaks(self, double, start_mark):
[48]1252        # See the specification for details.
1253        chunks = []
1254        while True:
[117]1255            # Instead of checking indentation, we check for document
1256            # separators.
[136]1257            prefix = self.prefix(3)
[328]1258            if (prefix == '---' or prefix == '...')   \
1259                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
[116]1260                raise ScannerError("while scanning a quoted scalar", start_mark,
[136]1261                        "found unexpected document separator", self.get_mark())
[328]1262            while self.peek() in ' \t':
[136]1263                self.forward()
[328]1264            if self.peek() in '\r\n\x85\u2028\u2029':
[48]1265                chunks.append(self.scan_line_break())
1266            else:
1267                return chunks
1268
    def scan_plain(self):
        # Scan a plain (unquoted) scalar and return a ScalarToken.
        # See the specification for details.
        # We add an additional restriction for the flow context:
        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
        # We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are loosened for the flow context.
        chunks = []
        start_mark = self.get_mark()
        end_mark = start_mark
        indent = self.indent+1
        # We allow zero indentation for scalars, but then we need to check for
        # document separators at the beginning of the line.
        #if indent == 0:
        #    indent = 1
        spaces = []
        while True:
            length = 0
            # A comment terminates the scalar.
            if self.peek() == '#':
                break
            # Measure the next run of scalar characters.  ':' ends the
            # run only when followed by a blank (block context) or
            # unconditionally in the flow context, along with ',?[]{}'.
            while True:
                ch = self.peek(length)
                if ch in '\0 \t\r\n\x85\u2028\u2029'    \
                        or (not self.flow_level and ch == ':' and
                                self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
                        or (self.flow_level and ch in ',:?[]{}'):
                    break
                length += 1
            # It's not clear what we should do with ':' in the flow context.
            if (self.flow_level and ch == ':'
                    and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
                self.forward(length)
                raise ScannerError("while scanning a plain scalar", start_mark,
                    "found unexpected ':'", self.get_mark(),
                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
            if length == 0:
                break
            # Once scalar text has been consumed, no simple key may start
            # at this position.
            self.allow_simple_key = False
            chunks.extend(spaces)
            chunks.append(self.prefix(length))
            self.forward(length)
            end_mark = self.get_mark()
            # scan_plain_spaces returns None (falsy) at a document
            # separator, which — like a comment or dedent — ends the scalar.
            spaces = self.scan_plain_spaces(indent, start_mark)
            if not spaces or self.peek() == '#' \
                    or (not self.flow_level and self.column < indent):
                break
        return ScalarToken(''.join(chunks), True, start_mark, end_mark)
[37]1315
    def scan_plain_spaces(self, indent, start_mark):
        # Scan the whitespace between two chunks of a plain scalar and
        # return the folded chunks to insert between them, or None when a
        # document separator ('---'/'...') is encountered — the caller
        # treats None as the end of the scalar.
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        chunks = []
        length = 0
        while self.peek(length) in ' ':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch in '\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            # After a line break, a simple key is allowed again.
            self.allow_simple_key = True
            prefix = self.prefix(3)
            if (prefix == '---' or prefix == '...')   \
                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                return
            # Collect subsequent empty lines, still watching for document
            # separators at the start of each one.
            breaks = []
            while self.peek() in ' \r\n\x85\u2028\u2029':
                if self.peek() == ' ':
                    self.forward()
                else:
                    breaks.append(self.scan_line_break())
                    prefix = self.prefix(3)
                    if (prefix == '---' or prefix == '...')   \
                            and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                        return
            # Fold: a single '\n' becomes a space; other break characters
            # are preserved.
            if line_break != '\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks
1352
[116]1353    def scan_tag_handle(self, name, start_mark):
[48]1354        # See the specification for details.
1355        # For some strange reasons, the specification does not allow '_' in
1356        # tag handles. I have allowed it anyway.
[136]1357        ch = self.peek()
[328]1358        if ch != '!':
[116]1359            raise ScannerError("while scanning a %s" % name, start_mark,
[328]1360                    "expected '!', but found %r" % ch, self.get_mark())
[48]1361        length = 1
[136]1362        ch = self.peek(length)
[328]1363        if ch != ' ':
1364            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
1365                    or ch in '-_':
[48]1366                length += 1
[136]1367                ch = self.peek(length)
[328]1368            if ch != '!':
[136]1369                self.forward(length)
[116]1370                raise ScannerError("while scanning a %s" % name, start_mark,
[328]1371                        "expected '!', but found %r" % ch, self.get_mark())
[48]1372            length += 1
[136]1373        value = self.prefix(length)
1374        self.forward(length)
[48]1375        return value
1376
[116]1377    def scan_tag_uri(self, name, start_mark):
[48]1378        # See the specification for details.
1379        # Note: we do not check if URI is well-formed.
1380        chunks = []
1381        length = 0
[136]1382        ch = self.peek(length)
[328]1383        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
1384                or ch in '-;/?:@&=+$,_.!~*\'()[]%':
1385            if ch == '%':
[136]1386                chunks.append(self.prefix(length))
1387                self.forward(length)
[48]1388                length = 0
[116]1389                chunks.append(self.scan_uri_escapes(name, start_mark))
[48]1390            else:
1391                length += 1
[136]1392            ch = self.peek(length)
[48]1393        if length:
[136]1394            chunks.append(self.prefix(length))
1395            self.forward(length)
[48]1396            length = 0
1397        if not chunks:
[116]1398            raise ScannerError("while parsing a %s" % name, start_mark,
[328]1399                    "expected URI, but found %r" % ch, self.get_mark())
1400        return ''.join(chunks)
[48]1401
[116]1402    def scan_uri_escapes(self, name, start_mark):
[48]1403        # See the specification for details.
[328]1404        codes = []
[136]1405        mark = self.get_mark()
[328]1406        while self.peek() == '%':
[136]1407            self.forward()
[48]1408            for k in range(2):
[328]1409                if self.peek(k) not in '0123456789ABCDEFabcdef':
[116]1410                    raise ScannerError("while scanning a %s" % name, start_mark,
[328]1411                            "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
1412                            % self.peek(k), self.get_mark())
1413            codes.append(int(self.prefix(2), 16))
[136]1414            self.forward(2)
[48]1415        try:
[328]1416            value = bytes(codes).decode('utf-8')
1417        except UnicodeDecodeError as exc:
[116]1418            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
[48]1419        return value
1420
[47]1421    def scan_line_break(self):
1422        # Transforms:
1423        #   '\r\n'      :   '\n'
1424        #   '\r'        :   '\n'
1425        #   '\n'        :   '\n'
1426        #   '\x85'      :   '\n'
1427        #   '\u2028'    :   '\u2028'
1428        #   '\u2029     :   '\u2029'
1429        #   default     :   ''
[136]1430        ch = self.peek()
[328]1431        if ch in '\r\n\x85':
1432            if self.prefix(2) == '\r\n':
[136]1433                self.forward(2)
[47]1434            else:
[136]1435                self.forward()
[328]1436            return '\n'
1437        elif ch in '\u2028\u2029':
[136]1438            self.forward()
[47]1439            return ch
[328]1440        return ''
[47]1441
[45]1442#try:
1443#    import psyco
1444#    psyco.bind(Scanner)
1445#except ImportError:
1446#    pass
1447
Note: See TracBrowser for help on using the repository browser.