source: pyyaml/trunk/lib3/yaml/scanner.py @ 354

Revision 354, 50.7 KB, checked in by xi, 5 years ago:

Fixed a problem where a scanner error was not detected when there is no line break at the end of the stream.

# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
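# For illustration, a small document such as
#
#   - foo
#   - bar: baz
#
# is scanned (roughly) into the token stream
#
#   STREAM-START BLOCK-SEQUENCE-START
#   BLOCK-ENTRY SCALAR('foo') BLOCK-ENTRY
#   BLOCK-MAPPING-START KEY SCALAR('bar') VALUE SCALAR('baz') BLOCK-END
#   BLOCK-END STREAM-END
#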

__all__ = ['Scanner', 'ScannerError']

from .error import MarkedYAMLError
from .tokens import *

class ScannerError(MarkedYAMLError):
    pass

class SimpleKey:
    # See below for the treatment of simple keys.

    def __init__(self, token_number, required, index, line, column, mark):
        self.token_number = token_number
        self.required = required
        self.index = index
        self.line = line
        self.column = column
        self.mark = mark

class Scanner:

    def __init__(self):
        """Initialize the scanner."""
        # It is assumed that Scanner and Reader will have a common descendant.
        # Reader does the dirty work of checking for BOM and converting the
        # input data to Unicode. It also adds NUL to the end.
        #
        # Reader supports the following methods:
        #   self.peek(i=0)       # peek the i-th character ahead of the pointer
        #   self.prefix(l=1)     # peek the next l characters
        #   self.forward(l=1)    # read the next l characters and move the pointer

        # Have we reached the end of the stream?
        self.done = False

        # The number of unclosed '{' and '['. `flow_level == 0` means block
        # context.
        self.flow_level = 0

        # List of processed tokens that are not yet emitted.
        self.tokens = []

        # Add the STREAM-START token.
        self.fetch_stream_start()

        # Number of tokens that were emitted through the `get_token` method.
        self.tokens_taken = 0

        # The current indentation level.
        self.indent = -1

        # Past indentation levels.
        self.indents = []

        # Variables related to simple key treatment.

        # A simple key is a key that is not denoted by the '?' indicator.
        # Examples of simple keys:
        #   ---
        #   block simple key: value
        #   ? not a simple key:
        #   : { flow simple key: value }
        # We emit the KEY token before all keys, so when we find a potential
        # simple key, we try to locate the corresponding ':' indicator.
        # Simple keys should be limited to a single line and 1024 characters.

        # Can a simple key start at the current position? A simple key may
        # start:
        # - at the beginning of the line, not counting indentation spaces
        #       (in block context),
        # - after '{', '[', ',' (in the flow context),
        # - after '?', ':', '-' (in the block context).
        # In the block context, this flag also signifies if a block collection
        # may start at the current position.
        self.allow_simple_key = True

        # Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more than one possible simple key
        # for each level. The value is a SimpleKey record:
        #   (token_number, required, index, line, column, mark)
        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
        # '[', or '{' tokens.
        self.possible_simple_keys = {}

    # Public methods.
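    #
    # A minimal sketch of the intended calling pattern (the full package
    # drives this through `yaml.scan()`, which mixes Scanner with Reader):
    #
    #   while not scanner.check_token(StreamEndToken):
    #       token = scanner.get_token()
    #       ...                          # process the token
    #   scanner.get_token()              # consume STREAM-END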

    def check_token(self, *choices):
        # Check if the next token is one of the given types.
        while self.need_more_tokens():
            self.fetch_more_tokens()
        if self.tokens:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.tokens[0], choice):
                    return True
        return False

    def peek_token(self):
        # Return the next token, but do not delete it from the queue.
        while self.need_more_tokens():
            self.fetch_more_tokens()
        if self.tokens:
            return self.tokens[0]

    def get_token(self):
        # Return the next token.
        while self.need_more_tokens():
            self.fetch_more_tokens()
        if self.tokens:
            self.tokens_taken += 1
            return self.tokens.pop(0)

    # Private methods.

    def need_more_tokens(self):
        if self.done:
            return False
        if not self.tokens:
            return True
        # The current token may be a potential simple key, so we
        # need to look further.
        self.stale_possible_simple_keys()
        if self.next_possible_simple_key() == self.tokens_taken:
            return True

    def fetch_more_tokens(self):

        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()

        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()

        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)

        # Peek the next character.
        ch = self.peek()

        # Is it the end of stream?
        if ch == '\0':
            return self.fetch_stream_end()

        # Is it a directive?
        if ch == '%' and self.check_directive():
            return self.fetch_directive()

        # Is it the document start?
        if ch == '-' and self.check_document_start():
            return self.fetch_document_start()

        # Is it the document end?
        if ch == '.' and self.check_document_end():
            return self.fetch_document_end()

        # TODO: support for BOM within a stream.
        #if ch == '\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken

        # Note: the order of the following checks is NOT significant.

        # Is it the flow sequence start indicator?
        if ch == '[':
            return self.fetch_flow_sequence_start()

        # Is it the flow mapping start indicator?
        if ch == '{':
            return self.fetch_flow_mapping_start()

        # Is it the flow sequence end indicator?
        if ch == ']':
            return self.fetch_flow_sequence_end()

        # Is it the flow mapping end indicator?
        if ch == '}':
            return self.fetch_flow_mapping_end()

        # Is it the flow entry indicator?
        if ch == ',':
            return self.fetch_flow_entry()

        # Is it the block entry indicator?
        if ch == '-' and self.check_block_entry():
            return self.fetch_block_entry()

        # Is it the key indicator?
        if ch == '?' and self.check_key():
            return self.fetch_key()

        # Is it the value indicator?
        if ch == ':' and self.check_value():
            return self.fetch_value()

        # Is it an alias?
        if ch == '*':
            return self.fetch_alias()

        # Is it an anchor?
        if ch == '&':
            return self.fetch_anchor()

        # Is it a tag?
        if ch == '!':
            return self.fetch_tag()

        # Is it a literal scalar?
        if ch == '|' and not self.flow_level:
            return self.fetch_literal()

        # Is it a folded scalar?
        if ch == '>' and not self.flow_level:
            return self.fetch_folded()

        # Is it a single quoted scalar?
        if ch == '\'':
            return self.fetch_single()

        # Is it a double quoted scalar?
        if ch == '\"':
            return self.fetch_double()

        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()

        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token" % ch,
                self.get_mark())

    # Simple keys treatment.
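    #
    # To illustrate the mechanism: when scanning `foo: bar` in block context,
    # the plain scalar 'foo' is first recorded as a *possible* simple key.
    # Only when the ':' indicator is fetched does `fetch_value` reach back and
    # insert KEY (and, if needed, BLOCK-MAPPING-START) before the already
    # queued SCALAR token, using the saved token number and mark.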

    def next_possible_simple_key(self):
        # Return the number of the nearest possible simple key. Actually we
        # don't need to loop through the whole dictionary. We may replace it
        # with the following code:
        #   if not self.possible_simple_keys:
        #       return None
        #   return self.possible_simple_keys[
        #           min(self.possible_simple_keys.keys())].token_number
        min_token_number = None
        for level in self.possible_simple_keys:
            key = self.possible_simple_keys[level]
            if min_token_number is None or key.token_number < min_token_number:
                min_token_number = key.token_number
        return min_token_number

    def stale_possible_simple_keys(self):
        # Remove entries that are no longer possible simple keys. According to
        # the YAML specification, simple keys
        # - should be limited to a single line,
        # - should be no longer than 1024 characters.
        # Disabling this procedure will allow simple keys of any length and
        # height (may cause problems if indentation is broken though).
        for level in list(self.possible_simple_keys):
            key = self.possible_simple_keys[level]
            if key.line != self.line  \
                    or self.index-key.index > 1024:
                if key.required:
                    raise ScannerError("while scanning a simple key", key.mark,
                            "could not find expected ':'", self.get_mark())
                del self.possible_simple_keys[level]

    def save_possible_simple_key(self):
        # The next token may start a simple key. We check if it's possible
        # and save its position. This function is called for
        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.

        # Check if a simple key is required at the current position.
        required = not self.flow_level and self.indent == self.column

        # A simple key is required only if it is the first token in the current
        # line. Therefore it is always allowed.
        assert self.allow_simple_key or not required

        # The next token might be a simple key. Let's save its number and
        # position.
        if self.allow_simple_key:
            self.remove_possible_simple_key()
            token_number = self.tokens_taken+len(self.tokens)
            key = SimpleKey(token_number, required,
                    self.index, self.line, self.column, self.get_mark())
            self.possible_simple_keys[self.flow_level] = key

    def remove_possible_simple_key(self):
        # Remove the saved possible key position at the current flow level.
        if self.flow_level in self.possible_simple_keys:
            key = self.possible_simple_keys[self.flow_level]

            if key.required:
                raise ScannerError("while scanning a simple key", key.mark,
                        "could not find expected ':'", self.get_mark())

            del self.possible_simple_keys[self.flow_level]

    # Indentation functions.
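    #
    # For example, while scanning
    #
    #   outer:
    #     inner:
    #       leaf: value
    #
    # `add_indent` pushes -1, 0 and 2 onto `self.indents` as the nested block
    # mappings open; at the end of the stream `unwind_indent(-1)` pops them
    # back, emitting one BLOCK-END token per closed level.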

    def unwind_indent(self, column):

        ## In flow context, tokens should respect indentation.
        ## Actually the condition should be `self.indent >= column` according to
        ## the spec. But this condition will prohibit intuitively correct
        ## constructions such as
        ## key : {
        ## }
        #if self.flow_level and self.indent > column:
        #    raise ScannerError(None, None,
        #            "invalid indentation or unclosed '[' or '{'",
        #            self.get_mark())

        # In the flow context, indentation is ignored. We make the scanner less
        # restrictive than the specification requires.
        if self.flow_level:
            return

        # In block context, we may need to issue the BLOCK-END tokens.
        while self.indent > column:
            mark = self.get_mark()
            self.indent = self.indents.pop()
            self.tokens.append(BlockEndToken(mark, mark))

    def add_indent(self, column):
        # Check if we need to increase indentation.
        if self.indent < column:
            self.indents.append(self.indent)
            self.indent = column
            return True
        return False

    # Fetchers.

    def fetch_stream_start(self):
        # We always add STREAM-START as the first token and STREAM-END as the
        # last token.

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-START.
        self.tokens.append(StreamStartToken(mark, mark,
            encoding=self.encoding))


    def fetch_stream_end(self):

        # Set the current indentation to -1.
        self.unwind_indent(-1)

        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False
        self.possible_simple_keys = {}

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-END.
        self.tokens.append(StreamEndToken(mark, mark))

        # The stream is finished.
        self.done = True

    def fetch_directive(self):

        # Set the current indentation to -1.
        self.unwind_indent(-1)

        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Scan and add DIRECTIVE.
        self.tokens.append(self.scan_directive())

    def fetch_document_start(self):
        self.fetch_document_indicator(DocumentStartToken)

    def fetch_document_end(self):
        self.fetch_document_indicator(DocumentEndToken)

    def fetch_document_indicator(self, TokenClass):

        # Set the current indentation to -1.
        self.unwind_indent(-1)

        # Reset simple keys. Note that there cannot be a block collection
        # after '---'.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Add DOCUMENT-START or DOCUMENT-END.
        start_mark = self.get_mark()
        self.forward(3)
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_sequence_start(self):
        self.fetch_flow_collection_start(FlowSequenceStartToken)

    def fetch_flow_mapping_start(self):
        self.fetch_flow_collection_start(FlowMappingStartToken)

    def fetch_flow_collection_start(self, TokenClass):

        # '[' and '{' may start a simple key.
        self.save_possible_simple_key()

        # Increase the flow level.
        self.flow_level += 1

        # Simple keys are allowed after '[' and '{'.
        self.allow_simple_key = True

        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_sequence_end(self):
        self.fetch_flow_collection_end(FlowSequenceEndToken)

    def fetch_flow_mapping_end(self):
        self.fetch_flow_collection_end(FlowMappingEndToken)

    def fetch_flow_collection_end(self, TokenClass):

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Decrease the flow level.
        self.flow_level -= 1

        # No simple keys after ']' or '}'.
        self.allow_simple_key = False

        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_entry(self):

        # Simple keys are allowed after ','.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add FLOW-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(FlowEntryToken(start_mark, end_mark))

    def fetch_block_entry(self):

        # Block context needs additional checks.
        if not self.flow_level:

            # Are we allowed to start a new entry?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "sequence entries are not allowed here",
                        self.get_mark())

            # We may need to add BLOCK-SEQUENCE-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockSequenceStartToken(mark, mark))

        # It's an error for the block entry to occur in the flow context,
        # but we let the parser detect this.
        else:
            pass

        # Simple keys are allowed after '-'.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add BLOCK-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(BlockEntryToken(start_mark, end_mark))

    def fetch_key(self):

        # Block context needs additional checks.
        if not self.flow_level:

            # Are we allowed to start a key (not necessarily a simple one)?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "mapping keys are not allowed here",
                        self.get_mark())

            # We may need to add BLOCK-MAPPING-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockMappingStartToken(mark, mark))

        # Simple keys are allowed after '?' in the block context.
        self.allow_simple_key = not self.flow_level

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add KEY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(KeyToken(start_mark, end_mark))

    def fetch_value(self):

        # Do we have a saved possible simple key?
        if self.flow_level in self.possible_simple_keys:

            # Add KEY.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(key.token_number-self.tokens_taken,
                    KeyToken(key.mark, key.mark))

            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(key.token_number-self.tokens_taken,
                            BlockMappingStartToken(key.mark, key.mark))

            # There cannot be two simple keys one after another.
            self.allow_simple_key = False

        # It must be part of a complex key.
        else:

            # Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
            # anyway.)
            if not self.flow_level:

                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(None, None,
                            "mapping values are not allowed here",
                            self.get_mark())

            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.  It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.column):
                    mark = self.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))

            # Simple keys are allowed after ':' in the block context.
            self.allow_simple_key = not self.flow_level

            # Reset possible simple key on the current level.
            self.remove_possible_simple_key()

        # Add VALUE.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(ValueToken(start_mark, end_mark))

    def fetch_alias(self):

        # ALIAS could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after ALIAS.
        self.allow_simple_key = False

        # Scan and add ALIAS.
        self.tokens.append(self.scan_anchor(AliasToken))

    def fetch_anchor(self):

        # ANCHOR could start a simple key.
        self.save_possible_simple_key()

        # No simple keys after ANCHOR.
        self.allow_simple_key = False

        # Scan and add ANCHOR.
        self.tokens.append(self.scan_anchor(AnchorToken))

    def fetch_tag(self):

        # TAG could start a simple key.
        self.save_possible_simple_key()

        # No simple keys after TAG.
        self.allow_simple_key = False

        # Scan and add TAG.
        self.tokens.append(self.scan_tag())

    def fetch_literal(self):
        self.fetch_block_scalar(style='|')

    def fetch_folded(self):
        self.fetch_block_scalar(style='>')

    def fetch_block_scalar(self, style):

        # A simple key may follow a block scalar.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Scan and add SCALAR.
        self.tokens.append(self.scan_block_scalar(style))

    def fetch_single(self):
        self.fetch_flow_scalar(style='\'')

    def fetch_double(self):
        self.fetch_flow_scalar(style='"')

    def fetch_flow_scalar(self, style):

        # A flow scalar could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after flow scalars.
        self.allow_simple_key = False

        # Scan and add SCALAR.
        self.tokens.append(self.scan_flow_scalar(style))

    def fetch_plain(self):

        # A plain scalar could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after plain scalars. But note that `scan_plain` will
        # change this flag if the scan is finished at the beginning of the
        # line.
        self.allow_simple_key = False

        # Scan and add SCALAR. May change `allow_simple_key`.
        self.tokens.append(self.scan_plain())

    # Checkers.

    def check_directive(self):

        # DIRECTIVE:        ^ '%' ...
        # The '%' indicator is already checked.
        if self.column == 0:
            return True

    def check_document_start(self):

        # DOCUMENT-START:   ^ '---' (' '|'\n')
        if self.column == 0:
            if self.prefix(3) == '---'  \
                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                return True

    def check_document_end(self):

        # DOCUMENT-END:     ^ '...' (' '|'\n')
        if self.column == 0:
            if self.prefix(3) == '...'  \
                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                return True

    def check_block_entry(self):

        # BLOCK-ENTRY:      '-' (' '|'\n')
        return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'

    def check_key(self):

        # KEY(flow context):    '?'
        if self.flow_level:
            return True

        # KEY(block context):   '?' (' '|'\n')
        else:
            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'

    def check_value(self):

        # VALUE(flow context):  ':'
        if self.flow_level:
            return True

        # VALUE(block context): ':' (' '|'\n')
        else:
            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'

    def check_plain(self):

        # A plain scalar may start with any non-space character except:
        #   '-', '?', ':', ',', '[', ']', '{', '}',
        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
        #   '%', '@', '`'.
        #
        # It may also start with
        #   '-', '?', ':'
        # if it is followed by a non-space character.
        #
        # Note that we limit the last rule to the block context (except the
        # '-' character) because we want the flow context to be space
        # independent.
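        #
        # For example, '-foo' and ':bar' (block context) start plain scalars,
        # while '- foo' starts a block entry and ': bar' a mapping value.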
        ch = self.peek()
        return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
                or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
                        and (ch == '-' or (not self.flow_level and ch in '?:')))

    # Scanners.

    def scan_to_next_token(self):
        # We ignore spaces, line breaks and comments.
        # If we find a line break in the block context, we set the flag
        # `allow_simple_key` on.
        # The byte order mark is stripped if it's the first character in the
        # stream. We do not yet support BOM inside the stream as the
        # specification requires. Any such mark will be considered as a part
        # of the document.
        #
        # TODO: We need to make tab handling rules more sane. A good rule is
        #   Tabs cannot precede tokens
        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
        #   KEY(block), VALUE(block), BLOCK-ENTRY
        # So the checking code is
        #   if <TAB>:
        #       self.allow_simple_keys = False
        # We also need to add the check for `allow_simple_keys == True` to
        # `unwind_indent` before issuing BLOCK-END.
        # Scanners for block, flow, and plain scalars need to be modified.

        if self.index == 0 and self.peek() == '\uFEFF':
            self.forward()
        found = False
        while not found:
            while self.peek() == ' ':
                self.forward()
            if self.peek() == '#':
                while self.peek() not in '\0\r\n\x85\u2028\u2029':
                    self.forward()
            if self.scan_line_break():
                if not self.flow_level:
                    self.allow_simple_key = True
            else:
                found = True

    def scan_directive(self):
        # See the specification for details.
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == 'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == 'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            end_mark = self.get_mark()
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)

    def scan_directive_name(self, start_mark):
        # See the specification for details.
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
                or ch in '-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        return value

    def scan_yaml_directive_value(self, start_mark):
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        major = self.scan_yaml_directive_number(start_mark)
        if self.peek() != '.':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or '.', but found %r" % self.peek(),
                    self.get_mark())
        self.forward()
        minor = self.scan_yaml_directive_number(start_mark)
        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or ' ', but found %r" % self.peek(),
                    self.get_mark())
        return (major, minor)

    def scan_yaml_directive_number(self, start_mark):
        # See the specification for details.
        ch = self.peek()
        if not ('0' <= ch <= '9'):
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit, but found %r" % ch, self.get_mark())
        length = 0
        while '0' <= self.peek(length) <= '9':
            length += 1
        value = int(self.prefix(length))
        self.forward(length)
        return value

    def scan_tag_directive_value(self, start_mark):
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        handle = self.scan_tag_directive_handle(start_mark)
        while self.peek() == ' ':
            self.forward()
        prefix = self.scan_tag_directive_prefix(start_mark)
        return (handle, prefix)

    def scan_tag_directive_handle(self, start_mark):
        # See the specification for details.
        value = self.scan_tag_handle('directive', start_mark)
        ch = self.peek()
        if ch != ' ':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch, self.get_mark())
        return value

    def scan_tag_directive_prefix(self, start_mark):
        # See the specification for details.
        value = self.scan_tag_uri('directive', start_mark)
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch, self.get_mark())
        return value

    def scan_directive_ignored_line(self, start_mark):
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        if self.peek() == '#':
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in '\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a comment or a line break, but found %r"
                        % ch, self.get_mark())
        self.scan_line_break()

    def scan_anchor(self, TokenClass):
        # The specification does not restrict characters for anchors and
        # aliases. This may lead to problems, for instance, the document:
        #   [ *alias, value ]
        # can be interpreted in two ways, as
        #   [ "value" ]
        # and
        #   [ *alias , "value" ]
        # Therefore we restrict aliases to numbers and ASCII letters.
        start_mark = self.get_mark()
        indicator = self.peek()
        if indicator == '*':
            name = 'alias'
        else:
            name = 'anchor'
        self.forward()
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
                or ch in '-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        end_mark = self.get_mark()
        return TokenClass(value, start_mark, end_mark)

    def scan_tag(self):
        # See the specification for details.
        start_mark = self.get_mark()
        ch = self.peek(1)
        if ch == '<':
            handle = None
            self.forward(2)
            suffix = self.scan_tag_uri('tag', start_mark)
            if self.peek() != '>':
                raise ScannerError("while parsing a tag", start_mark,
                        "expected '>', but found %r" % self.peek(),
                        self.get_mark())
            self.forward()
        elif ch in '\0 \t\r\n\x85\u2028\u2029':
            handle = None
            suffix = '!'
            self.forward()
        else:
            length = 1
            use_handle = False
            while ch not in '\0 \r\n\x85\u2028\u2029':
                if ch == '!':
                    use_handle = True
                    break
                length += 1
                ch = self.peek(length)
            handle = '!'
            if use_handle:
                handle = self.scan_tag_handle('tag', start_mark)
            else:
                handle = '!'
                self.forward()
            suffix = self.scan_tag_uri('tag', start_mark)
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a tag", start_mark,
                    "expected ' ', but found %r" % ch, self.get_mark())
        value = (handle, suffix)
        end_mark = self.get_mark()
        return TagToken(value, start_mark, end_mark)

    def scan_block_scalar(self, style):
        # See the specification for details.
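        #
        # For example, with clip chomping and no indentation indicator,
        #
        #   >
        #     a
        #     b
        #
        # folds into the scalar 'a b\n', while the same text with a '|'
        # header keeps the line break: 'a\nb\n'.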

        if style == '>':
            folded = True
        else:
            folded = False

        chunks = []
        start_mark = self.get_mark()

        # Scan the header.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)

        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = ''

        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != '\0':
            chunks.extend(breaks)
            leading_non_space = self.peek() not in ' \t'
            length = 0
            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != '\0':

                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:

                if folded and line_break == '\n'    \
                        and leading_non_space and self.peek() not in ' \t':
                    if not breaks:
                        chunks.append(' ')
                else:
                    chunks.append(line_break)

                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == '\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break

        # Chomp the tail.
        if chomping is not False:
            chunks.append(line_break)
        if chomping is True:
            chunks.extend(breaks)

        # We are done.
        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
                style)

    def scan_block_scalar_indicators(self, start_mark):
        # See the specification for details.
        chomping = None
        increment = None
        ch = self.peek()
        if ch in '+-':
            if ch == '+':
                chomping = True
            else:
                chomping = False
            self.forward()
            ch = self.peek()
            if ch in '0123456789':
                increment = int(ch)
                if increment == 0:
                    raise ScannerError("while scanning a block scalar", start_mark,
                            "expected indentation indicator in the range 1-9, but found 0",
                            self.get_mark())
                self.forward()
        elif ch in '0123456789':
            increment = int(ch)
            if increment == 0:
                raise ScannerError("while scanning a block scalar", start_mark,
                        "expected indentation indicator in the range 1-9, but found 0",
                        self.get_mark())
            self.forward()
            ch = self.peek()
            if ch in '+-':
                if ch == '+':
                    chomping = True
                else:
                    chomping = False
                self.forward()
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected chomping or indentation indicators, but found %r"
                    % ch, self.get_mark())
        return chomping, increment

    def scan_block_scalar_ignored_line(self, start_mark):
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        if self.peek() == '#':
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in '\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected a comment or a line break, but found %r" % ch,
                    self.get_mark())
        self.scan_line_break()

    def scan_block_scalar_indentation(self):
        # See the specification for details.
        chunks = []
        max_indent = 0
        end_mark = self.get_mark()
        while self.peek() in ' \r\n\x85\u2028\u2029':
            if self.peek() != ' ':
                chunks.append(self.scan_line_break())
                end_mark = self.get_mark()
            else:
                self.forward()
                if self.column > max_indent:
                    max_indent = self.column
        return chunks, max_indent, end_mark

    def scan_block_scalar_breaks(self, indent):
        # See the specification for details.
        chunks = []
        end_mark = self.get_mark()
        while self.column < indent and self.peek() == ' ':
            self.forward()
        while self.peek() in '\r\n\x85\u2028\u2029':
            chunks.append(self.scan_line_break())
            end_mark = self.get_mark()
            while self.column < indent and self.peek() == ' ':
                self.forward()
        return chunks, end_mark

    def scan_flow_scalar(self, style):
        # See the specification for details.
        # Note that we loosen the indentation rules for quoted scalars. Quoted
        # scalars don't need to adhere to indentation because " and ' clearly
        # mark the beginning and the end of them. Therefore we are less
        # restrictive than the specification requires. We only need to check
        # that document separators are not included in scalars.
        if style == '"':
            double = True
        else:
            double = False
        chunks = []
        start_mark = self.get_mark()
        quote = self.peek()
        self.forward()
        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        while self.peek() != quote:
            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        self.forward()
        end_mark = self.get_mark()
        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
                style)

    ESCAPE_REPLACEMENTS = {
        '0':    '\0',
        'a':    '\x07',
        'b':    '\x08',
        't':    '\x09',
        '\t':   '\x09',
        'n':    '\x0A',
        'v':    '\x0B',
        'f':    '\x0C',
        'r':    '\x0D',
        'e':    '\x1B',
        ' ':    '\x20',
        '\"':   '\"',
        '\\':   '\\',
        'N':    '\x85',
        '_':    '\xA0',
        'L':    '\u2028',
        'P':    '\u2029',
    }

    ESCAPE_CODES = {
        'x':    2,
        'u':    4,
        'U':    8,
    }

    def scan_flow_scalar_non_spaces(self, double, start_mark):
        # See the specification for details.
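        #
        # For example, the double-quoted scalar "a\x41\n\u263A" resolves to
        # 'a', 'A', a line feed and U+263A via the escape tables above.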
        chunks = []
        while True:
            length = 0
            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
                length += 1
            if length:
                chunks.append(self.prefix(length))
                self.forward(length)
            ch = self.peek()
            if not double and ch == '\'' and self.peek(1) == '\'':
                chunks.append('\'')
                self.forward(2)
            elif (double and ch == '\'') or (not double and ch in '\"\\'):
                chunks.append(ch)
                self.forward()
            elif double and ch == '\\':
                self.forward()
                ch = self.peek()
                if ch in self.ESCAPE_REPLACEMENTS:
                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                    self.forward()
                elif ch in self.ESCAPE_CODES:
                    length = self.ESCAPE_CODES[ch]
                    self.forward()
                    for k in range(length):
                        if self.peek(k) not in '0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexadecimal numbers, but found %r" %
                                        (length, self.peek(k)), self.get_mark())
                    code = int(self.prefix(length), 16)
                    chunks.append(chr(code))
                    self.forward(length)
                elif ch in '\r\n\x85\u2028\u2029':
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch, self.get_mark())
            else:
                return chunks

    def scan_flow_scalar_spaces(self, double, start_mark):
        # See the specification for details.
        chunks = []
        length = 0
        while self.peek(length) in ' \t':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch == '\0':
            raise ScannerError("while scanning a quoted scalar", start_mark,
                    "found unexpected end of stream", self.get_mark())
        elif ch in '\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            breaks = self.scan_flow_scalar_breaks(double, start_mark)
            if line_break != '\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(' ')
            chunks.extend(breaks)
        else:
            chunks.append(whitespaces)
        return chunks

    def scan_flow_scalar_breaks(self, double, start_mark):
        # See the specification for details.
        chunks = []
        while True:
            # Instead of checking indentation, we check for document
            # separators.
            prefix = self.prefix(3)
            if (prefix == '---' or prefix == '...')   \
                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                raise ScannerError("while scanning a quoted scalar", start_mark,
                        "found unexpected document separator", self.get_mark())
            while self.peek() in ' \t':
                self.forward()
            if self.peek() in '\r\n\x85\u2028\u2029':
                chunks.append(self.scan_line_break())
            else:
                return chunks

    def scan_plain(self):
        # See the specification for details.
        # We add an additional restriction for the flow context:
        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
        # We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are loosened for the flow context.
        chunks = []
        start_mark = self.get_mark()
        end_mark = start_mark
        indent = self.indent+1
        # We allow zero indentation for scalars, but then we need to check for
        # document separators at the beginning of the line.
        #if indent == 0:
        #    indent = 1
        spaces = []
        while True:
            length = 0
            if self.peek() == '#':
                break
            while True:
                ch = self.peek(length)
                if ch in '\0 \t\r\n\x85\u2028\u2029'    \
                        or (not self.flow_level and ch == ':' and
                                self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
                        or (self.flow_level and ch in ',:?[]{}'):
                    break
                length += 1
            # It's not clear what we should do with ':' in the flow context.
            if (self.flow_level and ch == ':'
                    and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
                self.forward(length)
                raise ScannerError("while scanning a plain scalar", start_mark,
                    "found unexpected ':'", self.get_mark(),
                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
            if length == 0:
                break
            self.allow_simple_key = False
            chunks.extend(spaces)
            chunks.append(self.prefix(length))
            self.forward(length)
            end_mark = self.get_mark()
            spaces = self.scan_plain_spaces(indent, start_mark)
            if not spaces or self.peek() == '#' \
                    or (not self.flow_level and self.column < indent):
                break
        return ScalarToken(''.join(chunks), True, start_mark, end_mark)

    def scan_plain_spaces(self, indent, start_mark):
        # See the specification for details.
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        chunks = []
        length = 0
        while self.peek(length) in ' ':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch in '\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            self.allow_simple_key = True
            prefix = self.prefix(3)
            if (prefix == '---' or prefix == '...')   \
                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                return
            breaks = []
            while self.peek() in ' \r\n\x85\u2028\u2029':
                if self.peek() == ' ':
                    self.forward()
                else:
                    breaks.append(self.scan_line_break())
                    prefix = self.prefix(3)
                    if (prefix == '---' or prefix == '...')   \
                            and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                        return
            if line_break != '\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks

    def scan_tag_handle(self, name, start_mark):
        # See the specification for details.
        # For some strange reason, the specification does not allow '_' in
        # tag handles. I have allowed it anyway.
        ch = self.peek()
        if ch != '!':
            raise ScannerError("while scanning a %s" % name, start_mark,
                    "expected '!', but found %r" % ch, self.get_mark())
        length = 1
        ch = self.peek(length)
        if ch != ' ':
            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
                    or ch in '-_':
                length += 1
                ch = self.peek(length)
            if ch != '!':
                self.forward(length)
                raise ScannerError("while scanning a %s" % name, start_mark,
                        "expected '!', but found %r" % ch, self.get_mark())
            length += 1
        value = self.prefix(length)
        self.forward(length)
        return value

    def scan_tag_uri(self, name, start_mark):
        # See the specification for details.
        # Note: we do not check if the URI is well-formed.
        chunks = []
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
                or ch in '-;/?:@&=+$,_.!~*\'()[]%':
            if ch == '%':
                chunks.append(self.prefix(length))
                self.forward(length)
                length = 0
                chunks.append(self.scan_uri_escapes(name, start_mark))
            else:
                length += 1
            ch = self.peek(length)
        if length:
            chunks.append(self.prefix(length))
            self.forward(length)
            length = 0
        if not chunks:
            raise ScannerError("while parsing a %s" % name, start_mark,
                    "expected URI, but found %r" % ch, self.get_mark())
        return ''.join(chunks)

    def scan_uri_escapes(self, name, start_mark):
        # See the specification for details.
        codes = []
        mark = self.get_mark()
        while self.peek() == '%':
            self.forward()
            for k in range(2):
                if self.peek(k) not in '0123456789ABCDEFabcdef':
                    raise ScannerError("while scanning a %s" % name, start_mark,
                            "expected URI escape sequence of 2 hexadecimal numbers, but found %r"
                            % self.peek(k), self.get_mark())
            codes.append(int(self.prefix(2), 16))
            self.forward(2)
        try:
            value = bytes(codes).decode('utf-8')
        except UnicodeDecodeError as exc:
            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
        return value

    def scan_line_break(self):
        # Transforms:
        #   '\r\n'      :   '\n'
        #   '\r'        :   '\n'
        #   '\n'        :   '\n'
        #   '\x85'      :   '\n'
        #   '\u2028'    :   '\u2028'
        #   '\u2029'    :   '\u2029'
        #   default     :   ''
        ch = self.peek()
        if ch in '\r\n\x85':
            if self.prefix(2) == '\r\n':
                self.forward(2)
            else:
                self.forward()
            return '\n'
        elif ch in '\u2028\u2029':
            self.forward()
            return ch
        return ''

#try:
#    import psyco
#    psyco.bind(Scanner)
#except ImportError:
#    pass

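# A rough sketch of how Scanner is meant to be used: it must be mixed with
# Reader, which supplies peek/prefix/forward and the marks (yaml/loader.py
# does this for the real entry points such as yaml.scan()). The class name
# below is only illustrative; kept commented out, like the psyco hook above.
#
#from .reader import Reader
#
#class _DemoScanner(Reader, Scanner):
#    def __init__(self, stream):
#        Reader.__init__(self, stream)
#        Scanner.__init__(self)
#
#scanner = _DemoScanner("- foo\n- bar\n")
#while scanner.check_token():
#    print(scanner.get_token())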