@@ -682,6 +682,7 @@ def tokeniter(self, source, name, filename=None, state=None):
         balancing_stack = []
         lstrip_unless_re = self.lstrip_unless_re
         newlines_stripped = 0
+        line_starting = True
 
         while 1:
             # tokenizer loop
@@ -731,11 +732,11 @@ def tokeniter(self, source, name, filename=None, state=None):
                     ):
                         # The start of text between the last newline and the tag.
                         l_pos = text.rfind("\n") + 1
-
-                        # If there's only whitespace between the newline and the
-                        # tag, strip it.
-                        if not lstrip_unless_re.search(text, l_pos):
-                            groups = (text[:l_pos],) + groups[1:]
+                        if l_pos > 0 or line_starting:
+                            # If there's only whitespace between the newline and the
+                            # tag, strip it.
+                            if not lstrip_unless_re.search(text, l_pos):
+                                groups = (text[:l_pos],) + groups[1:]
 
                     for idx, token in enumerate(tokens):
                         # failure group
@@ -794,6 +795,8 @@ def tokeniter(self, source, name, filename=None, state=None):
                         yield lineno, tokens, data
                     lineno += data.count("\n")
 
+                line_starting = m.group()[-1:] == "\n"
+
                 # fetch new position into new variable so that we can check
                 # if there is a internal parsing error which would result
                 # in an infinite loop
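The added `line_starting` flag makes `lstrip_blocks` apply only when a block tag actually starts a line: it is initialized to True (the template itself begins a line) and updated after every match to record whether the previous token ended with a newline, so the new `if l_pos > 0 or line_starting:` guard accepts either a newline inside the preceding text or one at the end of the previous token. A minimal sketch of the resulting behavior, assuming a Jinja2 version that includes this change (the template strings are made up for illustration):

from jinja2 import Environment

env = Environment(lstrip_blocks=True)

# This {% if %} does NOT start a line: two spaces follow "{{ a }}",
# so l_pos == 0 and line_starting is False when the tag is lexed.
inline = env.from_string("{{ a }}  {% if b %}x{% endif %}")
print(repr(inline.render(a="A", b=True)))
# With this change: 'A  x' (the spaces survive).
# Before it, the whitespace-only text was stripped anyway: 'Ax'.

# A tag that does start a line is still lstripped as documented.
block = env.from_string("{{ a }}\n  {% if b %}x{% endif %}")
print(repr(block.render(a="A", b=True)))
# 'A\nx' -- the indentation before {% if %} is removed.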