path: root/lib/prism/translation/parser/lexer.rb
author     Kevin Newton <kddnewton@gmail.com>  2024-03-06 14:49:25 -0500
committer  Kevin Newton <kddnewton@gmail.com>  2024-03-06 21:42:54 -0500
commit     d266b714672734a9604f19c55d291d12c20718a3 (patch)
tree       f133e74cd64d5e13d667c41236fd5f52b0ee573c /lib/prism/translation/parser/lexer.rb
parent     48ca2ce5fc6a7ed8f8fd0e5ead40c160369e2a4c (diff)
[ruby/prism] Use the diagnostic types in the parser translation layer
https://github.com/ruby/prism/commit/1a8a0063dc
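For context, lexer.rb backs the parser-gem-compatible front end in Prism::Translation. A rough usage sketch, not taken from the commit, assuming the whitequark/parser gem is installed and using the versioned Prism::Translation::Parser33 class (Ruby 3.3 syntax); tokenize returns the token list that Lexer#to_a assembles:

    require "parser"
    require "prism"

    buffer = Parser::Source::Buffer.new("(example)")
    buffer.source = "1 + 2\n"

    # tokenize returns [ast, comments, tokens]; tokens is the array built by
    # Prism::Translation::Parser::Lexer#to_a, in the parser gem's
    # [type, [value, range]] shape.
    ast, comments, tokens = Prism::Translation::Parser33.new.tokenize(buffer)
    p tokens.map(&:first) # just the token types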
Diffstat (limited to 'lib/prism/translation/parser/lexer.rb')
-rw-r--r--  lib/prism/translation/parser/lexer.rb  16
1 file changed, 11 insertions, 5 deletions
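The first hunk below hoists lexed.length into a local and uses it as the bound for the main token loop; the second hunk reuses the same length to bound the embedded-document scan. A minimal sketch of that loop shape, with a made-up Token struct and sample data standing in for prism's real lexer output:

    # Sketch only: Token and the sample data are made up for illustration.
    Token = Struct.new(:type, :value)

    lexed = [
      [Token.new(:tIDENTIFIER, "foo"), :expr_cmdarg],
      [Token.new(:tEQL, "="), :expr_beg],
      [Token.new(:tINTEGER, "1"), :expr_end]
    ]

    tokens = []
    index = 0
    length = lexed.length # computed once and reused as the loop bound

    while index < length
      token, _state = lexed[index]
      index += 1
      tokens << [token.type, token.value]
    end

    p tokens # => [[:tIDENTIFIER, "foo"], [:tEQL, "="], [:tINTEGER, "1"]]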
diff --git a/lib/prism/translation/parser/lexer.rb b/lib/prism/translation/parser/lexer.rb
index bc7e77f291..9a0a48a00f 100644
--- a/lib/prism/translation/parser/lexer.rb
+++ b/lib/prism/translation/parser/lexer.rb
@@ -213,9 +213,11 @@ module Prism
# Convert the prism tokens into the expected format for the parser gem.
def to_a
tokens = []
+
index = 0
- while index < lexed.length
+ length = lexed.length
+ while index < length
token, state = lexed[index]
index += 1

next if %i[IGNORED_NEWLINE __END__ EOF].include?(token.type)
@@ -229,14 +231,18 @@ module Prism
value.delete_prefix!("?")
when :tCOMMENT
if token.type == :EMBDOC_BEGIN
- until (next_token = lexed[index][0]) && next_token.type == :EMBDOC_END
+ start_index = index
+
+ while !((next_token = lexed[index][0]) && next_token.type == :EMBDOC_END) && (index < length - 1)
value += next_token.value
index += 1
end

- value += next_token.value
- location = Range.new(source_buffer, offset_cache[token.location.start_offset], offset_cache[lexed[index][0].location.end_offset])
- index += 1
+ if start_index != index
+ value += next_token.value
+ location = Range.new(source_buffer, offset_cache[token.location.start_offset], offset_cache[lexed[index][0].location.end_offset])
+ index += 1
+ end
else
value.chomp!
location = Range.new(source_buffer, offset_cache[token.location.start_offset], offset_cache[token.location.end_offset - 1])
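The second hunk's guard, sketched as standalone Ruby. The collect_embdoc helper, the Token struct, and the sample values are hypothetical; the point is the bounds check (index < length - 1) plus the start_index comparison, which together keep an unterminated =begin block from walking past the end of the token list:

    # Sketch only: not the lexer's actual code. Token, collect_embdoc, and the
    # sample data below are made up for illustration.
    Token = Struct.new(:type, :value)

    # `index` points at the first token after EMBDOC_BEGIN; assumes at least one
    # token follows (prism always emits a trailing EOF token).
    def collect_embdoc(lexed, index, length)
      value = +"=begin\n"
      start_index = index

      # Stop at EMBDOC_END, or at the last available token if =end never appears.
      while !((next_token = lexed[index][0]) && next_token.type == :EMBDOC_END) && (index < length - 1)
        value += next_token.value
        index += 1
      end

      # Only consume the terminator if the scan actually advanced.
      if start_index != index
        value += next_token.value
        index += 1
      end

      [value, index]
    end

    lexed = [
      [Token.new(:EMBDOC_LINE, "Some documentation.\n"), nil],
      [Token.new(:EMBDOC_END, "=end\n"), nil]
    ]

    p collect_embdoc(lexed, 0, lexed.length)
    # => ["=begin\nSome documentation.\n=end\n", 2]

When =end is missing, the scan stops at the last available token instead of indexing past the end of lexed, and the start_index check skips the terminator handling when nothing was consumed.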