require "xml"

module Tartrazine
  VERSION = "0.1.0"

  # This implements a lexer for Pygments RegexLexers as expressed
  # in Chroma's XML serialization.
  #
  # For explanations of what emitters, transformers, etc. do,
  # the Pygments documentation is a good place to start:
  # https://pygments.org/docs/lexerdevelopment/
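
  # A serialized lexer looks roughly like this (a sketch assembled from
  # the elements parsed below, not copied from a real Chroma file):
  #
  # <lexer>
  #   <config>
  #     <name>Bash</name>
  #     <alias>bash</alias>
  #     <filename>*.sh</filename>
  #     <mime_type>application/x-sh</mime_type>
  #     <priority>0.0</priority>
  #   </config>
  #   <rules>
  #     <state name="root">
  #       <rule pattern="\s+">
  #         <token type="Text"/>
  #       </rule>
  #     </state>
  #   </rules>
  # </lexer>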

  class State
    property name : String = ""
    property rules = [] of Rule
  end

  class Rule
    property pattern : Regex = Regex.new ""
    property emitters : Array(Emitter) = [] of Emitter
    property transformers : Array(Transformer) = [] of Transformer

    def match(text, pos, lexer) : Tuple(Bool, Int32, Array(Token))
      tokens = [] of Token
      match = pattern.match(text, pos)
      # No match: fail without advancing pos or emitting tokens
      return false, pos, [] of Token if match.nil?

      # We matched: emit the tokens, then move pos to just after the match
      emitters.each do |emitter|
        tokens += emitter.emit(match, lexer)
      end
      return true, match.end, tokens
    end
  end
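
  # For example (assuming a single <token type="Text"/> emitter), a rule
  # with pattern /\s+/ matching "foo   bar" at pos 3 would return
  # {true, 6, [{type: "Text", value: "   "}]}.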

  # A rule that includes another state, like this:
  #
  # <rule>
  #   <include state="interp"/>
  # </rule>
  # </state>
  # <state name="interp">
  #   <rule pattern="\$\(\(">
  #     <token type="Keyword"/>
  # ...
  class IncludeStateRule < Rule
    property state : String = ""

    def match(text, pos, lexer) : Tuple(Bool, Int32, Array(Token))
      puts "Including state #{state} from #{lexer.state_stack.last}"
      lexer.states[state].rules.each do |rule|
        matched, new_pos, new_tokens = rule.match(text, pos, lexer)
        return true, new_pos, new_tokens if matched
      end
      return false, pos, [] of Token
    end
  end

  # These always-matching rules look like this:
  #
  # <rule>
  #   <pop depth="1"/>
  # </rule>
  #
  # They always match, don't advance pos, usually alter
  # the state stack, and usually don't generate tokens.
  class Always < Rule
    def match(text, pos, lexer) : Tuple(Bool, Int32, Array(Token))
      tokens = [] of Token
      emitters.each do |emitter|
        tokens += emitter.emit(nil, lexer)
      end
      return true, pos, tokens
    end
  end
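
  # Always is what from_xml builds for a <rule> with no pattern and no
  # <include> child, so its emitters (pop, push, ...) run unconditionally.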

  class Emitter
    property type : String
    property xml : XML::Node

    # The node is required (not nilable): every emitter type reads its
    # attributes or children
    def initialize(@type : String, @xml : XML::Node)
    end

    def emit(match : Regex::MatchData?, lexer : Lexer) : Array(Token)
      case type
      when "token"
        raise Exception.new "Can't have a token without a match" if match.nil?
        [Token.new(type: xml["type"], value: match[0])]
      # TODO: handle #push, #push:n, #pop and multiple states
      when "push"
        # Push without a state name means push the current state again
        state = xml["state"]? || lexer.state_stack.last
        puts "Pushing state #{state}"
        lexer.state_stack << state
        [] of Token
      when "pop"
        depth = xml["depth"].to_i
        puts "Popping #{depth} states"
        if lexer.state_stack.size <= depth
          puts "Can't pop #{depth} states, only have #{lexer.state_stack.size}"
        else
          lexer.state_stack.pop(depth)
        end
        [] of Token
      when "bygroups"
        # This takes the capture groups in the regex and emits them as
        # tokens, one <token> child per group
        raise Exception.new "Can't have a token without a match" if match.nil?
        token_types = xml.children.select { |n| n.name == "token" }.map { |t| t["type"].to_s }
        p! match, token_types
        result = [] of Token
        token_types.each_with_index do |t, i|
          # Group 0 is the whole match, so token i corresponds
          # to capture group i + 1
          result << {type: t, value: match[i + 1]}
        end
        result
      else
        raise Exception.new("Unknown emitter type: #{type}: #{xml}")
      end
    end
  end
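
  # For example, with pattern "(\w+)(\s+)" and
  # <bygroups><token type="Name"/><token type="Whitespace"/></bygroups>,
  # matching "foo  " emits
  # [{type: "Name", value: "foo"}, {type: "Whitespace", value: "  "}].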

  class Transformer
    property type : String = ""
    property xml : String = ""

    def transform
      puts "Transforming #{type} #{xml}"
    end
  end

  alias Token = NamedTuple(type: String, value: String)
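  # A token pairs a Pygments-style token type with the matched text,
  # e.g. {type: "Keyword", value: "def"}.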

  class Lexer
    property config = {
      name:       "",
      aliases:    [] of String,
      filenames:  [] of String,
      mime_types: [] of String,
      priority:   0.0,
    }
    property states = {} of String => State
    property state_stack = ["root"]

    # Turn the text into a list of tokens.
    def tokenize(text) : Array(Token)
      tokens = [] of Token
      pos = 0
      while pos < text.size
        # Reset per position so a match from the previous iteration
        # can't mask a failure here
        matched = false
        state = states[state_stack.last]
        state.rules.each do |rule|
          matched, new_pos, new_tokens = rule.match(text, pos, self)
          next unless matched
          pos = new_pos
          tokens += new_tokens
          break # Go back to processing with the current state
        end
        # If no rule matched, emit an error token and skip one character
        unless matched
          tokens << {type: "Error", value: ""}
          pos += 1
        end
      end
      tokens
    end
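
    # Usage sketch (assuming a lexers/ directory of Chroma XML files, as
    # in the loading loop at the bottom of this file):
    #
    #   lexer = Tartrazine::Lexer.from_xml(File.read("lexers/bash.xml"))
    #   lexer.tokenize("echo hi").each do |token|
    #     puts "#{token[:type]}: #{token[:value]}"
    #   end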

    def self.from_xml(xml : String) : Lexer
      l = Lexer.new
      lexer = XML.parse(xml).first_element_child
      if lexer
        config = lexer.children.find { |n| n.name == "config" }
        if config
          l.config = {
            name:       xml_to_s(config, name) || "",
            aliases:    xml_to_a(config, _alias) || [] of String,
            filenames:  xml_to_a(config, filename) || [] of String,
            mime_types: xml_to_a(config, mime_type) || [] of String,
            priority:   xml_to_f(config, priority) || 0.0,
          }
        end

        rules = lexer.children.find { |n| n.name == "rules" }
        if rules
          # Rules contains states 🤷
          rules.children.select { |n| n.name == "state" }.each do |state_node|
            state = State.new
            state.name = state_node["name"]
            if l.states.has_key?(state.name)
              puts "Duplicate state: #{state.name}"
            else
              l.states[state.name] = state
            end
            # And states contain rules 🤷
            state_node.children.select { |n| n.name == "rule" }.each do |rule_node|
              case rule_node["pattern"]?
              when nil
                # No pattern: either an include of another state,
                # or an always-matching rule
                if rule_node.first_element_child.try &.name == "include"
                  rule = IncludeStateRule.new
                  include_node = rule_node.children.find { |n| n.name == "include" }
                  rule.state = include_node["state"] if include_node
                  state.rules << rule
                else
                  rule = Always.new
                  state.rules << rule
                end
              else
                rule = Rule.new
                begin
                  rule.pattern = /#{rule_node["pattern"]}/m
                  state.rules << rule
                rescue ex : Exception
                  puts "Bad regex in #{l.config[:name]}: #{ex}"
                  next
                end
              end

              next if rule.nil?
              # Rules contain maybe an emitter and maybe a transformer;
              # emitters emit tokens, transformers do things to
              # the state stack.
              rule_node.children.each do |node|
                next unless node.element?
                # case node.name
                # when "pop", "push", "multi", "combine" # "include",
                #   transformer = Transformer.new
                #   transformer.type = node.name
                #   transformer.xml = node.to_s
                #   rule.transformers << transformer
                # else
                rule.emitters << Emitter.new(node.name, node)
                # end
              end
            end
          end
        end
      end
      l
    end
  end
end

# Try loading all lexers, registering each under its name and its aliases
lexers = {} of String => Tartrazine::Lexer
Dir.glob("lexers/*.xml").each do |fname|
  l = Tartrazine::Lexer.from_xml(File.read(fname))
  lexers[l.config[:name].downcase] = l
  l.config[:aliases].each do |key|
    lexers[key.downcase] = l
  end
end
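
# After this a lexer is reachable under any of its names: assuming a Bash
# definition with alias "sh", lexers["bash"] and lexers["sh"] would be
# the same Lexer instance.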

# Convenience macros to parse XML
macro xml_to_s(node, name)
  {{node}}.children.find { |n| n.name == "{{name}}".lstrip("_") }.try &.content.to_s
end

macro xml_to_f(node, name)
  ({{node}}.children.find { |n| n.name == "{{name}}".lstrip("_") }.try &.content.to_s.to_f)
end

macro xml_to_a(node, name)
  {{node}}.children.select { |n| n.name == "{{name}}".lstrip("_") }.map { |n| n.content.to_s }
end
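
# For example, `xml_to_s(config, name)` expands to roughly:
#
#   config.children.find { |n| n.name == "name".lstrip("_") }.try &.content.to_s
#
# The lstrip("_") lets callers write `_alias` for the <alias> element,
# since `alias` is a Crystal keyword.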

# Let's run some tests
good = 0
bad = 0
Dir.glob("tests/*/") do |lexername|
  key = File.basename(lexername).downcase
  next unless lexers.has_key? key
  lexer = lexers[key]

  Dir.glob("#{lexername}*.txt") do |testname|
    puts "Testing #{key} with #{testname}"
    # Take the text between the ---input--- and ---tokens--- markers
    test = File.read(testname).split("---input---\n").last.split("---tokens---").first
    begin
      tokens = lexer.tokenize(test)
      good += 1
    rescue ex : Exception
      puts "Error in #{key} with #{testname}: #{ex}"
      bad += 1
      raise ex
    end
  end
end

puts "Good: #{good} Bad: #{bad}"