require "./spec_helper"
require "../src/parcom.cr"

include Parcom

# Specs for the Parcom parser-combinator library: the TokenStream input
# wrapper, the Result pair, and each primitive/combinator parser.

describe TokenStream do
  describe ".from_string" do
    it "constructs a TokenStream(Char) from a String" do
      tokens = TokenStream.from_string("abcd")
      tokens.tokens.should eq("abcd".chars)
    end
  end

  describe "#initialize" do
    it "wraps an array with the contents of the given iterable" do
      # Any Iterable works, not just Array.
      set = Set{'a', 'b', 'c', 'd'}
      tokens = TokenStream.new(set)
      tokens.tokens.should eq(set.to_a)

      arr = "abcd".chars
      tokens = TokenStream.new(arr)
      tokens.tokens.should eq(arr)
    end
  end

  # Shared fixtures for the indexing/emptiness examples below.
  context do
    tokens_empty = TokenStream.new([] of Char)
    tokens = TokenStream.from_string("abcd")

    describe "#[]" do
      it "returns the token at the given index" do
        tokens[2].should eq('c')
        expect_raises(IndexError) { tokens_empty[2] }
      end

      it "returns a new TokenStream similar to Array#[](Int, Int)" do
        # Like Array#[](start, count), a count past the end is truncated.
        tokens[1, 5].should eq(TokenStream.new(['b', 'c', 'd']))
        expect_raises(IndexError) { tokens_empty[1, 5] }
      end

      it "returns a new TokenStream similar to Array#[](Range)" do
        tokens[1..3].should eq(TokenStream.new(['b', 'c', 'd']))
        expect_raises(IndexError) { tokens_empty[1..3] }
      end
    end

    describe "#[]?" do
      it "analogous to `Array#[]?`" do
        # we should only need to check the nil-returning cases
        tokens_empty[2]?.should be_nil
        tokens_empty[1, 5]?.should be_nil
        tokens_empty[1..3]?.should be_nil
      end
    end

    describe "#empty?" do
      it "exposes the `#empty?` method of the wrapped array" do
        tokens.empty?.should be_false
        tokens_empty.empty?.should be_true
      end
    end
  end
end

describe Result do
  describe "#initialize" do
    it "sets values for #tokens and #value" do
      tokens = TokenStream.from_string("esting")
      value = 't'
      result = Result(Char, Char).new(tokens, value)
      result.tokens.should eq(tokens)
      result.value.should eq(value)
    end
  end
end

describe Parser do
  p = AnyToken(Char).new

  describe "#parse?" do
    it "returns `nil` if the parser fails" do
      result = p.parse?(TokenStream.new([] of Char))
      result.should be_nil
    end

    it "returns a `Result(T, V)` if the parser succeeds" do
      tokens = TokenStream.from_string("testing")
      # FIX: previously called `p.parse`, which never exercised `#parse?`;
      # this example is about the nil-returning variant.
      result = p.parse?(tokens)
      result.should be_a(Result(Char, Char))
    end
  end
end

describe Flunk do
  describe "#parse" do
    it "always fails" do
      tokens = TokenStream.from_string("testing")
      expect_raises(ParserException) { Flunk(Char, Char).new.parse(tokens) }
    end
  end
end

describe AnyToken do
  context do
    p = AnyToken(Char).new

    describe "#parse" do
      it "succeeds when input is non-empty" do
        tokens = TokenStream.from_string("testing")
        result = p.parse(tokens)
        # Consumes exactly one token.
        result.tokens.should eq(tokens[1..])
        result.value.should eq('t')
      end

      it "fails when input is empty" do
        expect_raises(ParserException) { p.parse(TokenStream.new([] of Char)) }
      end
    end
  end
end

describe Eof do
  p = Eof(Char).new

  describe "#parse" do
    it "succeeds when input is empty" do
      result = p.parse(TokenStream.new([] of Char))
      result.tokens.empty?.should be_true
      result.value.should be_nil
    end

    it "fails when input is non-empty" do
      tokens = TokenStream.from_string("testing")
      expect_raises(ParserException) { p.parse(tokens) }
    end
  end
end

describe Peek do
  tokens = TokenStream.from_string("testing")
  p = AnyToken(Char).new
  result_normal = p.parse(tokens)
  result_peek = Peek.new(p).parse(tokens)

  describe "#parse" do
    it "does not modify the result of the wrapped parser" do
      result_peek.value.should eq(result_normal.value)
    end

    it "does not consume any input" do
      result_peek.tokens.should eq(tokens)
    end
  end
end

describe Assert do
  test_f = ->(x : Char) { x == 't' }
  p = Assert.new(AnyToken(Char).new, test_f)

  describe "#parse" do
    it "fails if the wrapped parser fails" do
      expect_raises(ParserException) { p.parse(TokenStream.new([] of Char)) }
    end

    it "fails if the result value fails the test" do
      tokens = TokenStream.from_string("_testing")
      expect_raises(ParserException) { p.parse(tokens) }
    end

    it "succeeds if the wrapped parser succeeds and the test passes" do
      tokens = TokenStream.from_string("testing")
      expected_char = tokens[0]
      result = p.parse(tokens)
      result.value.should eq(expected_char)
      # Sanity-check that the fixture predicate really accepts this token.
      test_f.call(expected_char).should be_true
    end
  end
end

describe Satisfy do
  test_f = ->(x : Char) { x == 't' }
  p = Satisfy.new(test_f)

  describe "#parse" do
    it "fails if the input is empty" do
      expect_raises(ParserException) { p.parse(TokenStream.new([] of Char)) }
    end

    it "fails if the token fails the test" do
      tokens = TokenStream.from_string("_testing")
      expect_raises(ParserException) { p.parse(tokens) }
    end

    it "succeeds if the token passes the test" do
      tokens = TokenStream.from_string("testing")
      expected_char = tokens[0]
      result = p.parse(tokens)
      result.value.should eq(expected_char)
      test_f.call(result.value).should be_true
    end
  end
end

describe Token do
  tokens = TokenStream.from_string("testing")

  describe "#parse" do
    it "fails if the input is empty" do
      p = Token(Char).new('t')
      expect_raises(ParserException) { p.parse(TokenStream.new([] of Char)) }
    end

    it "fails if the token is not the expected token" do
      p = Token(Char).new('#')
      expect_raises(ParserException) { p.parse(tokens) }
    end

    it "succeeds if the token is the expected token" do
      expected_char = tokens[0]
      p = Token(Char).new(expected_char)
      result = p.parse(tokens)
      result.value.should eq(expected_char)
    end
  end
end

describe Map do
  describe "#parse" do
    it "fails if the wrapped parser fails" do
      id = ->(x : Char) { x }
      p = Map.new(AnyToken(Char).new, id)
      expect_raises(ParserException) { p.parse(TokenStream.new([] of Char)) }
    end

    it "changes the result value via the provided proc" do
      # Maps Char -> Bool, so the result value type changes too.
      is_letter = ->(x : Char) { x.letter? }
      p = Map.new(AnyToken(Char).new, is_letter)
      result = p.parse(TokenStream.from_string("testing"))
      result.value.should be_true
      result = p.parse(TokenStream.from_string("_testing"))
      result.value.should be_false
    end
  end
end

describe Plus do
  describe "#parse" do
    tokens = TokenStream.from_string("testing")
    p_t = Token(Char).new('t')
    p_e = Token(Char).new('e')
    p_at = Token(Char).new('@')

    it "fails if the first parser fails" do
      p = p_at + p_e
      expect_raises(ParserException) { p.parse(tokens) }
    end

    it "fails if the second parser fails" do
      p = p_t + p_at
      expect_raises(ParserException) { p.parse(tokens) }
    end

    it "fails if both parsers fail" do
      p = p_at + p_at
      expect_raises(ParserException) { p.parse(tokens) }
    end

    it "succeeds if both parsers succeed" do
      p = p_t + p_e
      result = p.parse(tokens)
      result.tokens.should eq(tokens[2..])
      result.value[0].should eq('t')
      result.value[1].should eq('e')
    end

    it "evaluates parsers from left to right (left associative)" do
      p_succeeds = p_t + p_e
      p_fails = p_e + p_t
      p_succeeds.parse(tokens) # should not raise an exception
      expect_raises(ParserException) { p_fails.parse(tokens) }

      # Nesting of the result tuple mirrors the associativity of `+`.
      p_s = Token(Char).new('s')
      r = (p_t + p_e + p_s).parse(tokens) # should not raise an exception
      r.value.should be_a({ {Char, Char}, Char})
      r = (p_t + (p_e + p_s)).parse(tokens) # should not raise an exception
      r.value.should be_a({Char, {Char, Char} })
    end
  end
end

describe Phrase do
  p = Phrase.new(Token.new('t'))

  describe "#parse" do
    it "fails if the wrapped parser fails" do
      tokens = TokenStream.from_string("_")
      expect_raises(ParserException) { p.parse(tokens) }
    end

    it "fails if not all of the input tokens are parsed" do
      tokens = TokenStream.from_string("tt")
      expect_raises(ParserException) { p.parse(tokens) }
    end

    it "succeeds if the wrapped parser successfully parses all of the input" do
      tokens = TokenStream.from_string("t")
      result = p.parse(tokens)
      result.tokens.empty?.should be_true
      result.value.should eq('t')
    end
  end
end

pending Recover do
end

pending Optional do
end
# Combinators that still need specs; listed so they show up as pending
# in the spec runner output.
pending Tokens do
end

pending Many do
end

pending Some do
end

pending Exactly do
end

pending AtLeast do
end

pending AtMost do
end

pending Between do
end

pending StopAt do
end

pending StopAfter do
end

pending StopIf do
end

pending FirstOf do
end

pending SepBy do
end