Merge pull request #3 from daawaan4U/feat/continuous-integration
Setup Continuous Integration
daawaan4x authored Sep 15, 2024
2 parents 57aa03f + 99fccb1 commit 5d84efa
Showing 14 changed files with 231 additions and 143 deletions.
11 changes: 11 additions & 0 deletions .github/setup/action.yaml
@@ -0,0 +1,11 @@
name: Setup
description: Setup repository
runs:
  using: composite
  steps:
    - uses: actions/setup-python@v5
      with:
        python-version: 3.8
        cache: pip
    - run: pip install -r requirements.txt
      shell: bash
33 changes: 33 additions & 0 deletions .github/workflows/checks.yaml
@@ -0,0 +1,33 @@
name: Check
on:
  pull_request:
  push:
    branches:
      - master

jobs:
  ruff-check:
    name: Ruff Checker
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/setup
      - run: ruff check

  ruff-format:
    name: Ruff Formatter
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/setup
      - run: ruff format

  pyright:
    name: Pyright
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/setup
      - uses: jakebailey/pyright-action@v2
        with:
          pylance-version: latest-release
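
The three jobs above map onto plain CLI runs for local use. A hedged sketch, assuming ruff and pyright were installed from requirements.txt (locally, `ruff format --check` only reports unformatted files, whereas `ruff format` rewrites them in place):

    pip install -r requirements.txt
    ruff check            # same lint pass as the Ruff Checker job
    ruff format --check   # report formatting differences without rewriting
    pyright ttg           # roughly what the Pyright job runs, honoring [tool.pyright] in pyproject.toml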
6 changes: 6 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,6 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.6.5
    hooks:
      - id: ruff
      - id: ruff-format
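
These hooks run on commit only after pre-commit is registered in a local clone. A minimal sketch of the usual one-time setup (pre-commit itself is pinned in requirements.txt further down):

    pip install -r requirements.txt
    pre-commit install          # register the git pre-commit hook
    pre-commit run --all-files  # optional: lint and format the whole tree once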
10 changes: 10 additions & 0 deletions .vscode/extensions.json
@@ -0,0 +1,10 @@
{
    "recommendations": [
        "ms-python.python",
        "ms-python.debugpy",
        "ms-python.vscode-pylance",
        "charliermarsh.ruff",
        "redhat.vscode-yaml",
        "tamasfe.even-better-toml"
    ]
}
19 changes: 19 additions & 0 deletions pyproject.toml
@@ -0,0 +1,19 @@
[tool.pyright]
strict = ["./ttg/"]

[tool.ruff.lint]
select = ["ALL"]
ignore = [
    "A002",
    "FBT001",
    "FBT002",
    "D100",
    "D103",
    "D104",
    "TRY301",
    "TRY002",
    "TRY003",
    "EM101",
    "EM102",
    "BLE001"
]
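
Every string in the ignore list is a Ruff rule code (for example, D100/D103/D104 are the missing-docstring rules and FBT001/FBT002 the boolean-trap checks); codes that are not ignored globally here show up as inline `# noqa` comments in the diffs below (D102, D105, PTH109, and so on). Ruff can print the documentation for any code, which helps when revisiting this list:

    ruff rule D100
    ruff rule FBT001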
2 changes: 2 additions & 0 deletions requirements.txt
@@ -1,5 +1,7 @@
click==8.1.7
pip-chill==1.0.3
pre-commit==3.5.0
pyinstaller==6.10.0
pyright==1.1.380
rich==13.8.1
ruff==0.6.5
8 changes: 4 additions & 4 deletions ttg/__main__.py
@@ -1,10 +1,10 @@
import sys
import os
import pathlib
import sys
from pathlib import Path

sys.path.append(os.getcwd())
sys.path.append(os.getcwd()) # noqa: PTH109
sys.path.append(
    pathlib.Path(os.path.dirname(os.path.realpath(__file__))).parent.absolute()
    Path(os.path.dirname(os.path.realpath(__file__))).parent.absolute().__str__(),  # noqa: PTH120
)

from ttg.command import command
14 changes: 7 additions & 7 deletions ttg/command.py
@@ -1,17 +1,17 @@
import sys
from pathlib import Path

from ttg.console import rich_console
from ttg.compile import compile


import click

from ttg.console import rich_console
from ttg.program import program


@click.command("tgg")
@click.argument("input", required=True)
@click.option("-f", "--file", is_flag=True, help="Treats the input as a filepath.")
@click.option("-i", "--inspect", is_flag=True, help="Display debug data.")
def command(input: str, file: bool = False, inspect: bool = False):
def command(input: str, file: bool = False, inspect: bool = False) -> None:
    # Test if "formula" is actually a filepath
    if file:
        filepath = Path(input)
@@ -25,11 +25,11 @@ def command(input: str, file: bool = False, inspect: bool = False):
            rich_console.print()
            rich_console.print(f"{exc.__class__.__name__}: ", style="bold red", end="")
            rich_console.print(exc)
            exit(-1)
            sys.exit(-1)

        formulas = filepath.read_text().splitlines()
    else:
        formulas = [input]

    for formula in formulas:
        compile(formula, inspect)
        program(formula, inspect)
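
For context, a hedged sketch of how this command ends up being invoked (assuming the truncated tail of ttg/__main__.py above finishes by calling command(), as its import suggests; formulas.txt below is only an illustrative file name):

    python -m ttg "(p AND q) THEN r"   # formula passed directly as the argument
    python -m ttg -f formulas.txt      # -f/--file: treat the input as a filepath, one formula per line
    python -m ttg -i "NOT p OR q"      # -i/--inspect: also display debug data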
Empty file added ttg/core/__init__.py
Empty file.
47 changes: 25 additions & 22 deletions ttg/core/evaluator.py
@@ -1,11 +1,15 @@
from typing import Dict, List
from __future__ import annotations

from typing import TYPE_CHECKING, Dict, List

from ttg.core.lexer import Token
from ttg.core.parser import BinaryExpr, Expr, UnaryExpr, VariableExpr

if TYPE_CHECKING:
    from ttg.core.lexer import Token

TruthTable = Dict[str, List[bool]]
"""
The Truth Table type stores a list of boolean values for each expression. This
The Truth Table type stores a list of boolean values for each expression. This
represents the different outputs of the sub-expressions of the input formula
under all the possible set of truth values for all the variables.
"""
@@ -18,15 +22,13 @@
"""


def truth_table_variables(variables: List[str]):
    """
    Generates all the possible set of truth values (cartesian product) for all
    the given variables.
def truth_table_variables(variables: list[str]) -> list[TruthValues]:
    """Generate all truth value combinations for all the given variables.
    For convenience, instead of returning `TruthTable`, it returns a list of
    `TruthValues` which can be used directly in the Evaluator.
    """
    products: List[TruthValues] = list()
    products: list[TruthValues] = []

    # Iterate all numbers from 0 to 2^n - 1 then use the individual bits in their
    # binary representation as the True & False values.
@@ -42,34 +44,37 @@


class Evaluator:
    """
    """Interpreter for the Syntax Tree of a Formula.
    A recursive interpreter implementation for traversing the
    Abstract Syntax Tree (AST) of a propositional logic formula and
    calculating the individual result of each node at every level.
    """

    values: TruthValues

    def eval(self, expr: Expr) -> bool:
    def eval(self, expr: Expr) -> bool:  # noqa: D102
        if isinstance(expr, VariableExpr):
            return self.eval_variable(expr)
        if isinstance(expr, UnaryExpr):
            return self.eval_unary(expr)
        if isinstance(expr, BinaryExpr):
            return self.eval_binary(expr)
        return False

    def eval_variable(self, expr: VariableExpr) -> bool:
    def eval_variable(self, expr: VariableExpr) -> bool:  # noqa: D102
        return bool(self.values.get(expr.name.value))

    def eval_unary(self, expr: UnaryExpr) -> bool:
    def eval_unary(self, expr: UnaryExpr) -> bool:  # noqa: D102
        value = self.eval(expr.right)
        if expr.operator.type == "not":
            value = not value
        self.values[str(expr)] = value  # save result for each expression
        return value

    def eval_binary(self, expr: BinaryExpr) -> bool:
    def eval_binary(self, expr: BinaryExpr) -> bool:  # noqa: D102
        left, right = self.eval(expr.left), self.eval(expr.right)
        value = False
        if expr.operator.type == "and":
            value = left and right
        if expr.operator.type == "or":
@@ -80,34 +85,32 @@ def eval_binary(self, expr: BinaryExpr) -> bool:
        return value

    def evaluate(self, tree: Expr, values: TruthValues) -> TruthValues:
        """
        """Evaluate & Store the sub-expressions of a formula.
        Given the root node of an expression tree and the truth values for all
        the variables in the expression tree, it returns an extended set of
        truth values including the results of the sub-expressions of the
        propositional logic formula
        propositional logic formula.
        """

        self.values = dict(values)
        self.eval(tree)
        return dict(self.values)


def evaluate(tokens: List[Token], tree: Expr) -> TruthTable:
def evaluate(tokens: list[Token], tree: Expr) -> TruthTable:
    # wrapper function for convenience

    # filter & get variable names from list of tokens
    variables = list(
        map(lambda x: x.value, filter(lambda x: x.type == "variable", tokens))
    )
    variables = [x.value for x in filter(lambda x: x.type == "variable", tokens)]

    table: TruthTable = dict()
    table: TruthTable = {}
    evaluator = Evaluator()

    # for each truth values combination of the variables, evaluate the
    # expression tree and aggregate the result into a truth table
    for truth_values in truth_table_variables(variables):
        values = evaluator.evaluate(tree, truth_values)
        for key, value in values.items():
            table.setdefault(key, list()).append(value)
            table.setdefault(key, []).append(value)

    return table
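
The bit-counting idea described in the truth_table_variables comments can be shown on its own. A standalone sketch of the same technique, not the module's actual code (the hidden lines above may order bits or rows differently):

    from typing import Dict, List

    def enumerate_truth_values(variables: List[str]) -> List[Dict[str, bool]]:
        """Build every True/False assignment by reading the bits of 0 .. 2^n - 1."""
        rows: List[Dict[str, bool]] = []
        for i in range(2 ** len(variables)):
            # bit j of the counter decides the value of the j-th variable in this row
            rows.append({var: bool((i >> j) & 1) for j, var in enumerate(variables)})
        return rows

    print(enumerate_truth_values(["p", "q"]))
    # [{'p': False, 'q': False}, {'p': True, 'q': False},
    #  {'p': False, 'q': True}, {'p': True, 'q': True}]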
42 changes: 24 additions & 18 deletions ttg/core/lexer.py
@@ -1,6 +1,8 @@
from typing import List, Literal, Tuple
from dataclasses import dataclass
from __future__ import annotations

import re
from dataclasses import dataclass
from typing import Literal

left_paren_regex = r"(?P<left_paren>\()"
right_paren_regex = r"(?P<right_paren>\))"
@@ -11,8 +13,8 @@
and_regex = r"(?P<and>\bAND\b|&&|&|\^|∧)"
"Regex for AND operators: `AND`, `and`, `&`, `&&`, `^`, `∧`"

or_regex = r"(?P<or>\bOR\b|\|\||\||v|∨)"
"Regex for OR operators: `OR`, `or`, `|`, `||`, `v`, `∨`"
or_regex = r"(?P<or>\bOR\b|\|\||\||v|∨)" # noqa: RUF001
"Regex for OR operators: `OR`, `or`, `|`, `||`, `v`, `∨`" # noqa: RUF001

then_regex = r"(?P<then>\bTHEN\b|>|->|→)"
"Regex for THEN operators: `THEN`, `then`, `>`, `->`, `→`"
@@ -22,7 +24,7 @@

invalid_regex = r"(?P<invalid>[\S]+)"

combined_regex = "|".join(
combined_regex = "|".join( # noqa: FLY002
    [
        left_paren_regex,
        right_paren_regex,
@@ -32,35 +34,41 @@
        then_regex,
        variable_regex,
        invalid_regex,
    ]
    ],
)
"Combined Regex for iterating tokens"

TokenType = Literal[
"left_paren", "right_paren", "not", "and", "or", "then", "variable", "invalid"
"left_paren",
"right_paren",
"not",
"and",
"or",
"then",
"variable",
"invalid",
]


@dataclass
class Token:
"Output data of the lexer after tokenizing the propositional logic formula"
"""Output data of the lexer after tokenizing the propositional logic formula."""

type: TokenType
"The classification of the value of the token"

value: str
"The matching input value of the token in its original input"

span: Tuple[int, int]
span: tuple[int, int]
"The position range of the token in its original input"

def __str__(self) -> str:
def __str__(self) -> str: # noqa: D105
return self.value


def tokenize(formula: str) -> List[Token]:
"""
Turns the input formula into a sequence of tokens.
def tokenize(formula: str) -> list[Token]:
"""Turn the input formula into a sequence of tokens.
The tokenize function is resilient and will not raise errors for invalid tokens
but will instead create a `Token(type="invalid")` added in the list.
@@ -74,11 +82,9 @@ def tokenize(formula: str) -> List[Token]:
    matches = filter(lambda match: match.group(), query)

    # map regex matches to tokens
    tokens = map(
        lambda match: Token(
            type=match.lastgroup, value=match.group(), span=match.span()
        ),
        matches,
    tokens = (
        Token(type=match.lastgroup, value=match.group(), span=match.span())  # type: ignore reportArgumentType
        for match in matches
    )

    return list(tokens)
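
The tokenizer relies on named alternation groups: re.finditer reports which branch matched through match.lastgroup. A self-contained sketch of that mechanism with a simplified pattern (the variable and invalid branches are stand-ins, not the module's combined_regex):

    import re

    pattern = (
        r"(?P<not>\bNOT\b)"
        r"|(?P<and>\bAND\b)"
        r"|(?P<variable>\b[A-Za-z][A-Za-z0-9_]*\b)"
        r"|(?P<invalid>\S+)"
    )

    for match in re.finditer(pattern, "NOT p AND q!"):
        if match.group():  # skip empty matches, mirroring the filter in tokenize
            print(match.lastgroup, repr(match.group()), match.span())
    # not 'NOT' (0, 3)
    # variable 'p' (4, 5)
    # and 'AND' (6, 9)
    # variable 'q' (10, 11)
    # invalid '!' (11, 12)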
