diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml
index 28fbc51..b7884e8 100644
--- a/.github/workflows/security-scan.yml
+++ b/.github/workflows/security-scan.yml
@@ -1,154 +1,82 @@
 name: Security Scan
 
 on:
-  pull_request_target:
-    types: [opened, synchronize, reopened]
+  pull_request:
     branches: [main]
 
 permissions:
   contents: read
   pull-requests: write
-  issues: write
-  checks: write
-  security-events: write
-  statuses: write
 
 jobs:
   security-scan:
     runs-on: ubuntu-latest
-
+
     steps:
-      - name: Checkout PR
+      - name: Checkout PR Code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
-
+
      - name: Set up Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
 
-      - name: Cache pip packages
-        uses: actions/cache@v4
-        with:
-          path: ~/.cache/pip
-          key: ${{ runner.os }}-pip-${{ hashFiles('/requirements.txt') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-
-
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
-          pip install bandit safety
+          pip install bandit pip-audit
+
+      - name: Run Bandit
+        run: |
+          echo "Running Bandit..."
+          bandit -r . -f txt -o bandit-results.txt || true
+
+      - name: Run pip-audit
+        run: |
+          echo "Running pip-audit..."
+          pip-audit -r requirements.txt > pip-audit-results.txt || true
 
-      - name: Run Security Scan
-        id: security_scan
-        env:
-          BANDIT_SKIP_IDS: ${{ secrets.BANDIT_SKIP_IDS }}
+      - name: Combine results
        run: |
-          # Run bandit recursively on all Python files
-          echo "Running Bandit security scan..."
-
-          bandit -r . \
-            --severity-level medium \
-            --skip "${BANDIT_SKIP_IDS}" \
-            -f txt \
-            -o bandit-results.txt || true
-
-          # Run Safety check on requirements
-          if [ -f "requirements.txt" ]; then
-            echo "Checking dependencies with Safety..."
-            safety scan -r requirements.txt --output text > safety-results.txt || true
-          fi
-
-          # Combine results
          echo "🔒 Security Scan Results" > security-scan-results.txt
          echo "=========================" >> security-scan-results.txt
-          echo "" >> security-scan-results.txt
-
-          if [ -f "bandit-results.txt" ]; then
-            echo "Bandit Scan Results:" >> security-scan-results.txt
-            echo "-------------------" >> security-scan-results.txt
-            cat bandit-results.txt >> security-scan-results.txt
-            echo "" >> security-scan-results.txt
-          fi
-
-          if [ -f "safety-results.txt" ]; then
-            echo "Dependency Check Results:" >> security-scan-results.txt
-            echo "-----------------------" >> security-scan-results.txt
-            cat safety-results.txt >> security-scan-results.txt
-          fi
-
-          # Check for critical issues
-          if grep -iE "Severity\:\ High|Severity\:\ Critical" bandit-results.txt > /dev/null 2>&1; then
-            echo "vulnerabilities_found=true" >> $GITHUB_OUTPUT
-          elif [ -f "safety-results.txt" ] && grep -iE "critical" safety-results.txt > /dev/null 2>&1; then
-            echo "vulnerabilities_found=true" >> $GITHUB_OUTPUT
-          else
-            echo "vulnerabilities_found=false" >> $GITHUB_OUTPUT
-          fi
-
-      - name: Create comment body
-        id: create-comment
-        if: always()
-        run: |
-          if [ -f security-scan-results.txt ]; then
-            SCAN_RESULTS=$(cat security-scan-results.txt)
-            if [ "${{ steps.security_scan.outputs.vulnerabilities_found }}" == "true" ]; then
-              echo 'comment_body<<EOF' >> $GITHUB_ENV
-              echo '## 🔒 Security Scan Results' >> $GITHUB_ENV
-              echo '' >> $GITHUB_ENV
-              echo '```' >> $GITHUB_ENV
-              echo "$SCAN_RESULTS" >> $GITHUB_ENV
-              echo '```' >> $GITHUB_ENV
-              echo '' >> $GITHUB_ENV
-              echo '⛔️ **Critical vulnerabilities detected. Please review and address these security issues before merging.**' >> $GITHUB_ENV
-              echo '' >> $GITHUB_ENV
-              echo '### Next Steps:' >> $GITHUB_ENV
-              echo '1. Review each critical finding above and fix them according to OWASP top 10 mitigations.' >> $GITHUB_ENV
-              echo 'EOF' >> $GITHUB_ENV
-            else
-              echo 'comment_body<<EOF' >> $GITHUB_ENV
-              echo '## 🔒 Security Scan Results' >> $GITHUB_ENV
-              echo '' >> $GITHUB_ENV
-              echo '```' >> $GITHUB_ENV
-              echo "$SCAN_RESULTS" >> $GITHUB_ENV
-              echo '```' >> $GITHUB_ENV
-              echo '' >> $GITHUB_ENV
-              echo '✅ **No critical security issues detected.**' >> $GITHUB_ENV
-              echo '' >> $GITHUB_ENV
-              echo 'The code has passed all critical security checks.' >> $GITHUB_ENV
-              echo 'EOF' >> $GITHUB_ENV
-            fi
-          else
-            echo 'comment_body<<EOF' >> $GITHUB_ENV
-            echo '## 🔒 Security Scan Results' >> $GITHUB_ENV
-            echo '' >> $GITHUB_ENV
-            echo '⚠️ **Error: The security scan failed to complete. Please review the workflow logs for more information.**' >> $GITHUB_ENV
-            echo 'EOF' >> $GITHUB_ENV
-          fi
+          echo "" >> security-scan-results.txt
+          echo "📘 Bandit Scan:" >> security-scan-results.txt
+          echo "-------------------" >> security-scan-results.txt
+          cat bandit-results.txt >> security-scan-results.txt
 
-      - name: Comment PR
-        uses: peter-evans/create-or-update-comment@v4
-        if: always()
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: ${{ env.comment_body }}
+          echo "" >> security-scan-results.txt
+          echo "📦 pip-audit:" >> security-scan-results.txt
+          echo "-------------------" >> security-scan-results.txt
+          cat pip-audit-results.txt >> security-scan-results.txt
 
-      - name: Upload scan artifacts
-        if: always()
+      - name: Upload scan results
        uses: actions/upload-artifact@v4
        with:
          name: security-scan-results
          path: |
            security-scan-results.txt
            bandit-results.txt
-            safety-results.txt
+            pip-audit-results.txt
          retention-days: 5
 
-      - name: Fail if vulnerabilities found
-        if: steps.security_scan.outputs.vulnerabilities_found == 'true'
+      - name: Create PR comment body
+        id: comment_body
        run: |
-          echo "::error::Critical security vulnerabilities were detected. Please review the findings and address them before merging."
-          exit 1
+          echo 'comment<<EOF' >> $GITHUB_ENV
+          echo '## 🔒 Security Scan Summary' >> $GITHUB_ENV
+          echo '' >> $GITHUB_ENV
+          echo '```' >> $GITHUB_ENV
+          cat security-scan-results.txt >> $GITHUB_ENV
+          echo '```' >> $GITHUB_ENV
+          echo 'EOF' >> $GITHUB_ENV
+
+      - name: Post Comment on PR
+        uses: peter-evans/create-or-update-comment@v4
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          body: ${{ env.comment }}
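Note that the rewritten workflow is report-only: the old `vulnerabilities_found` output and its `exit 1` gate are gone, so high-severity findings no longer fail the job. If a blocking gate is wanted again, one lightweight option is to parse Bandit's JSON report in a follow-up step. A minimal sketch, assuming an additional `bandit -r . -f json -o bandit-results.json` run that is not part of this diff:

```
# check_bandit.py: sketch of a CI gate over Bandit's JSON report.
# Assumes a prior step ran: bandit -r . -f json -o bandit-results.json
import json
import sys

with open("bandit-results.json") as fh:
    report = json.load(fh)

# Bandit lists findings under "results"; each entry carries an
# "issue_severity" of LOW, MEDIUM, or HIGH.
high = [r for r in report.get("results", []) if r.get("issue_severity") == "HIGH"]

for r in high:
    print(f"{r['filename']}:{r['line_number']}: {r['test_id']} {r['issue_text']}")

# Any non-zero exit code fails the workflow step.
sys.exit(1 if high else 0)
```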
diff --git a/.gitignore b/.gitignore
index 95d4ec6..ac307b1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,4 +3,4 @@ __pycache__/
 env/
 .env
 .ipynb_checkpoints/
-*.db
+logs/app.log
diff --git a/api/sync.py b/api/sync.py
new file mode 100644
index 0000000..966e381
--- /dev/null
+++ b/api/sync.py
@@ -0,0 +1,59 @@
+from datetime import datetime, timezone
+from models.user import UserProfile
+from models import db
+from flask import Blueprint, jsonify, request
+from flask import current_app as app
+
+sync_bp = Blueprint('sync', __name__)
+
+
+@sync_bp.route('/update', methods=['POST'])
+def update_sync_time():
+    try:
+        user_id = request.json.get('user_id')
+        if not user_id:
+            app.logger.warning("[sync/update] missing user_id in payload")
+            return jsonify({'error': 'Missing user_id'}), 400
+
+        user = db.session.get(UserProfile, user_id)
+        if not user:
+            app.logger.warning(f"[sync/update] user not found: {user_id}")
+            return jsonify({'error': 'User not found'}), 404
+
+        user.last_synced = datetime.now(timezone.utc)
+        db.session.commit()
+        ts = user.last_synced.strftime('%Y-%m-%dT%H:%M:%SZ')
+        app.logger.info(f"[sync/update] updated last_synced for user {user_id}: {ts}")
+        return jsonify({'message': 'Last sync time updated',
+                        'user_id': str(user_id),
+                        'last_synced': ts
+                        }), 200
+    except Exception as e:
+        app.logger.error(f"[sync/update] error: {e}", exc_info=True)
+        return jsonify({'error': 'Internal server error'}), 500
+
+
+
+@sync_bp.route('/last', methods=['GET'])
+def get_last_sync_time():
+    try:
+        user_id = request.args.get('user_id')
+        if not user_id:
+            app.logger.warning("[sync/last] missing user_id in query")
+            return jsonify({'error': 'Missing user_id'}), 400
+
+        user = db.session.get(UserProfile, user_id)
+        if not user:
+            app.logger.warning(f"[sync/last] user not found: {user_id}")
+            return jsonify({'error': 'User not found'}), 404
+        ts = user.last_synced.strftime('%Y-%m-%dT%H:%M:%SZ') if user.last_synced else None
+        app.logger.info(f"[sync/last] fetched last_synced for user {user_id}: {ts}")
+        return jsonify({
+            'user_id': str(user.id),
+            'last_synced': ts
+        }), 200
+    except Exception as e:
+        app.logger.error(f"[sync/last] error fetching sync status: {e}", exc_info=True)
+        return jsonify({'error': 'Internal server error'}), 500
+
+
diff --git a/app.py b/app.py
index ddacb32..3b2b2ee 100644
--- a/app.py
+++ b/app.py
@@ -1,4 +1,5 @@
 from flask import Flask, jsonify, session, render_template, request, redirect
+from flask_migrate import Migrate
 from flask_cors import CORS
 from api.routes import api
 from api.goals import goals_bp
@@ -8,18 +9,38 @@
 from api.activity import activity_bp
 from models import db
 from dotenv import load_dotenv
+from api.sync import sync_bp
+from logging.handlers import RotatingFileHandler
+import logging
 import os
 import pyrebase
 
 # Import scripts here
 from scripts.add_default_user import add_default_user
 
+# ensure a folder for logs exists
+os.makedirs('logs', exist_ok=True)
+
 # Load environment variables from .env file
 load_dotenv()
 
 app = Flask(__name__)
 CORS(app, resources={r"/api/*": {"origins": "http://localhost:5173"}})
+
+# set up a rotating file handler
+file_handler = RotatingFileHandler(
+    'logs/app.log', maxBytes=10*1024, backupCount=5
+)
+file_handler.setFormatter(logging.Formatter(
+    '%(asctime)s %(levelname)s in %(module)s: %(message)s'
+))
+file_handler.setLevel(logging.INFO)
+
+app.logger.addHandler(file_handler)
+app.logger.setLevel(logging.INFO)
+app.logger.info("🟢 App startup complete")
+
 
 # Firebase configuration
 config = {
     'apiKey': os.getenv('FIREBASE_API_KEY'),
@@ -42,6 +63,8 @@
 # Initialize database
 db.init_app(app)
 
+migrate = Migrate(app, db)
+
 with app.app_context():
     db.create_all()
@@ -53,6 +76,7 @@
 app.register_blueprint(goals_bp, url_prefix='/api/goals')
 app.register_blueprint(dashboard_bp, url_prefix='/api/dashboard')
 app.register_blueprint(profile_api, url_prefix='/api/profile')
+app.register_blueprint(sync_bp, url_prefix='/api/synced')
 app.register_blueprint(body_insight_bp, url_prefix='/api/body_insight')
 app.register_blueprint(activity_bp, url_prefix='/api/activity')
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 0000000..f692378
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,27 @@
+import pytest
+from app import app as flask_app, db
+from models.user import UserProfile
+
+@pytest.fixture
+def app():
+    # tell Flask to use testing config & in-memory sqlite
+    flask_app.config.update({
+        "TESTING": True,
+        "SQLALCHEMY_DATABASE_URI": "sqlite:///:memory:",
+        "SQLALCHEMY_TRACK_MODIFICATIONS": False,
+    })
+    with flask_app.app_context():
+        db.create_all()
+        yield flask_app
+        db.drop_all()
+
+@pytest.fixture
+def client(app):
+    return app.test_client()
+
+@pytest.fixture
+def sample_user(app):
+    u = UserProfile(name="Test", account="acct1", birthDate="2000-01-01", gender="F")
+    db.session.add(u)
+    db.session.commit()
+    return u
diff --git a/docs/sync-api.md b/docs/sync-api.md
new file mode 100644
index 0000000..75034d3
--- /dev/null
+++ b/docs/sync-api.md
@@ -0,0 +1,85 @@
+# Time Sync API
+
+
+## 1. Overview
+
+Records a user's last-sync timestamp and lets clients retrieve it.
+
+### Use Cases:
+ - Audit logs: Verify the timestamp of each data record
+ - Session management
+ - Data consistency: Coordinate time-sensitive updates
+
+
+## 2. Getting Started
+
+### Base URL
+
+  Base path: `http://localhost:5000/api/synced`
+
+### Authentication
+
+  All requests require an `Authorization` header with a Bearer token:
+  Authorization: Bearer `API-TOKEN`
+
+## 3. Endpoints
+### GET
+  Purpose: Fetch the date/time of the user's last recorded sync.
+
+### URL & Method:
+  GET /last
+
+### Parameter
+  - user_id: int (query string)
+
+### Success Response (200 OK)
+  {
+    "user_id": "???",
+    "last_synced": "????-??-??T??:??:??Z"
+  }
+
+### Error Response
+  - 401 Unauthorized
+    {"error": "Invalid or missing token"}
+  - 404 Not Found
+    {"error": "User not found"}
+  - 500 Internal Server Error
+    {"error": "Internal server error"}
+
+### POST
+  Purpose: Record a new sync timestamp for a device.
+
+### URL & Method:
+  POST /update
+
+### Parameter
+  - user_id: int (JSON body)
+  - The timestamp itself is set server-side in UTC.
+
+### Success Response (200 OK)
+  {
+    "message": "Last sync time updated",
+    "user_id": "???",
+    "last_synced": "????-??-??T??:??:??Z"
+  }
+
+### Error Response
+  - 400 Bad Request
+    {"error": "Missing user_id"}
+  - 404 Not Found
+    {"error": "User not found"}
+  - 500 Internal Server Error
+    {"error": "Internal server error"}
+
+## 4. Data Models
+  - user_id: int
+  - last_synced: string (UTC, "YYYY-MM-DDTHH:MM:SSZ")
+
+## 5. Error Handling
+  - Always check the HTTP status code first.
+  - Inspect the error field in the response JSON for details.
+  - Common remedies:
+      401 → Refresh or correct your token
+      400 → Validate your payload structure and fields
+      404 → Ensure user_id exists or call POST first
+      500 → Retry after a brief pause; contact support if persistent
\ No newline at end of file
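For reference, a minimal client sketch against the endpoints exactly as `api/sync.py` implements them (the blueprint is mounted at `/api/synced`, so the paths are `/update` and `/last`). This assumes the dev server from `app.py` is running on `http://localhost:5000` and that the `requests` package is installed; no auth header is sent because the blueprint itself does not enforce one:

```
# sync_client.py: sketch of calling the sync endpoints from Python.
# Assumes the Flask app is running locally and `requests` is installed.
import requests

BASE = "http://localhost:5000/api/synced"

# Record a new sync timestamp for user 1; the server sets the time itself.
resp = requests.post(f"{BASE}/update", json={"user_id": 1})
print(resp.status_code, resp.json())  # e.g. 200 {'message': 'Last sync time updated', ...}

# Read the last recorded sync timestamp back.
resp = requests.get(f"{BASE}/last", params={"user_id": 1})
print(resp.status_code, resp.json())  # e.g. 200 {'user_id': '1', 'last_synced': '...'}
```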
diff --git a/instance/goals_backup.db b/instance/goals_backup.db
new file mode 100644
index 0000000..bad4a7a
Binary files /dev/null and b/instance/goals_backup.db differ
diff --git a/migrations/README b/migrations/README
new file mode 100644
index 0000000..0e04844
--- /dev/null
+++ b/migrations/README
@@ -0,0 +1 @@
+Single-database configuration for Flask.
diff --git a/migrations/alembic.ini b/migrations/alembic.ini
new file mode 100644
index 0000000..ec9d45c
--- /dev/null
+++ b/migrations/alembic.ini
@@ -0,0 +1,50 @@
+# A generic, single database configuration.
+
+[alembic]
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic,flask_migrate
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[logger_flask_migrate]
+level = INFO
+handlers =
+qualname = flask_migrate
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/migrations/env.py b/migrations/env.py
new file mode 100644
index 0000000..4c97092
--- /dev/null
+++ b/migrations/env.py
@@ -0,0 +1,113 @@
+import logging
+from logging.config import fileConfig
+
+from flask import current_app
+
+from alembic import context
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+fileConfig(config.config_file_name)
+logger = logging.getLogger('alembic.env')
+
+
+def get_engine():
+    try:
+        # this works with Flask-SQLAlchemy<3 and Alchemical
+        return current_app.extensions['migrate'].db.get_engine()
+    except (TypeError, AttributeError):
+        # this works with Flask-SQLAlchemy>=3
+        return current_app.extensions['migrate'].db.engine
+
+
+def get_engine_url():
+    try:
+        return get_engine().url.render_as_string(hide_password=False).replace(
+            '%', '%%')
+    except AttributeError:
+        return str(get_engine().url).replace('%', '%%')
+
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+config.set_main_option('sqlalchemy.url', get_engine_url())
+target_db = current_app.extensions['migrate'].db
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def get_metadata():
+    if hasattr(target_db, 'metadatas'):
+        return target_db.metadatas[None]
+    return target_db.metadata
+
+
+def run_migrations_offline():
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url, target_metadata=get_metadata(), literal_binds=True
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online():
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    # this callback is used to prevent an auto-migration from being generated
+    # when there are no changes to the schema
+    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
+    def process_revision_directives(context, revision, directives):
+        if getattr(config.cmd_opts, 'autogenerate', False):
+            script = directives[0]
+            if script.upgrade_ops.is_empty():
+                directives[:] = []
+                logger.info('No changes in schema detected.')
+
+    conf_args = current_app.extensions['migrate'].configure_args
+    if conf_args.get("process_revision_directives") is None:
+        conf_args["process_revision_directives"] = process_revision_directives
+
+    connectable = get_engine()
+
+    with connectable.connect() as connection:
+        context.configure(
+            connection=connection,
+            target_metadata=get_metadata(),
+            **conf_args
+        )
+
+        with context.begin_transaction():
+            context.run_migrations()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/migrations/script.py.mako b/migrations/script.py.mako
new file mode 100644
index 0000000..2c01563
--- /dev/null
+++ b/migrations/script.py.mako
@@ -0,0 +1,24 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade():
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade():
+    ${downgrades if downgrades else "pass"}
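With `Migrate(app, db)` registered in `app.py`, schema changes such as the new `last_synced` column are normally generated and applied via `flask db migrate` and `flask db upgrade`. Flask-Migrate also exposes the same operations as Python functions; a sketch of applying pending revisions programmatically, assuming the default `migrations/` directory above:

```
# apply_migrations.py: sketch of running pending migrations in code,
# as an alternative to the `flask db upgrade` CLI command.
from flask_migrate import upgrade

from app import app  # assumes app.py imports cleanly (env vars set, etc.)

with app.app_context():
    # Applies any pending Alembic revisions from migrations/ to the
    # database configured on the app; equivalent to `flask db upgrade`.
    upgrade()
```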
diff --git a/models/user.py b/models/user.py
index 78f4e21..196fd85 100644
--- a/models/user.py
+++ b/models/user.py
@@ -1,4 +1,5 @@
 from models import db
+from datetime import datetime, timezone
 
 class UserProfile(db.Model):
     __tablename__ = 'user_profile'
diff --git a/tests/sync_test.py b/tests/sync_test.py
new file mode 100644
index 0000000..7008204
--- /dev/null
+++ b/tests/sync_test.py
@@ -0,0 +1,45 @@
+import json
+from datetime import datetime
+import pytest
+
+API = "/api/synced"
+
+def test_update_missing_id(client):
+    resp = client.post(f"{API}/update", json={})
+    assert resp.status_code == 400
+    assert "Missing user_id" in resp.get_data(as_text=True)
+
+def test_update_not_found(client):
+    resp = client.post(f"{API}/update", json={"user_id": 999})
+    assert resp.status_code == 404
+    assert "User not found" in resp.get_data(as_text=True)
+
+def test_update_success(client, sample_user):
+    resp = client.post(f"{API}/update", json={"user_id": sample_user.id})
+    data = resp.get_json()
+    assert resp.status_code == 200
+    assert data["user_id"] == str(sample_user.id)
+
+    dt = datetime.strptime(data["last_synced"], "%Y-%m-%dT%H:%M:%SZ")
+    assert abs((dt - datetime.utcnow()).total_seconds()) < 5
+
+def test_last_missing_id(client):
+    resp = client.get(f"{API}/last")
+    assert resp.status_code == 400
+    assert "Missing user_id" in resp.get_data(as_text=True)
+
+def test_last_not_found(client):
+    resp = client.get(f"{API}/last?user_id=999")
+    assert resp.status_code == 404
+    assert "User not found" in resp.get_data(as_text=True)
+
+def test_last_success(client, sample_user):
+    # first set a sync
+    client.post(f"{API}/update", json={"user_id": sample_user.id})
+    # then retrieve
+    resp = client.get(f"{API}/last?user_id={sample_user.id}")
+    data = resp.get_json()
+    assert resp.status_code == 200
+    # the last_synced we get back matches the DB
+    assert data["user_id"] == str(sample_user.id)
+    assert "last_synced" in data
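One caveat on `test_update_success`: `datetime.utcnow()` is deprecated as of Python 3.12. A timezone-aware variant of the same freshness check, as a sketch:

```
# Sketch: timezone-aware version of the freshness assertion in
# test_update_success, avoiding the deprecated datetime.utcnow().
from datetime import datetime, timezone

def assert_recent(last_synced: str, tolerance_s: float = 5.0) -> None:
    # The API formats timestamps as UTC with a trailing 'Z'; attach tzinfo
    # explicitly so the subtraction below compares aware datetimes.
    dt = datetime.strptime(last_synced, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
    assert abs((dt - datetime.now(timezone.utc)).total_seconds()) < tolerance_s
```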