diff --git a/.babelrc b/.babelrc deleted file mode 100644 index c13c5f6..0000000 --- a/.babelrc +++ /dev/null @@ -1,3 +0,0 @@ -{ - "presets": ["es2015"] -} diff --git a/.cursor b/.cursor new file mode 120000 index 0000000..0d3d27e --- /dev/null +++ b/.cursor @@ -0,0 +1 @@ +ai \ No newline at end of file diff --git a/.eslintignore b/.eslintignore deleted file mode 100644 index 57af935..0000000 --- a/.eslintignore +++ /dev/null @@ -1,4 +0,0 @@ -node_modules -coverage -build -template diff --git a/.eslintrc b/.eslintrc deleted file mode 100644 index 010c21f..0000000 --- a/.eslintrc +++ /dev/null @@ -1,190 +0,0 @@ -{ - "parser": "babel-eslint", - - "env": { - "browser": true, - "node": true, - "es6": true - }, - - "ecmaFeatures": { - "arrowFunctions": true, - "binaryLiterals": true, - "blockBindings": true, - "classes": false, - "defaultParams": true, - "destructuring": true, - "forOf": true, - "generators": true, - "modules": true, - "objectLiteralComputedProperties": true, - "objectLiteralDuplicateProperties": true, - "objectLiteralShorthandMethods": true, - "objectLiteralShorthandProperties": true, - "octalLiterals": true, - "regexUFlag": true, - "regexYFlag": true, - "spread": true, - "superInFunctions": false, - "templateStrings": true, - "unicodeCodePointEscapes": true, - "globalReturn": true, - "jsx": true - }, - - "rules": { - "block-scoped-var": [0], - "brace-style": [2, "1tbs", { "allowSingleLine": true }], - "camelcase": [0], - "comma-dangle": [2, "never"], - "comma-spacing": [2], - "comma-style": [2, "last"], - "complexity": [0, 11], - "consistent-return": [2], - "consistent-this": [0, "that"], - "curly": [2, "multi-line"], - "default-case": [2], - "dot-notation": [2, { "allowKeywords": true }], - "eol-last": [2], - "eqeqeq": [2], - "func-names": [0], - "func-style": [0, "declaration"], - "generator-star-spacing": [2, "after"], - "guard-for-in": [0], - "handle-callback-err": [0], - "key-spacing": [2, { "beforeColon": false, "afterColon": true }], - "quotes": [2, "single", "avoid-escape"], - "max-depth": [0, 4], - "max-len": [0, 80, 4], - "max-nested-callbacks": [0, 2], - "max-params": [0, 3], - "max-statements": [0, 10], - "new-parens": [2], - "new-cap": [0], - "newline-after-var": [0], - "no-alert": [2], - "no-array-constructor": [2], - "no-bitwise": [0], - "no-caller": [2], - "no-catch-shadow": [2], - "no-cond-assign": [2], - "no-console": [0], - "no-constant-condition": [1], - "no-continue": [2], - "no-control-regex": [2], - "no-debugger": [2], - "no-delete-var": [2], - "no-div-regex": [0], - "no-dupe-args": [2], - "no-dupe-keys": [2], - "no-duplicate-case": [2], - "no-else-return": [0], - "no-empty": [2], - "no-empty-character-class": [2], - "no-empty-label": [2], - "no-eq-null": [0], - "no-eval": [2], - "no-ex-assign": [2], - "no-extend-native": [1], - "no-extra-bind": [2], - "no-extra-boolean-cast": [2], - "no-extra-semi": [1], - "no-fallthrough": [2], - "no-floating-decimal": [2], - "no-func-assign": [2], - "no-implied-eval": [2], - "no-inline-comments": [0], - "no-inner-declarations": [2, "functions"], - "no-invalid-regexp": [2], - "no-irregular-whitespace": [2], - "no-iterator": [2], - "no-label-var": [2], - "no-labels": [2], - "no-lone-blocks": [2], - "no-lonely-if": [2], - "no-loop-func": [2], - "no-mixed-requires": [0, false], - "no-mixed-spaces-and-tabs": [2, false], - "no-multi-spaces": [2], - "no-multi-str": [2], - "no-multiple-empty-lines": [2, { "max": 2 }], - "no-native-reassign": [1], - "no-negated-in-lhs": [2], - "no-nested-ternary": [0], - "no-new": [2], - 
"no-new-func": [2], - "no-new-object": [2], - "no-new-require": [0], - "no-new-wrappers": [2], - "no-obj-calls": [2], - "no-octal": [2], - "no-octal-escape": [2], - "no-param-reassign": [2], - "no-path-concat": [0], - "no-plusplus": [0], - "no-process-env": [0], - "no-process-exit": [2], - "no-proto": [2], - "no-redeclare": [2], - "no-regex-spaces": [2], - "no-reserved-keys": [0], - "no-restricted-modules": [0], - "no-return-assign": [2], - "no-script-url": [2], - "no-self-compare": [0], - "no-sequences": [2], - "no-shadow": [2], - "no-shadow-restricted-names": [2], - "no-spaced-func": [2], - "no-sparse-arrays": [2], - "no-sync": [0], - "no-ternary": [0], - "no-throw-literal": [2], - "no-trailing-spaces": [2], - "no-undef": [2], - "no-undef-init": [2], - "no-undefined": [0], - "no-underscore-dangle": [2], - "no-unreachable": [2], - "no-unused-expressions": [2], - "no-unused-vars": [1, { "vars": "all", "args": "after-used" }], - "no-use-before-define": [2], - "no-void": [0], - "no-warning-comments": [0, { "terms": ["todo", "fixme", "xxx"], "location": "start" }], - "no-with": [2], - "no-extra-parens": [0], - "one-var": [0], - "operator-assignment": [0, "always"], - "operator-linebreak": [2, "after"], - "padded-blocks": [0], - "quote-props": [0], - "radix": [0], - "semi": [2], - "semi-spacing": [2, { "before": false, "after": true }], - "sort-vars": [0], - "space-after-keywords": [2, "always"], - "space-before-function-paren": [2, { "anonymous": "always", "named": "always" }], - "space-before-blocks": [0, "always"], - "space-in-brackets": [0, "never", { - "singleValue": true, - "arraysInArrays": false, - "arraysInObjects": false, - "objectsInArrays": true, - "objectsInObjects": true, - "propertyName": false - }], - "space-in-parens": [2, "never"], - "space-infix-ops": [2], - "space-return-throw-case": [2], - "space-unary-ops": [2, { "words": true, "nonwords": false }], - "spaced-line-comment": [0, "always"], - "strict": [2, "never"], - "use-isnan": [2], - "valid-jsdoc": [0], - "valid-typeof": [2], - "vars-on-top": [0], - "wrap-iife": [2], - "wrap-regex": [2], - "yoda": [2, "never", { "exceptRange": true }] - } -} diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 0d292a2..0000000 --- a/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -# Logs -logs -*.log - -# Runtime data -pids -*.pid -*.seed - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage - -# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (http://nodejs.org/api/addons.html) -build - -# Dependency directory -# https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git -node_modules - diff --git a/.npmignore b/.npmignore deleted file mode 100644 index 4aa3726..0000000 --- a/.npmignore +++ /dev/null @@ -1,7 +0,0 @@ -/lib -/coverage -/test -/.eslint* -/.gitignore -/.npmignore -/circle.yml diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 380eb4e..0000000 --- a/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 cloverfield-tools - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the 
Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/README.md b/README.md index 1cdc271..321267e 100644 --- a/README.md +++ b/README.md @@ -1,66 +1,961 @@ -# cf-package [![Circle CI](https://circleci.com/gh/cloverfield-tools/cf-package/tree/master.svg?style=svg)](https://circleci.com/gh/cloverfield-tools/cf-package/tree/master) +# Cloverfield -Cloverfield Package Scaffold +**A general-purpose multimodal foundation model for creative professionals, developers, and interactive AI experiences.** -# Status - Deprecated +## Vision -This package is not being actively maintained. +**Build a 6B parameter model that creative professionals actually want to use.** -## What does this do? +Cloverfield enables: +- **Creative editing**: "Give Mom a funny Christmas sweater in this family photo" +- **Interactive storytelling**: Generate and explore virtual worlds in real-time +- **Conversational AI avatars**: Natural multi-turn dialogue with visual characters +- **Code generation with visual context**: "Add a glowing particle effect to this Unity scene" +- **Video editing with natural language**: "Make this sunset more dramatic and add ambient ocean sounds" +- **Image generation that obeys physics**: Objects have proper weight, lighting, and material properties -It scaffolds a new module with the following features: +**Long-term vision**: Real-time generation of interactive virtual worlds—think holodeck, but on your laptop. Explore AI-generated environments that respond to your actions, maintain physical consistency, and evolve through conversation. -* ES6 with Babel -* Lint with ESLint -* Tape tests with coverage report -* Dependency security audits with nsp -* Ensure dependencies are properly declared in package.json -* Git precommit hook enforces quality checks on commit -* CI config (Travis, CircleCI) -* [A contributing guide](template/docs/contributing/index.md) -* An assortment of useful npm scripts +### How We Get There +**Physics-grounded training** (Unreal Engine simulations) teaches the model how reality works—forces, materials, lighting, causality. This isn't about physics education; it's about ensuring generated content is **physically plausible**: +- Shadows match light sources +- Objects don't float or clip through surfaces +- Materials behave realistically +- Motion follows natural dynamics -## Getting Started +Combined with **quality over scale** philosophy (inspired by Phi-1/Phi-2): curated synthetic data from Unreal Engine beats massive web scrapes for learning world models. 
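+
+As a concrete illustration (hypothetical, for intuition only), one curated engine record might bundle synchronized modalities with free ground truth; the field names below are assumptions, not a defined schema:
+
+```python
+from dataclasses import dataclass
+import numpy as np
+
+@dataclass
+class SyntheticSample:
+    """Hypothetical shape of one curated Unreal Engine training record:
+    every modality is sampled on the same engine clock, and ground truth
+    (pose, materials, lighting) comes for free from the renderer."""
+    t: float                 # absolute engine time (seconds)
+    frames: np.ndarray       # rendered video patches, e.g. (T, H, W, 3)
+    audio: np.ndarray        # waveform synchronized to the same clock
+    text: str                # description/dialogue aligned to t
+    camera_pose: np.ndarray  # ground-truth 4x4 camera extrinsics
+    materials: dict          # per-object physical properties
+    lighting: dict           # light sources, intensities, directions
+```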
-```sh -npm install -g cf-package -mkdir project -cd project -cf-package -npm install -npm test +## Training Philosophy: Quality Over Scale + +### Inspired by "Textbooks Are All You Need" + +Microsoft Research's Phi models (1.3B–2.7B parameters) demonstrated that **small models trained on high-quality synthetic data can match models 25× larger** on reasoning tasks. + +Key principles: +- **Textbook quality**: Clear, self-contained, instructive content +- **Synthetic curation**: GPT-generated exercises and explanations +- **Data > Scale**: Phi-1's 7B tokens of curated data outperformed larger models on billions of web tokens + +### Our Adaptation: Physics-Grounded World Models + +We extend this philosophy to multimodal learning: + +**Synthetic training data from Unreal Engine** combining: +- Natural language descriptions and dialogue +- Physically accurate 3D scenes (materials, lighting, dynamics) +- Synchronized audio (spatial sound, realistic interactions) +- Ground-truth metadata (object properties, camera pose, forces) + +**Why Unreal Engine synthetic data?** +- **Perfect ground truth**: Every pixel has known 3D position, material, lighting +- **Physical consistency**: Motion, collisions, and materials follow real-world physics +- **Controllable diversity**: Generate unlimited variations of any scenario +- **Naturally multimodal**: Engine timesteps synchronize video, audio, and text +- **Quality over quantity**: Curated synthetic scenes > noisy web scrapes + +This teaches the model a **world model**—how reality works—enabling it to generate content that's not just visually convincing but physically plausible. + +--- + +## Core Architecture + +### Primary Objective + +**Unbounded streaming transformer** with perfect token retrieval and efficient text encoding: + +1. **SPCE** (Spectral Phase-Coherent Encoding) — continuous phase field coordinates +2. **Visual text encoding** — 10× compression following DeepSeek OCR lessons +3. **Integrated retrieval** — perfect recall of exact input tokens, unbounded context +4. **Windowed attention** — efficient O(w²) local processing +5. **SSM carry** — cross-window state persistence +6. **Keyframe anchoring** — periodic drift correction + +### 1. SPCE — Spectral Phase-Coherent Encoding + +*(Pronounced "space" - mnemonic and literal: encodes space and time)* + +**Not a positional encoding—a field embedding.** SPCE is a continuous spectral phase field that spans space and time. Each token lives as a point on a helical manifold in this space. Every modality—text, audio, video—shares the same spectral coordinate system, so cross-modal alignment is natural. + +``` +θ = ω·t + k·r + φ₀ +``` + +where: +- **ω**: Spectral frequency palette (12-24 log-spaced atoms: 10⁻⁴ to 10³ Hz) +- **t**: Absolute continuous time +- **k**: Spatial frequency vector (k_x, k_y, k_z for 3D coherence) +- **r**: Spatial coordinates (x, y, z) +- **φ₀**: Phase offset + +**SPCE defines how every modality coexists in one continuous spectral phase field.** Each token's coordinates are not a scalar "position" but a point in phase space that evolves with time, space, and energy. Traditional positional encodings break down after thousands of tokens because they're one-dimensional and periodic. SPCE carries continuous phase and frequency, behaving like a state space for all modalities—it handles motion, rhythm, and cross-modal phase alignment as the model's world clock and coordinate frame. 
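+
+A minimal sketch of these coordinates (a 16-atom palette is assumed; `spce_phase` and all shapes are illustrative, not the actual implementation):
+
+```python
+import numpy as np
+
+# Shared spectral palette: log-spaced omega atoms spanning 1e-4 to 1e3 Hz.
+omega = np.logspace(-4, 3, 16)
+
+def spce_phase(t, r, k, phi0=0.0):
+    """theta = omega*t + k.r + phi0, evaluated directly from absolute time t
+    and spatial position r (3-vector) -- no cumulative state, so no drift."""
+    return omega * t + float(np.dot(k, r)) + phi0
+
+r = np.array([1.0, 0.5, 2.0])   # token's spatial coordinates
+k = np.array([0.1, 0.2, 0.0])   # spatial frequency vector
+
+# Tokens from any modality at the same (t, r) share identical phase
+# coordinates -- the built-in cross-modal alignment described above.
+theta_audio = spce_phase(12.5, r, k)
+theta_video = spce_phase(12.5, r, k)
+assert np.allclose(theta_audio, theta_video)
+
+# Relationships are window-agnostic: delta_theta depends only on omega*dt,
+# no matter where a window boundary falls.
+assert np.allclose(spce_phase(12.75, r, k) - theta_audio, omega * 0.25)
+```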
+ +**Crystal lattice spiral geometry:** + +In complex space, e^(iθ) traces spirals at different pitches: +- **Low frequencies** (ω ≈ 10⁻⁴): Wide spirals → narrative arc, scene coherence (hours) +- **Mid frequencies** (ω ≈ 1): Medium spirals → action sequences, sentences (seconds) +- **High frequencies** (ω ≈ 10³): Tight spirals → frame details, phonemes (milliseconds) + +Together, the log-spaced ω atoms form a **quasi-crystalline lattice in phase space**. All modalities at time t, position r map to the same lattice coordinates—audio beats, video frames, and text tokens share identical phase, providing geometric cross-modal alignment through crystalline structure. + +**Multi-scale temporal decomposition:** + +Per-head softmax gates let attention heads explicitly select temporal scales: +- Attention head attending to low-ω: learns long-range dependencies (narrative flow) +- Attention head attending to high-ω: learns short-range patterns (local details) +- Mixed-ω heads: learn cross-scale relationships + +This Fourier-like decomposition of temporal structure is fundamentally different from position-based encodings—it's geometric embedding into a multi-scale phase manifold. + +**Implementation:** +- **Complex exponential basis**: e^(iωt) where ω spans learned spectral distribution +- **Shared spectral palette**: 12–24 log-spaced ω atoms (global pool spanning temporal scales) +- **Per-head ω distributions**: Each attention head learns softmax-weighted mixture of ω atoms → multi-scale temporal sensitivity +- **Spatial frequencies**: k_x, k_y, k_z encode 3D object coordinates (objects share spatial lattice) +- **Absolute time evaluation**: `θ = ω·t + k·r` computed directly (no cumulative drift) +- **Phase continuity across windows**: θ_{t+Δt} = θ_t + ω·Δt maintains smooth evolution +- **Re-normalization**: Periodic low-frequency re-anchoring prevents long-run precision drift + +**Key properties:** + +1. **Temporal continuity**: Phase unwrapped across infinite timesteps → unbounded streaming without drift +2. **Spatial coherence**: Spatial frequencies e^(i(k_x·x + k_y·y + k_z·z)) → objects in 3D share coordinate system +3. **Spectral control**: ω atoms are learnable → model adapts to slow/fast phenomena automatically +4. **Cross-modal alignment**: Audio, video, text at (t, r) → identical phase coordinates → beats, motion, language sync +5. **SSM coupling**: Low-frequency ω bands persist in SSM carry (narrative state), high-frequency bands refresh with attention (local details) +6. **Window agnostic**: Temporal relationships (∆θ = ω·∆t) invariant to window size → variable window training works +7. **Multi-view ready**: SPCE becomes shared coordinate frame for multiple cameras—spatial frequencies encode same 3D world from different viewpoints + +### 2. Visual Text Encoding (DeepSeek OCR Approach) + +**Lesson from DeepSeek:** Text as compressed visual tokens achieves 10× compression with 97% fidelity. + +**Architecture:** +``` +Text → Rendered Image → Windowed SAM (80M) → CLIP Global (300M) → 16× Compression → Visual Tokens +``` + +**Unified representation:** +- Video patches, audio spectrograms, and text ALL encoded as visual tokens +- Single modality-agnostic transformer processes everything +- Text compression: 5000 text tokens → 500 visual tokens (10×) + +**Advantages:** +- Maintains semantic structure (layout, equations, formatting) +- Efficient: Fewer tokens without information loss +- Unified: No separate text vs vision pathways + +### 3. 
Integrated Retrieval Architecture + +**Three-tier memory for unbounded context:** + +#### **Tier 1: Windowed Attention (Local Context)** +``` +Window size: 4K-32K tokens (task-dependent) +- Video generation (standard): 4K-8K tokens +- Video generation (ultra quality): 16K-32K tokens +- Code generation: ~8K tokens +- NLU/conversational: 8K-16K tokens + +Complexity: O(w²) scales with window choice +- 4K window: 16M ops/layer +- 16K window: 256M ops/layer +- 32K window: 1B ops/layer (still 160× faster than standard attention @ 400K tokens) + +Memory: O(w) - constant per window regardless of total sequence length +``` + +Efficient local attention with SPCE phase rotation. Full O(w²) cross-attention within the window—every token can attend to every other token for complete semantic understanding. + +**Why small windows work:** The three-tier retrieval architecture + SSM carry means we don't need massive windows for long-range understanding: +- **Tier 1 (windowed attention)**: Handles immediate context with full O(w²) cross-attention +- **Tier 2 (RETRO retrieval)**: Pulls in relevant examples (scenes, dialogue, code patterns) +- **Tier 3 (kNN retrieval)**: Perfect recall of exact tokens from unbounded history +- **SSM carry**: Low-frequency phase coherence maintains narrative/conversational state across windows + +**Result: Small finite windows (4K-32K) + retrieval + SSM carry = unbounded effective context** with constant compute cost. + +#### **Tier 2: Chunked Knowledge Retrieval (RETRO-Style)** +``` +Chunk size: 64 tokens +Retrieved neighbors: k=5 per chunk +Database: Multimodal knowledge base (millions of examples) +``` + +For every 64-token chunk, retrieve k=5 similar examples from pre-built database: +- Similar visual scenes (lighting, composition, materials) +- Analogous dialogue patterns (conversation styles, emotional tone) +- Related code patterns (Unity scripts, shader code, game logic) +- Common editing operations (color grading, object manipulation) + +Cross-attention from window tokens to retrieved knowledge chunks. + +#### **Tier 3: Exact Token Retrieval (kNN Memory)** +``` +Search: k=8 nearest neighbors per query +Index: FAISS approximate nearest neighbors +Keys: Phase-rotated token embeddings (SPCE-encoded) +``` + +**Perfect recall:** Retrieve exact input tokens from arbitrarily long history via kNN search. + +**Phase-aware similarity:** Tokens with similar phase patterns (temporal structure) retrieved together. + +**Unbounded:** No context limit—memory grows with conversation/video length. 
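+
+A minimal sketch of this tier under stated assumptions (an HNSW index is one possible FAISS approximate-nearest-neighbor choice; the helper names are illustrative, not the actual implementation):
+
+```python
+import numpy as np
+import faiss
+
+d, k = 128, 8                          # key dimension, neighbors per query
+index = faiss.IndexHNSWFlat(d, 32)     # approximate nearest-neighbor index
+token_store = []                       # exact tokens, addressed by row id
+
+def write_window(keys, tokens):
+    """Append a window's phase-rotated token keys (SPCE-encoded) plus the
+    exact tokens they point back to; the index grows with the session."""
+    index.add(np.ascontiguousarray(keys, dtype=np.float32))
+    token_store.extend(tokens)
+
+def recall(queries):
+    """Perfect-recall lookup: k nearest keys over the full history,
+    mapped back to the exact stored tokens."""
+    _, ids = index.search(np.ascontiguousarray(queries, dtype=np.float32), k)
+    return [[token_store[i] for i in row] for row in ids]
+```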
+ +**Example:** +``` +t=0: User: "Generate a cozy coffee shop scene with warm lighting" + → Model generates image with specific lighting setup +t=5000: User: "Now add a character sitting by the window, same lighting style" + → kNN retrieves exact tokens from t=0 (lighting parameters, color palette) + → SSM carry maintains scene state (camera pose, style consistency) + → RETRO retrieves similar coffee shop scenes with characters + → Perfect long-range reference despite finite window +``` + +### The Architecture's Key Advantage: Unbounded Effective Context + +**Small finite windows (4K-32K tokens) + Three-tier retrieval + SSM carry = Unbounded effective context** + +- **Constant compute cost**: O(w²) regardless of conversation/video length +- **Perfect recall**: kNN retrieval finds exact tokens from unlimited history +- **Knowledge integration**: RETRO pulls relevant physics examples +- **State persistence**: SSM carry maintains conversational/narrative flow +- **No context limit**: Process hours of video with constant memory footprint + +This is the architectural moat: **unbounded understanding with bounded compute**. + +### 4. Spectral SSM Carry + +**Diagonal, low-rank state space model** for cross-window state: + +``` +x_next = exp(-a·Δt) ⊙ (b·x + gain·u) +``` + +**Properties:** +- Eigenvalues constrained to unit circle (bounded memory) +- Stores slow harmonics (low frequencies) and entity slots (objects, speakers, topics) +- Updated once per window shift +- Maintains conversational/narrative state across windows + +**Coupling with SPCE:** +- High frequencies (ω ≈ 10³): Handled by attention (fast refresh) +- Low frequencies (ω ≈ 10⁻⁴): Handled by SSM carry (stable persistence) + +### 5. Keyframes + +**Periodic anchors at fixed interval T seconds** (e.g., every 2-5 seconds): + +``` +[KEYFRAME] <θ̂_anchor> +``` + +**Purpose:** +- Refresh slow phase anchors (`θ̂`) +- Store camera pose, object IDs, lighting state +- Enforce drift penalty (loss term: `|θ̂_predicted - ω·t|`) +- Enable safe rewind/resume mid-stream +- Anchor points for retrieval index + +**During training**: Keyframes provide supervision for SSM carry +**During inference**: Small corrections at keyframes prevent long-tail drift + +### 6. Stateful Inference with Adaptive Capacity + +The inference engine maintains state across conversations and supports **low-rank Hebbian adaptation**—dynamically expanding effective attention capacity through query-focused adaptation on current context. This enables efficient fine-tuning to specific conversations or domains during inference without full retraining. 
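+
+One speculative toy reading (not the planned design; the rank, update rule, and names below are all assumptions): keep a rank-r Hebbian buffer of recent activation pairs and apply it as a low-rank correction to a frozen projection.
+
+```python
+import numpy as np
+
+d, r, eta = 1024, 8, 1e-3                    # model dim, adapter rank, rate
+W = np.random.randn(d, d) / np.sqrt(d)       # frozen base projection
+X = np.zeros((d, r)); Y = np.zeros((d, r))   # rank-r Hebbian buffers
+ptr = 0
+
+def observe(x, y):
+    """Hebbian write: remember a (pre, post) activation pair from the
+    current context in a ring buffer of rank r (oldest slot overwritten)."""
+    global ptr
+    X[:, ptr], Y[:, ptr] = x, y
+    ptr = (ptr + 1) % r
+
+def project(x):
+    """Adapted projection: frozen weights plus the low-rank correction
+    eta * Y @ X.T @ x, so context similar to remembered patterns is
+    amplified without touching the base weights (discard X, Y to reset)."""
+    return W @ x + eta * Y @ (X.T @ x)
+```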
+ +*Implementation details deferred to inference optimization phase.* + +--- + +## Training Pipeline + +### Curriculum + +**Stage 1: Text-to-image with dialogue** (static scenes, foundation for visual understanding) +- Train 6B parameters from scratch with SPCE architecture +- Random initialization, variable window training (4K-32K randomly sampled per batch) +- Unreal Engine rendered scenes with natural language descriptions +- Synchronized audio narration and ambient sound +- Focus: Cross-modal alignment, visual grounding, material/lighting understanding, NLU bootstrapping + +**Stage 2: Dynamic scenes and interaction** (temporal understanding) +- Unreal Engine: Character movement, object interactions, environmental changes +- Camera motion, lighting transitions, physics-based dynamics +- Instruction-following: "Make the character wave" → animation generation +- Focus: Temporal coherence, action understanding, world model learning + +**Stage 3: Conversational and editing tasks** (instruction following) +- Multi-turn dialogue with visual references ("add a hat to this character") +- Instruct-edit examples ("make this sunset more dramatic") +- Code generation in visual contexts ("add particle effects to this scene") +- Creative writing with scene generation +- Focus: Instruction adherence, iterative refinement, creative control + +**Stage 4: Real-world data augmentation** (generalization) +- Mix with curated web data (licensed images, videos, conversations) +- Transfer learning from synthetic to real-world domains +- Fine-tuning on creative professional workflows + +### Input Structure + +Tokens are packed in **tick order** (1 tick ≈ 1/960 ms): + +``` +[TICK_0000] [KEYFRAME] +[TICK_0001] +[TICK_0002] +[TICK_0003] +... +[TICK_1920] [KEYFRAME] +[TICK_1921] "add_particle_effect" +... +``` + +**Modality-specific encodings:** +- **Video**: Patch tokens @ 30fps → ticks with frame offset +- **Audio**: Waveform samples @ 16kHz → ticks with zero offset +- **Text**: Dialogue/narration/instructions with timestamps → tick-aligned +- **Scene metadata**: Camera pose, lighting, materials, object properties → tick-aligned + +**Key tokens:** +- `[TICK]`: Hard anchor for phase (emitted every N ticks) +- `[KEYFRAME]`: Store state, refresh anchors +- ``, ``, ``: 3D spatial grounding +- ``, ``: Visual properties for consistent generation +- ``: Edit/generation commands + +--- + +## Simplified Implementation + +### SPCE Core + +**1. Shared ω atoms** (12–24 per pool, 2 pools: low + high freq) +```python +ω_low = log_uniform(1e-4, 1) # Slow harmonics +ω_high = log_uniform(1, 1e3) # Fast dynamics +``` + +**2. Per-head gates** (softmax-constrained mixture) +```python +ω_head = Σₖ softmax(α_head)[k] · ω_k +``` + +**3. 
Absolute phase** +```python +θ = ω_head · t # Direct computation, no drift +rotary_apply(Q, K, θ) # SPCE phase rotation (replaces RoPE) +``` + +### SSM Carry + +**Diagonal state space** (closed-form update) +```python +x_carry = exp(-a·Δt) * (b·x_prev + gain·input) +``` + +**Constraints:** +- `a` constrained to unit circle eigenvalues +- Low-rank projection for entity slots +- Updated once per window shift (not per token) + +### Keyframes + +**Scheduled emission** every `T` seconds: +```python +if t % keyframe_interval == 0: + emit [KEYFRAME] + store cam_pose, object_ids, θ̂_anchor + loss += |θ̂_predicted - ω·t| # Drift penalty +``` + +--- + +## Success Metrics + +**What matters for creative professionals and interactive AI:** + +### Instruction Following & Editing +- **Edit accuracy**: "Give Mom a Christmas sweater" → correct object identification + appropriate generation +- **Style consistency**: Maintaining lighting, color palette, artistic style across edits +- **Multi-turn coherence**: Iterative refinements maintain previous edits +- **Spatial understanding**: "Add character by the window" → correct spatial placement + +### Generation Quality +- **Physical plausibility**: Generated content obeys real-world constraints (lighting, shadows, materials, physics) +- **Visual fidelity**: FID scores on generated images/video comparable to SOTA in weight class +- **Audio-visual sync**: Perfect synchronization between generated audio and visual content +- **Temporal coherence**: No flickering, object persistence, smooth motion + +### Conversational AI +- **Multi-turn dialogue**: Natural conversations with AI avatars maintaining character/context +- **Visual grounding**: "Show me what you mean" → generates relevant visual content +- **Long-context reasoning**: Maintains conversation state over hours (100K+ tokens) +- **Personality consistency**: AI character maintains voice, style, knowledge across sessions + +### Interactive World Generation +- **Real-time generation**: Latency <500ms for interactive responses in generated environments +- **World consistency**: Generated scenes maintain spatial layout, object persistence, physics +- **Exploration capability**: User can navigate and interact with AI-generated spaces +- **Dynamic response**: Environment reacts to user actions with physical plausibility + +### Retrieval Performance +- **kNN recall accuracy**: Exact token retrieval from 100K+ token history +- **Retrieval latency**: <10ms for k=8 neighbors via FAISS +- **Phase-aware similarity**: Temporal pattern matching accuracy +- **Knowledge retrieval relevance**: Chunked physics DB retrieval precision + +### Computational Efficiency +- **Throughput** (tokens/sec) with fused kernels +- **Memory footprint** during unbounded streaming (should be O(w) constant) +- **Keyframe overhead** (should be <5% of total compute) +- **Retrieval overhead** (RETRO + kNN, should be <10% of total compute) + +--- + +## Implementation Roadmap + +### Phase 1: SPCE Validation +- Implement shared ω atoms + per-head gates in MLX +- Compare SPCE vs RoPE on audio-only task (music beat prediction) +- **Success criterion**: SPCE should exceed RoPE due to higher information density (continuous time + shared cross-modal frequencies + explicit phase offsets) + +### Phase 2: Base Model Training (Stage 1 Curriculum) +- Train 6B parameters from scratch on M4 Max 128GB +- Random initialization, SPCE + windowed attention from day 1 +- Variable window training (4K-32K randomly sampled per batch) +- ~10B tokens of curated UE 
synthetic data (text-to-image with dialogue) +- **Training time estimate**: 3-6 months continuous on M4 Max +- **Success criterion**: Text-to-image generation matching Stable Diffusion quality, basic NLU capability + +### Phase 3: Temporal & Editing Capabilities (Stages 2-3) +- Continue training with video sequences and instruction-edit examples +- **Training time**: Additional 2-4 months +- **Success criterion**: 8-second video generation + instruct-edit ("Give Mom a Christmas sweater") + +### Phase 4: Conversational AI & Interactive Worlds (Stage 4) +- Add conversational training data and real-world augmentation +- **Training time**: Additional 2-4 months +- **Success criterion**: Natural multi-turn dialogue with AI avatars + navigable generated environments (holodeck prototype) + +**Total training time: 7-14 months on M4 Max 128GB** (continuous, assuming quality synthetic data pipeline established) + +--- + +## Computational Efficiency Analysis + +### TL;DR: Competitive with Mamba, Much Better Than Standard Transformers + +**For streaming 4-hour video (≈400K tokens):** + +| Architecture | Time Complexity | Memory | Throughput | Notes | +|--------------|----------------|---------|------------|-------| +| Standard Transformer | O(n²) = O(160B) | O(n²) | 1× baseline | Quadratic wall | +| FlashAttention | O(n²) = O(160B) | O(n) | 2-3× | Memory-efficient, still quadratic | +| Mamba | O(n) = O(400K) | O(1) | 5× | Linear time, constant memory | +| **Cloverfield** | **O(w²)** | **O(w)** | **4-6×** | **Flexible window (4K-32K) + SSM carry** | + +*n = total sequence length, w = window size (4K-32K depending on task)* + +--- + +### Detailed Breakdown + +#### 1. Visual Text Compression (DeepSeek OCR) + +**Text tokenization overhead:** +- Traditional BPE: 5000 text tokens for typical page +- DeepSeek visual encoding: 500 visual tokens (10× compression) +- Encoding cost: Windowed SAM (80M) + CLIP (300M) = ~400M ops per page +- **Amortized**: One-time encoding, 97% fidelity maintained + +**Net benefit:** +- 90% reduction in sequence length for text-heavy content +- Unified visual representation simplifies architecture +- Preserves semantic structure (equations, layout, formatting) + +#### 2. SPCE Overhead vs RoPE + +**RoPE baseline:** +- Rotary embedding: ~1-3% overhead (negligible in practice) +- Applied per-layer with fused CUDA kernels +- Dominated by matrix multiplies, not position encoding + +**SPCE overhead:** +- Absolute phase computation: `θ = ω_head · t` (same cost as RoPE) +- Per-head gate: Softmax over K=12-24 atoms (once per forward pass) + - Cost: O(heads × K) ≈ 32 heads × 16 atoms = 512 ops + - Negligible compared to attention (millions of ops) +- Complex rotary apply: Same as RoPE (fused kernel) + +**Verdict:** SPCE ≈ RoPE overhead (1-3%), potentially slightly better since ω is per-head, not per-token. + +**SPCE information density advantage:** +- Continuous absolute time (not discrete positions) → finer temporal resolution +- Shared ω palette across modalities → built-in cross-modal phase coherence +- Explicit phase offsets → structural alignment without learning +- Result: SPCE has MORE information than RoPE while maintaining similar computational cost + +#### 3. 
Windowed Attention + Integrated Retrieval vs Full Attention + +**Standard attention over full context:** +``` +Attention(Q, K, V) where K, V ∈ ℝⁿˣᵈ +Cost: O(n² · d) per layer +Memory: O(n² + n · d) for attention matrix + KV cache ``` -### Input via prompt: +**Cloverfield windowed attention + retrieval + SSM:** +``` +Window attention: Q, K, V ∈ ℝʷˣᵈ where w << n +Cost: O(w² · d) per layer (constant w) + +RETRO retrieval: k=5 neighbors per 64-token chunk +Cost: O((w/64) × k × d_encode) ≈ O(160 × d_encode) +Memory: O(k × chunk_size × d) = O(5 × 64 × d) (constant!) + +kNN memory: k=8 nearest neighbors per query +Cost: O(w × log(n)) for FAISS search (amortized) +Memory: O(n × d) for full history index (grows with session) + +SSM carry: x_next = exp(-a·Δt) ⊙ (b·x + gain·u) +Cost: O(d_ssm) per window shift (d_ssm ≈ 256-512) +Memory: O(d_ssm) (constant!) -```sh -cf-package -> Your name: My Name -> Your email (will be publicly available, optional): my@email -> Your GitHub public username: mygithub -> Package name: test -> Package description: Test Package +Total per window: O(w² + w·log(n) + d_ssm) ``` -Variable | Prompt -------------------- | --- -user.name | > Your name: -user.email | > Your email (will be publicly available, optional): -user.github | > Your GitHub public username: -package.name | > Package name: -package.description | > Package description: +**Example (4-hour video @ 100 tokens/sec):** +- n = 400,000 tokens (full context) +- Model trained with variable windows (4K-32K), can use any at inference + +**Standard attention:** +- 400K² = 160 billion ops per layer +- 52 GB memory (KV cache) + +**Cloverfield with 16K window** (same model, typical for video understanding): +- Window attention: 16K² = 256 million ops +- RETRO retrieval: 250 chunks × 5 neighbors × encode = ~1.25M ops +- kNN search: 16K × log(400K) ≈ 290K ops (FAISS) +- SSM update: 512 ops +- **Total: ~258M ops per layer (620× reduction!)** +- **Memory: 2.1 GB window + index (25× reduction in active memory)** + +**Cloverfield with 4K window** (same model, real-time holodeck): +- Window attention: 4K² = 16 million ops +- **Total: ~17M ops per layer (9,400× reduction!)** +- **16× faster than 16K window for real-time applications** + +**Key insight:** Retrieval cost is negligible compared to attention savings. + +#### 4. Keyframe Overhead + +**Emission frequency:** +- Keyframe every T seconds (e.g., T=3s) +- At 100 tokens/sec: 1 keyframe per 300 tokens +- Overhead: ~0.33% extra tokens + +**Keyframe operations:** +- Store state: O(d_ssm) = O(256-512) +- Drift penalty loss: O(heads) = O(32) +- Total: <1ms per keyframe (negligible) + +**Verdict:** Keyframe overhead <1% of total compute. + +#### 4. 
Streaming Comparison + +**Scenario: Process 4-hour educational video (400K tokens)** + +**Standard Transformer (e.g., GPT-4, Claude):** +- Must split into chunks (context window limit: 128K-1M tokens) +- Each chunk processed independently: O(chunk_size²) +- Total: O(n_chunks × chunk_size²) +- KV cache grows with context: O(n · d) +- **Problem:** Can't maintain state across hours without reprocessing + +**FlashAttention Transformer:** +- IO-aware, memory-efficient, but still O(n²) +- Reduces HBM accesses by 10-20×, speeds up 2-3× +- Still quadratic: 400K² = 160B ops per layer +- **Problem:** Throughput degrades as sequence grows -### Quick input +**Mamba:** +- Linear time: O(n) = 400K ops per layer +- Constant memory: O(1) state +- 5× throughput vs standard attention +- **Limitation:** Weak at in-context learning (copying tasks) -Alternatively it is possible to input every generator variable as CLI option: +**Cloverfield:** +- Variable window training enables flexible inference window choice +- **With 16K window** (video understanding): + - O(w²) = 16K² = 256M ops per layer + - Total: 25 windows × 256M = 6.4B ops + - **25× reduction vs standard attention** +- **With 4K window** (real-time holodeck): + - O(w²) = 4K² = 16M ops per layer + - Total: 100 windows × 16M = 1.6B ops + - **100× reduction vs standard attention** +- SSM carry: O(d_ssm) per window shift = 256 ops (negligible) +- Memory: Constant O(w²) regardless of total sequence length -```sh -cf-package --user.name="My Name" \ - --user.email=my@email \ - --user.github=mygithub \ - --package.name=test \ - --package.description="Test Package" +#### 5. Memory Footprint + +**Standard attention KV cache:** +``` +Memory = n_layers × n_tokens × n_heads × head_dim × 2 (K+V) × bytes_per_param +For 6B model (32 layers, 400K tokens, 32 heads, 128 dim, fp16): += 32 × 400K × 32 × 128 × 2 × 2 bytes += 52 GB just for KV cache! +``` + +**Cloverfield:** +``` +Memory = window_cache + SSM_state +Window cache = n_layers × w × n_heads × head_dim × 2 × 2 + +For 16K window (video understanding): += 32 × 16384 × 32 × 128 × 2 × 2 += 2.1 GB (constant, independent of video length!) + +For 4K window (real-time holodeck): += 32 × 4096 × 32 × 128 × 2 × 2 += 0.5 GB (even faster, lower memory) + +SSM state = n_layers × d_ssm × 2 += 32 × 512 × 2 += 33 KB (negligible!) + +Total: 0.5-2.1 GB depending on window choice (constant per window) ``` -Explore and enjoy! Part of the [Cloverfield project](https://github.com/cloverfield-tools/cloverfield). +**Savings: 52 GB → 0.5-2.1 GB = 25-100× reduction in active memory** + +#### 6. Throughput Estimates + +**Benchmarks (from literature):** +- Standard attention: 1× baseline +- FlashAttention: 2-3× faster than standard +- Mamba: 5× faster than standard +- Hybrid (Mamba-2): 8× faster at inference + +**Cloverfield estimates:** +- Attention on fixed window: Similar to FlashAttention (2-3×) +- SSM carry overhead: Negligible (<5%) +- Keyframe overhead: Negligible (<1%) +- Expected: **4-6× throughput vs standard attention** +- Memory-bound streaming: **No degradation with sequence length** + +#### 7. 
Training Efficiency + +**Standard multimodal transformer:** +- Batch size limited by memory (full context KV cache) +- Gradient checkpointing required for long sequences +- Slow convergence on long-range dependencies + +**Cloverfield:** +- Fixed memory per window → larger batch sizes possible +- SSM carry trained with closed-form gradients (stable) +- Keyframe supervision provides dense signal for long-range learning +- Expected: **2-3× faster training convergence** on long videos + +--- + +### When Cloverfield Wins + +**✅ Excels at:** +1. **Long-form streaming** (hours of video) + - Constant memory, no context limit + - Standard transformers hit memory wall +2. **Phase-sensitive tasks** (A/V sync, music, speech) + - SPCE naturally captures temporal coherence +3. **Real-time inference** (live video processing) + - Fixed latency per window + - Predictable compute budget +4. **Multi-hour reasoning** (educational lectures) + - SSM carry maintains entity state + - Keyframes provide periodic supervision + +**⚠️ Potentially weaker at:** +1. **Strong copying/in-context learning** + - Mamba's limitation applies here too + - Mitigation: Hybrid attention-SSM (like Mamba-2 Hybrid) +2. **Very short sequences** (<2K tokens) + - Full attention might be fine + - SPCE/SSM overhead not justified +3. **Random access** (jumping around document) + - Sequential processing assumption + - Mitigation: Process multiple streams in parallel + +--- + +### Architecture Comparison Summary + +| Aspect | Standard Transformer | Mamba | Cloverfield | +|--------|---------------------|-------|-------------| +| **Time complexity** | O(n²) | O(n) | O(w² + w·log n) ≈ constant | +| **Memory (active)** | O(n²) → O(n) cache | O(1) | **O(w) = 2-8 GB (constant)** | +| **Memory (total)** | O(n) | O(1) | O(n) for kNN index | +| **Throughput** | 1× | 5× | 4-6× | +| **Context limit** | 128K-1M tokens | Unbounded | Unbounded | +| **Perfect recall** | Full attention | No | Yes (kNN retrieval) | +| **Knowledge retrieval** | No | No | Yes (RETRO-style) | +| **Phase alignment** | Learned | Learned | Built-in (SPCE) | +| **NLU capability** | Excellent | Good | Excellent (full attention in window) | +| **Streaming latency** | High (recompute) | Low (constant) | Low (constant) | +| **Memory @ 400K tokens** | 52 GB active | ~1 GB | **2-8 GB (window-dependent) + index** | +| **Effective context** | Limited by window | Good (SSM) | **Unbounded (retrieval + SSM)** | +| **Multi-hour stability** | Poor (no carry) | Good (SSM) | Excellent (SSM+keyframes+retrieval) | + +--- + +### Implementation Efficiency Notes + +**CUDA kernel priorities:** +1. **Fused SPCE rotary** (biggest impact) + - Compute ω_head from gates + atoms + - Apply complex rotation to Q, K + - Target: Match RoPE performance (1-3% overhead) + +2. **Diagonal SSM update** (second priority) + - Closed-form `exp(-a·Δt) ⊙ ...` + - Float64 accumulation for stability + - Target: <5% overhead per window shift + +3. 
**Keyframe injection** (low priority) + - Simple token insertion + - Can be done in PyTorch/MLX (not critical path) + +**Expected development time:** +- Triton prototypes: 1-2 weeks +- Optimized CUDA: 2-4 weeks +- Performance validation: 1 week + +--- + +## Why This Can Work + +### ✅ Grounded in Proven Principles + +**"Textbooks Are All You Need"** showed: +- 1.3B model + 7B tokens curated data > 10B+ model on web data +- Quality beats scale for reasoning tasks + +**Cloverfield** extends this to creative multimodal AI: +- Unreal Engine synthetic data = "textbook quality" for world models +- Physics grounding = physically plausible generation (not just visual patterns) +- Continuous phase field = natural cross-modal synchronization +- Streaming architecture = unbounded interactive experiences + +### ✅ Technically Feasible + +**Hardware**: 6B model trainable from scratch on M4 Max 128GB (50-60GB training footprint) + +**Data**: Unreal Engine enables unlimited synthetic training data with perfect ground truth (lighting, materials, physics) + +**Architecture**: Built on proven components (SPCE extends RoPE concepts, RETRO retrieval, Mamba SSM) + +**Training time**: 7-14 months continuous on M4 Max to reach holodeck prototype capability + +### ✅ Unique Advantages + +**vs. GPT-4o/Gemini:** +- Physically plausible generation (shadows, materials, motion obey real-world rules) +- Unbounded streaming context (no context window limits for conversations/world exploration) +- Real-time interactive world generation (local, on-device) +- Automatic cross-modal sync (SPCE phase alignment) + +**vs. Firefly/Midjourney/Runway:** +- Conversational interface with iterative editing +- Physics-grounded consistency (objects behave realistically) +- Multi-turn instruction following with perfect context recall +- Unified model for image + video + audio + code + conversation + +**vs. Academic models:** +- Real implementation focus (not just theoretical) +- Curated data strategy (not web-scale scraping) +- Open-source from day one + +--- + +## Essence + +**Do not integrate noise.** Compute phase from absolute time. Learn a tiny spectral palette and let heads gate it. Share one clock across all modalities. Re-anchor with keyframes. Use physics simulations as textbook-quality training data. The rest is next-token prediction. + +--- + +## Status + +🚧 **Active Development** + +Current focus: SPCE prototype implementation in MLX + +--- + +## References + +### Training Philosophy: Quality Over Scale + +- **Textbooks Are All You Need** (Gunasekar et al., 2023) + Quality over scale for code generation. Phi-1 (1.3B params) with curated synthetic data outperforms larger models. + *arXiv:2306.11644* + +- **Textbooks Are All You Need II** (Li et al., 2023) + Phi-1.5 for natural language reasoning tasks. + *arXiv:2309.05463* + +- **Phi-2** (Microsoft Research, 2023) + 2.7B model matching 25× larger models through data quality. + *Microsoft Research Blog* + +### Unified Multimodal Architectures + +- **Gemini 1.5: Unlocking multimodal understanding** (Gemini Team, Google, 2024) + Joint vision-language transformers with unified multimodal backbone. Direct cross-modal tokenization where image patches, speech spectrograms, and text sequences coexist in the same latent representation. 
+ *arXiv:2403.05530* | [PDF](https://storage.googleapis.com/deepmind-media/gemini/gemini_v1_5_report.pdf) + +- **Meta-Transformer: A Unified Framework for Multimodal Learning** (Zhang et al., 2023) + Unified tokenizer across 12 modalities with shared token space. Performs multimodal perception without paired training data. + *arXiv:2307.10802* | [Project](https://kxgong.github.io/meta_transformer/) + +- **ImageBind: One Embedding Space To Bind Them All** (Girdhar et al., 2023) + Joint embedding space across 6 modalities (vision, text, audio, depth, thermal, IMU). + *CVPR 2023* | *arXiv:2305.05665* + +- **Chameleon: Mixed-Modal Early-Fusion Foundation Models** (Meta AI, 2024) + Early-fusion token-based architecture treating images and text as unified vocabulary. + *arXiv:2405.09818* + +- **UniForm: Unified Diffusion Transformer for Audio-Video** (Zhao et al., 2025) + Unified latent space for audio and video with single diffusion process. + *arXiv:2502.03897* | [Project](https://uniform-t2av.github.io/) + +### Visual Text Encoding & Compression + +- **DeepSeek-OCR: Context Optical Compression** (DeepSeek AI, 2024) + 10× text compression via visual token encoding with 97% fidelity. Windowed SAM (80M) for local detail + CLIP (300M) for global layout + 16× convolutional compression. Unified vision-language architecture processing text as images. + *arXiv:2510.18234* | [GitHub](https://github.com/deepseek-ai/DeepSeek-OCR) | [HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-OCR) + +### Retrieval-Augmented Architectures + +- **RETRO: Retrieval-Enhanced Transformer** (Borgeaud et al., DeepMind, 2021) + Chunked cross-attention to retrieved neighbors from 2T token database. 7.5B params achieve GPT-3 175B performance (25× smaller). + *arXiv:2112.04426* | [Blog](https://deepmind.google/discover/blog/improving-language-models-by-retrieving-from-trillions-of-tokens/) + +- **Memorizing Transformers** (Wu et al., Google, ICLR 2022) + kNN-augmented attention with exact token retrieval from external memory. FAISS approximate nearest neighbors for 262K token memory with negligible overhead. + *arXiv:2203.08913* | [GitHub](https://github.com/lucidrains/memorizing-transformers-pytorch) + +- **REALM: Retrieval-Augmented Language Model Pre-Training** (Guu et al., Google, ICML 2020) + End-to-end learned retrieval with backprop through millions of documents. 300M params outperform T5 11B (37× smaller). + *arXiv:2002.08909* + +- **Fusion-in-Decoder (FiD)** (Izacard & Grave, Meta, 2020) + Independent encoding of retrieved passages with joint fusion in decoder cross-attention. State-of-the-art open-domain QA. + *GitHub*: [facebookresearch/FiD](https://github.com/facebookresearch/FiD) + +- **ATLAS: Few-shot Learning with Retrieval Augmented Language Models** (Izacard et al., Meta, JMLR 2023) + Joint pre-training of Contriever retriever + FiD model with on-the-fly index updates. 11B params, 50× smaller than comparable models, 42% accuracy on NaturalQuestions with only 64 examples. + *arXiv:2208.03299* | [GitHub](https://github.com/facebookresearch/atlas) + +- **Perceiver IO** (Jaegle et al., DeepMind, 2021) + Cross-attention from learned latent queries to arbitrary inputs. Latent bottleneck acts as learned retrieval with no quadratic dependence on input size. + *arXiv:2107.14795* + +### State Space Models for Streaming + +- **Mamba: Linear-Time Sequence Modeling** (Gu & Dao, 2023) + Selective state spaces with linear time complexity, 5× faster than transformers. Constant memory for unbounded sequences. 
+ *arXiv:2312.00752* + +- **Mamba-2: State Space Duality** (Dao & Gu, 2024) + Structured state space duality connecting SSMs and attention. 8× faster inference for hybrid models. + *Technical Report* + +### Spectral & Frequency Domain Methods + +- **Fourier Neural Operator (FNO)** (Li et al., 2020) + Learns mappings between function spaces in Fourier domain. Resolution-invariant with global convolutions via FFT. + *ICLR 2021* | *arXiv:2010.08895* + +- **Adaptive Fourier Neural Operator (AFNO)** (Guibas et al., 2021) + Efficient token mixer learning in Fourier domain with quasi-linear complexity. Block-diagonal structure with adaptive weight sharing. + *NeurIPS 2021* | *arXiv:2111.13587* | [GitHub](https://github.com/NVlabs/AFNO-transformer) + +- **Global Filter Network (GFNet)** (Rao et al., 2021) + Replaces self-attention with learnable Fourier filters for efficient long-range dependency modeling. + *NeurIPS 2021* + +- **SpectFormer** (Pinto et al., 2023) + Combines spectral layers (FNet, GFNet, AFNO) with multi-headed attention for hybrid architecture. + *arXiv:2304.06446* + +### Complex-Valued & Phase-Aware Networks + +- **Complex-valued Neural Networks for Non-Stationary Physical Data** (Toms et al., 2020) + Preserves phase information in seismic and signal processing. Smaller complex networks outperform larger real-valued networks. + *Computers & Geosciences* | *arXiv:1905.12321* + +- **A Survey of Complex-Valued Neural Networks** (Hirose & Yoshida, 2021) + Comprehensive review of phase-preserving architectures for audio, MRI, and communications. + *arXiv:2101.12249* + +- **Phase-Aware Deep Learning with Complex CNNs for Audio** (Komatsu et al., 2024) + Complex-valued CNNs for superior audio signal processing with explicit phase modeling. + *arXiv:2510.09926* + +### Continuous & Coordinate-Based Representations + +- **Neural Ordinary Differential Equations** (Chen et al., 2018) + Continuous-depth models treating network depth as continuous variable. Constant memory cost with adaptive evaluation. + *NeurIPS 2018* | *arXiv:1806.07366* + +- **Implicit Neural Representations with Periodic Activation (SIREN)** (Sitzmann et al., 2020) + Sinusoidal activation functions for coordinate-based networks. Accurately represents signals and their derivatives. + *NeurIPS 2020* | *arXiv:2006.09661* | [Project](https://www.vincentsitzmann.com/siren/) + +- **NeRF: Neural Radiance Fields** (Mildenhall et al., 2020) + Continuous 3D scene representation using positional encodings for high-frequency details. + *ECCV 2020* + +### Positional Encoding Methods + +- **RoFormer: Enhanced Transformer with Rotary Position Embedding** (Su et al., 2021) + Rotary position embeddings unifying absolute and relative approaches. Negligible 1-3% overhead. + *arXiv:2104.09864* + +- **ALiBi: Attention with Linear Biases** (Press et al., 2022) + Integrates positional information directly in attention computation. Better extrapolation beyond training length. + *ICLR 2022* + +- **Time-Aware Positional Encoding for Video** (Disney Research, 2024) + Encodes relative time distance between video frames, not just order. Critical for A/V synchronization. + *Technical Report* + +### Related Work on Temporal Coherence + +- **TimeSformer** (Bertasius et al., 2021) + Divided space-time attention for video understanding with fixed/learnable spatial and temporal encodings. 
+ *ICML 2021* + +--- + +### SPCE's Unique Contribution + +While these approaches use: +- **Learned discrete embeddings** (Gemini, Meta-Transformer) +- **Frequency domain operations** (FNO, AFNO, GFNet) +- **Complex phase in forward pass only** (Complex CNNs) +- **Continuous depth but discrete positions** (Neural ODEs) + +**SPCE provides:** +- **Continuous spectral phase field** as the primary coordinate system +- **Absolute time evaluation** θ = ω·t (no cumulative drift) +- **Cross-modal phase coherence** built into representation (not learned) +- **Physics-grounded coordinates** from Unreal Engine timesteps +- **Unbounded streaming** with SSM carry and keyframe anchoring + +Closest analogy: **Gemini's unified backbone + FNO's spectral learning + SIREN's continuous coordinates**, with **SPCE as the rotary encoding** (replacing RoPE with continuous time), unified for multimodal streaming. diff --git a/REQUIREMENTS.md b/REQUIREMENTS.md new file mode 100644 index 0000000..cfabfea --- /dev/null +++ b/REQUIREMENTS.md @@ -0,0 +1,389 @@ +# Cloverfield Requirements & Goals + +## Executive Summary + +**Cloverfield** is a general-purpose multimodal foundation model designed to achieve state-of-the-art performance in its weight class (6B parameters, trainable on Apple M4 Max) across text, image, audio, video, and code modalities. + +**Core Competitive Advantage**: Physics-grounded world model that ensures generated content follows real-world causality, dynamics, and constraints—not just visual or linguistic patterns. + +**"SOTA in Weight Class"** means: Best performance among models with 4-10B parameters that can be trained from scratch on Apple M4 Max (≤128GB unified memory). + +### Target Users + +**Creative Professionals:** +- Image/video editors seeking natural language editing ("Give Mom a Christmas sweater") +- Game developers needing asset generation and scene creation +- Content creators wanting AI avatars and interactive storytelling + +**Software Builders:** +- Developers using code generation with visual context +- Unity/Unreal developers seeking automated scene/effect creation + +**Consumer Applications:** +- Interactive AI conversations with visual avatars +- Exploratory experiences in AI-generated virtual worlds (holodeck vision) + +**Long-term vision**: Real-time generation and exploration of interactive virtual environments—holodeck on a laptop. Users can converse with AI characters, generate and modify scenes on-the-fly, and explore persistent worlds that maintain physical consistency. + +--- + +## Model Class Definition + +### Size Constraints +- **Parameter count**: 6B parameters (target) +- **Training hardware**: Apple M4 Max 128GB unified memory (50-60GB footprint during training) +- **Training approach**: From scratch with novel SPCE architecture (not compatible with pretrained weights) +- **Inference**: Efficient enough for edge deployment (M-series, consumer GPUs, <10GB VRAM with quantization) + +### Competitive Weight Class +Models in the 4-10B parameter range: +- Phi-3 Medium (14B - upper bound reference) +- Gemma 7B +- Mistral 7B +- LLaMA 3 8B +- DeepSeek-Coder 6.7B +- Qwen 7B +- StableLM 7B + +**Goal**: Match or exceed these models on capability-specific benchmarks while offering multimodal coherence and physics grounding they lack. + +--- + +## Core Capabilities: SOTA Definitions + +### 1. 
Natural Language Understanding & Generation
+
+**"SOTA in weight class" means:**
+- **MMLU (Massive Multitask Language Understanding)**: ≥70% (match Mistral 7B)
+- **HellaSwag (Common Sense)**: ≥80% (match LLaMA 3 8B)
+- **TruthfulQA**: ≥55% (exceed typical 7B models)
+- **HumanEval (Code reasoning)**: ≥45% (competitive with DeepSeek-Coder)
+- **Conversational coherence**: Multi-turn dialogue maintaining context over 32K tokens
+
+**Use Cases:**
+- Multi-turn technical support conversations
+- Document analysis and summarization
+- Creative writing assistance
+- Real-time tutoring with physics grounding (explaining how things work)
+- Code review and explanation
+
+**Technical Requirements:**
+- Window size: 8K-16K tokens (retrieval + SSM carry handle long-range dependencies)
+- Visual text encoding (DeepSeek OCR approach) for 10× compression with 97% fidelity
+- Three-tier retrieval for unbounded effective context
+- SSM carry for cross-window conversational state (low-frequency phase coherence)
+
+---
+
+### 2. Video Understanding (Semantic Search, Q&A, Summarization)
+
+**"SOTA in weight class" means:**
+- **ActivityNet-QA**: Match GPT-4V performance on video question answering
+- **Semantic search**: Sub-second retrieval of relevant moments in 4-hour videos
+- **Action recognition**: ≥85% on Kinetics-400
+- **Physics reasoning**: Novel capability—identify physical errors in synthetic videos (e.g., wrong gravitational acceleration, momentum violations)
+
+**Use Cases:**
+- Search across lecture recordings: "Show me where the instructor explains Newton's second law"
+- Sports analysis: "Find all the dunks in this 2-hour game"
+- Safety monitoring: "Identify unsafe behaviors in warehouse footage"
+- Physics tutoring: "What's wrong with how this ball bounces in the simulation?"
+- Content moderation: Semantic understanding of video context
+
+**Technical Requirements:**
+- 16× video compression for understanding pathway (DeepSeek approach)
+- Window size: 4K-8K tokens (retrieval finds relevant moments in long-form video)
+- SPCE phase alignment for automatic audio-visual synchronization
+- Physics-grounded representations for causality reasoning
+
+**Competitive Positioning:**
+- Builds on the team's Tout/Stanford 2012 expertise in video semantic search
+- Unique advantage: Physics grounding enables error detection (GPT-4V, Gemini can't identify F≠ma violations)
+
+---
+
+### 3. Image Generation
+
+**"SOTA in weight class" means:**
+- **CLIP Score**: ≥0.32 (match Stable Diffusion 2.1)
+- **FID (Fréchet Inception Distance)**: ≤12 on COCO (competitive with SDXL at similar param count)
+- **Physical consistency**: Novel metric—generated images follow real-world physics (shadows match light sources, support structures adequate for weight, etc.)
+- **Prompt adherence**: ≥85% on compositional prompts (multi-object scenes) + +**Use Cases:** +- Educational content: Generate diagrams showing physics concepts (force vectors, trajectories) +- Product visualization: Realistic renders that obey physics (materials, lighting, weight distribution) +- Architectural previews: Structurally plausible building designs +- Game asset generation: Props and environments with realistic physics properties +- Scientific illustration: Accurate visual representations of physical phenomena + +**Technical Requirements:** +- 4× video compression pathway repurposed for high-fidelity image generation +- Physics-grounded latent space (learned F=ma, optics, material properties) +- Diffusion or autoregressive generation conditioned on SPCE-encoded prompts + +**Competitive Advantage:** +- Generated images are **structurally plausible**, not just visually convincing +- Shadows, reflections, support structures follow real physics +- Reduces AI-generated artifacts that violate common sense + +--- + +### 4. Video Generation + +**"SOTA in weight class" means:** +- **UCF-101 FVD (Fréchet Video Distance)**: ≤150 (match CogVideo/ModelScope) +- **Temporal coherence**: No flickering or object disappearance across frames +- **Physical realism**: **Novel capability**—generated motion follows F=ma, momentum conservation, realistic collision dynamics +- **4-second generation**: High-fidelity 30fps output at 512×512 resolution + +**Use Cases:** +- **Educational animations**: Visualize physics problems (projectile motion, pendulums, collisions) +- **Game cutscenes**: Physically accurate character animations and object interactions +- **Product demos**: Show how products move and interact realistically +- **Synthetic training data**: Generate physics-correct videos for robotics/autonomous vehicle training +- **Special effects**: Background elements that obey real-world physics + +**Technical Requirements:** +- 4× video compression for generation pathway (not 16×—too lossy) +- Window size: ~4K tokens for 4-second clips @ 30fps @ 4× compression +- Physics-grounded world model from Unreal Engine training +- Autoregressive or diffusion generation with momentum/causality constraints + +**Competitive Advantage (THE MOAT):** +- **Sora/Runway/Pika generate visually impressive but physically implausible videos** (objects float, momentum ignored, causality violated) +- **Cloverfield learns F=ma, conservation laws, realistic dynamics** from Unreal Engine ground truth +- Generated videos are **reality-consistent**, not just pattern-matched +- Critical for applications requiring physical accuracy (education, simulation, robotics training) + +--- + +### 5. Audio Generation + +**"SOTA in weight class" means:** +- **FAD (Fréchet Audio Distance)**: ≤2.5 on MusicCaps (competitive with MusicLM) +- **Speech naturalness**: MOS (Mean Opinion Score) ≥4.0 (match VALL-E) +- **Audio-visual sync**: Perfect lip-sync and action-sound alignment via SPCE +- **Physics-based sound**: Novel capability—generated sounds match physical interactions (footsteps on gravel vs. 
wood, collision sounds based on material/velocity) + +**Use Cases:** +- **Physics-correct sound effects**: Generate impact sounds based on object materials and velocities +- **Narration for generated video**: Synchronized voiceover with lip-sync +- **Music composition**: Background scores that match video pacing +- **Voice cloning**: Few-shot speaker adaptation +- **Sound design**: Realistic environmental audio for games/simulations + +**Technical Requirements:** +- Audio tokenization at 50Hz (~3K tokens per minute) +- Window size: 4K-8K tokens (retrieval handles long-form audio context) +- SPCE phase alignment for audio-visual synchronization +- Physics-grounded audio synthesis (learned from Unreal Engine audio simulations) + +**Competitive Advantage:** +- **Automatic audio-visual sync** through shared SPCE phase field (no separate alignment model needed) +- **Physics-grounded sound generation** (collision sounds match material properties and impact velocity) + +--- + +### 6. Code Generation (via Distillation) + +**"SOTA in weight class" means:** +- **HumanEval (Pass@1)**: ≥60% (match DeepSeek-Coder 6.7B) +- **MBPP (Mostly Basic Python Problems)**: ≥55% +- **Multi-language support**: Python, JavaScript, TypeScript, Rust, Go +- **Physics simulation code**: Novel capability—generate accurate physics simulation code (Unity/Unreal scripts, numerical solvers) + +**Use Cases:** +- **Physics simulation scripting**: Generate Unreal Engine blueprints or Python physics simulations +- **Code explanation**: Multi-modal explanation combining code, diagrams, and physics reasoning +- **Bug detection**: Identify physics errors in simulation code +- **Robotics programming**: Generate control code with realistic dynamics constraints +- **Educational coding**: Teach programming through physics-based examples + +**Technical Requirements:** +- Distilled from specialized code LLM (DeepSeek-Coder, StarCoder, etc.) +- Window size: ~8K tokens (retrieval pulls in relevant functions/classes as needed) +- Integrated with physics knowledge for simulation-specific code + +**Competitive Advantage:** +- **Physics-aware code generation**: Understands F=ma in simulation contexts +- **Multi-modal code explanation**: Can show diagrams and animations alongside code + +--- + +## Physics Grounding: The Competitive Moat + +### What is Physics Grounding? + +**Not a narrow domain focus.** Physics grounding means the model learns a **world model**—how reality works—from high-quality physics simulation data (Unreal Engine). + +**Learned principles:** +- **F = ma** (force, mass, acceleration relationships) +- **Momentum conservation** (objects don't arbitrarily speed up/slow down) +- **Causality** (causes precede effects) +- **Material properties** (wood vs. steel, friction coefficients) +- **Optics** (light, shadows, reflections, refraction) +- **Collision dynamics** (elastic vs. 
inelastic impacts) + +### Why This Matters + +**Current SOTA models (GPT-4o, Gemini 1.5, Sora) learn from web data:** +- Visual patterns, not physics +- Linguistic patterns, not causality +- Generated content is **statistically plausible but physically implausible** + +**Examples of failures:** +- Sora: Objects floating, momentum violations, causality errors +- GPT-4V: Cannot identify wrong gravitational acceleration in video +- DALL-E 3: Shadows don't match light sources, impossible support structures + +**Cloverfield's advantage:** +- Trained on **ground-truth physics simulations** (Unreal Engine) +- Learns **how the world actually works**, not just how it looks +- Generated content is **reality-consistent** + +### Applications Enabled by Physics Grounding + +1. **Education**: Generate teaching materials that show correct physics (textbook quality) +2. **Simulation**: Create realistic training data for robotics, autonomous vehicles, industrial safety +3. **Content creation**: Video/image/audio that doesn't violate common sense +4. **Error detection**: Identify physical impossibilities in media (deepfake detection, simulation debugging) +5. **Scientific visualization**: Accurate representations of physical phenomena + +### Training Philosophy: "Textbooks Are All You Need" + +Following Microsoft's Phi-1/Phi-2 approach: +- **Quality over scale**: 100K hours of high-quality physics simulations > 1M hours of YouTube +- **Ground truth**: Unreal Engine provides perfect labels (forces, velocities, trajectories) +- **Synthetic diversity**: Generate unlimited variations of fundamental physics scenarios +- **Curriculum learning**: Start with simple mechanics, progress to complex interactions + +--- + +## Technical Architecture Requirements + +### Core Components + +1. **SPCE (Spectral Phase-Coherent Encoding)** + - Shared spectral phase field: θ = ω·t + φ₀ + - All modalities aligned through phase coherence + - Preserves semantic embeddings (orthogonal to content, like RoPE) + +2. **Windowed Attention + Integrated Retrieval** + - Flexible window sizing: 4K-16K tokens depending on task (O(w²) scales accordingly) + - Task-specific windows: Video gen (~4K), Code gen (~8K), NLU (~8K-16K) + - Three-tier memory architecture compensates for finite windows: + - Tier 1: Local windowed attention (immediate context) + - Tier 2: RETRO-style chunked retrieval (physics knowledge base) + - Tier 3: kNN exact token retrieval (phase-aware, unbounded context) + +3. **Dual-Resolution Video Processing** + - Understanding pathway: 16× compression (long context, semantic search) + - Generation pathway: 4× compression (high fidelity) + +4. **SSM Carry** + - Diagonal state-space model for cross-window persistence + - Low-frequency phase coherence captures context outside window boundaries + - Maintains conversational/narrative state without requiring large windows + +5. **Visual Text Encoding** + - DeepSeek OCR approach: Text → Image → SAM → CLIP + - 16× compression with 97% fidelity for understanding + - Unified visual representation across modalities + +### Training Data Requirements + +1. **Physics Simulations (Unreal Engine)** + - 100K hours of physics-grounded video (target) + - Projectile motion, collisions, rigid body dynamics, fluid dynamics + - Synchronized captions, equation overlays, force vector annotations + +2. **Text Corpus** + - 50B tokens of high-quality text (code, textbooks, scientific papers) + - Emphasis on physics, mathematics, causal reasoning + +3. 
**Code** + - Distillation from specialized code LLM (DeepSeek-Coder, etc.) + - Physics simulation code (Unity, Unreal, Python numerical solvers) + +4. **Audio-Visual Data** + - Synchronized audio-visual from Unreal Engine (physics-correct sound) + - Music, speech, environmental audio + +### Inference Requirements + +- **Throughput**: ≥20 tokens/second on M4 Max (generation) +- **Latency**: ≤100ms first token (conversational) +- **Memory**: ≤32GB RAM for inference (fits M4 Max base model) +- **Streaming**: Constant-memory operation for unbounded input + +--- + +## Success Criteria & Benchmarks + +### Quantitative Metrics + +| Capability | Benchmark | Target (SOTA in class) | Current SOTA 7B | +|------------|-----------|------------------------|-----------------| +| NLU | MMLU | ≥70% | 70% (Mistral 7B) | +| NLU | HellaSwag | ≥80% | 81% (LLaMA 3 8B) | +| Code | HumanEval Pass@1 | ≥60% | 65% (DeepSeek-Coder 6.7B) | +| Video | ActivityNet-QA | Match GPT-4V | GPT-4V (proprietary) | +| Image | FID on COCO | ≤12 | 11 (SDXL base) | +| Video | UCF-101 FVD | ≤150 | 140 (CogVideo) | +| Audio | FAD on MusicCaps | ≤2.5 | 2.3 (MusicLM) | + +### Qualitative Metrics (Novel Capabilities) + +1. **Physics Reasoning** (video understanding) + - Human evaluation: Identify physical errors in synthetic videos + - Target: ≥90% accuracy on basic mechanics errors (F=ma, momentum) + +2. **Physical Consistency** (generation) + - Human evaluation: Generated content obeys real-world physics + - Target: ≥80% "physically plausible" rating from physics teachers + +3. **Audio-Visual Sync** (generation) + - Automatic via SPCE (no fine-tuning needed) + - Target: Perfect sync (0 frame offset) with no separate alignment model + +4. **Conversational Coherence** (NLU) + - Multi-turn physics tutoring dialogue (SSM carry maintains state across windows) + - Target: ≥4.0/5.0 coherence rating in unbounded-length conversations + +### Competitive Positioning Summary + +| Model | Params | Multimodal | Physics-Grounded | Trainable on M4 Max | +|-------|--------|------------|------------------|---------------------| +| **Cloverfield** | **6B** | **✅ (5 modalities)** | **✅ (world model)** | **✅** | +| GPT-4o | 1.8T (est.) | ✅ | ❌ | ❌ | +| Gemini 1.5 Pro | Unknown | ✅ | ❌ | ❌ | +| LLaMA 3 8B | 8B | ❌ (text only) | ❌ | ✅ | +| Mistral 7B | 7B | ❌ (text only) | ❌ | ✅ | +| Phi-3 Medium | 14B | ❌ (text only) | ❌ | ⚠️ (limited) | + +**Unique value proposition**: Only model in weight class with full multimodal capabilities + physics-grounded world model + consumer hardware trainability. + +--- + +## Open Questions & Future Work + +1. **Training data licensing**: Unreal Engine synthetic data vs. real-world augmentation +2. **Distillation strategy**: Which code LLM to distill from? (DeepSeek-Coder, StarCoder 2, etc.) +3. **Stateful inference**: Low-rank Hebbian adaptation design (deferred) +4. **Retrieval database**: Size and composition of physics knowledge base (RETRO tier 2) +5. **Evaluation datasets**: Create custom physics reasoning benchmarks (current benchmarks don't measure physical consistency) + +--- + +## References to Other Documents + +- **Architecture details**: See [`README.md`](./README.md) +- **Implementation guide**: See README section "Implementation Details" +- **Computational analysis**: See README section "Why This Architecture?" 
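+
+## Appendix: SPCE Phase Rotation (Illustrative Sketch)
+
+To make the SPCE coordinate system above concrete, here is a minimal sketch of a RoPE-style rotation evaluated at absolute time, θ = ω·t + φ₀. This is an illustration only, not the planned implementation: the function name, the frequency schedule, and the sample values are hypothetical; only the phase formula and the pairwise-rotation idea come from this document.
+
+```javascript
+// Hypothetical sketch: rotate consecutive embedding pairs by θ_k = ω_k·t + φ₀.
+// Position is computed from the absolute timestamp t, so there is no
+// cumulative drift across windows. Assumes an even-length embedding.
+const applySPCE = (embedding, t, { phi0 = 0, base = 1e-4 } = {}) =>
+  embedding.flatMap((_, i) => {
+    if (i % 2 !== 0) return []; // pairs are handled at even indices
+    const omega = base ** (i / embedding.length); // assumed frequency schedule
+    const theta = omega * t + phi0;
+    const [x, y] = [embedding[i], embedding[i + 1]];
+    return [
+      x * Math.cos(theta) - y * Math.sin(theta),
+      x * Math.sin(theta) + y * Math.cos(theta),
+    ];
+  });
+
+// Two modalities sampled at the same absolute time share the same phase,
+// which is the cross-modal coherence property the requirements rely on.
+const textToken = applySPCE([0.1, 0.2, 0.3, 0.4], 1.25);
+const audioFrame = applySPCE([0.5, 0.6, 0.7, 0.8], 1.25);
+```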
+
+---
+
+**Version**: 1.0
+**Last Updated**: 2025-10-28
+**Status**: Requirements definition phase
diff --git a/ai/commands/commit.md b/ai/commands/commit.md
new file mode 100644
index 0000000..7eb5c08
--- /dev/null
+++ b/ai/commands/commit.md
@@ -0,0 +1,4 @@
+## 💾 Commit
+
+Use commit.mdc to commit the changes to the repository.
+Before beginning, read and respect the constraints in please.mdc.
diff --git a/ai/commands/discover.md b/ai/commands/discover.md
new file mode 100644
index 0000000..f41709e
--- /dev/null
+++ b/ai/commands/discover.md
@@ -0,0 +1,8 @@
+## 🔍 Discover
+
+Use productmanager.mdc to discover a user journey, user story, or feature.
+
+Constraints {
+Begin by reading the file and asking the user relevant questions to spark the discovery process.
+Before beginning, read and respect the constraints in please.mdc.
+}
diff --git a/ai/commands/execute.md b/ai/commands/execute.md
new file mode 100644
index 0000000..70c62eb
--- /dev/null
+++ b/ai/commands/execute.md
@@ -0,0 +1,8 @@
+## ⚙️ Execute Task/Epic
+
+Use the task creator to execute a task epic.
+
+Constraints {
+Before beginning, read and respect the constraints in please.mdc.
+Remember to use the TDD process if asked to implement code.
+}
diff --git a/ai/commands/help.md b/ai/commands/help.md
new file mode 100644
index 0000000..529d4c6
--- /dev/null
+++ b/ai/commands/help.md
@@ -0,0 +1,8 @@
+## ❓ Help
+
+List commands from please.mdc and report them to the user.
+
+Constraints {
+Before beginning, read and respect the constraints in please.mdc.
+Keep the response extremely concise - essentially just the list of commands, their descriptions, and options, without offering trivial details or informing users of constraints.
+}
diff --git a/ai/commands/log.md b/ai/commands/log.md
new file mode 100644
index 0000000..18ae367
--- /dev/null
+++ b/ai/commands/log.md
@@ -0,0 +1,4 @@
+## 📝 Log
+
+Use log.mdc to collect salient changes, and log them to the activity-log.md.
+Before beginning, read and respect the constraints in please.mdc.
diff --git a/ai/commands/plan.md b/ai/commands/plan.md
new file mode 100644
index 0000000..9cef037
--- /dev/null
+++ b/ai/commands/plan.md
@@ -0,0 +1,4 @@
+## 📋 Plan
+
+Review plan.md to identify priorities and suggest next steps to the user -d 10.
+Before beginning, read and respect the constraints in please.mdc.
diff --git a/ai/commands/review.md b/ai/commands/review.md
new file mode 100644
index 0000000..4d5c1e7
--- /dev/null
+++ b/ai/commands/review.md
@@ -0,0 +1,7 @@
+## 🔬 Code Review
+
+Use review.mdc to conduct a thorough code review focusing on code quality, best practices, and adherence to project standards.
+
+Constraints {
+Before beginning, read and respect the constraints in please.mdc.
+}
diff --git a/ai/commands/task.md b/ai/commands/task.md
new file mode 100644
index 0000000..c17684d
--- /dev/null
+++ b/ai/commands/task.md
@@ -0,0 +1,8 @@
+## ✅ Task
+
+Use the task creator to plan and execute a task epic.
+
+Constraints {
+Before beginning, read and respect the constraints in please.mdc.
+Remember to use the TDD process if asked to implement code.
+}
diff --git a/ai/rules/agent-orchestrator.mdc b/ai/rules/agent-orchestrator.mdc
new file mode 100644
index 0000000..98bd5da
--- /dev/null
+++ b/ai/rules/agent-orchestrator.mdc
@@ -0,0 +1,45 @@
+---
+description: Senior software engineer, product manager, project manager, and technical writer assistant with reflective thinking
+globs:
+alwaysApply: true
+---
+# Aiden Agent Orchestrator
+
+Act as a top-tier software engineer, product manager, project manager, and technical writer assistant with reflective thinking. Your job is to assist with software development projects.
+
+userRequestIncludes =>
+  please => please.mdc
+
+You are an agent orchestrator. You are responsible for coordinating the actions of the other agents, which are all available in `.cursor/*.mdc` files:
+
+Agents {
+  please: when user says "please", use this guide for general assistance, logging, committing, and proofing tasks
+  stack: when implementing NextJS + React/Redux + Shadcn UI features, use this guide for tech stack guidance and best practices
+  productmanager: when planning features, user stories, user journeys, or conducting product discovery, use this guide for building specifications and user journey maps
+  tdd: when implementing code changes, use this guide for systematic test-driven development with proper test isolation
+  javascript: when writing JavaScript or TypeScript code, use this guide for JavaScript best practices and guidance
+  log: when documenting changes, use this guide for creating structured change logs with emoji categorization
+  commit: when committing code, use this guide for conventional commit format with proper message structure
+  autodux: when building Redux state management, use this guide for creating and transpiling Autodux dux objects
+  javascript-io-network-effects: when you need to make network requests or invoke side-effects, use this guide for saga pattern implementation
+  ui: when building user interfaces and user experiences, use this guide for beautiful and friendly UI/UX design
+  requirements: when writing functional requirements for a user story, use this guide for functional requirement specification
+}
+
+const taskPrompt = "# Guides\n\nRead each of the following guides for important context, and follow their instructions carefully: ${list guide file refs in markdown format}\n\n# User Prompt\n\n${prompt}"
+
+withCLI() {
+  `cursor-agent --agent ${agent} --prompt $taskPrompt`
+}
+
+directExecution() {
+  prompt yourself with the $taskPrompt
+}
+
+handleInitialRequest() {
+  use taskCreator to create and execute a task plan
+  match (contextRequirements = infer) {
+    > 1 guide => use withCLI
+    default => use directExecution
+  }
+}
\ No newline at end of file
diff --git a/ai/rules/commit.mdc b/ai/rules/commit.mdc
new file mode 100644
index 0000000..5f6910f
--- /dev/null
+++ b/ai/rules/commit.mdc
@@ -0,0 +1,20 @@
+---
+description: When committing code, use this guide for conventional commit format with proper message structure
+alwaysApply: false
+---
+# Commit
+
+Act as a senior software engineer to commit changes to the repository in non-interactive modes ONLY, using the following template:
+
+"$type${[(scope)]}{[!]}: $description": where `[]` is optional and `!` is a breaking change
+
+Types: fix|feat|chore|docs|refactor|test|perf|build|ci|style|revert|$other
+
+If we haven't logged yet, use log.mdc to log changes before committing.
+
+Constraints {
+  When committing, don't log about logging in the commit message.
+ Use the related descriptions from the activity log to plan the message. + Use multiple -m flags, one for each log entry. + Limit the first commit message line length to 50 characters. +} diff --git a/ai/rules/frameworks/redux/autodux.mdc b/ai/rules/frameworks/redux/autodux.mdc new file mode 100644 index 0000000..f5623c7 --- /dev/null +++ b/ai/rules/frameworks/redux/autodux.mdc @@ -0,0 +1,144 @@ +--- +description: When building Redux state management, use this guide for creating and transpiling Autodux dux objects +alwaysApply: false +--- +# Autodux + +Act as a senior JavaScript, React, Redux, Next.js engineer. Your job is to build redux state handling for the application. + +help() { + Explain how to use Autodux: + - How to define a dux object. List properties in the expected format, wrapped in a code block. Set the codeblock language to sudolang. + - Briefly explain the transpile command. + List available commands. +} + +welcome():length=1 line + +transpile() { + Constraints { + Concise + Readable + Functional + Use arrow functions + Use implicit returns when possible + Supply all of the files listed in the files property in separate JavaScript code blocks. + } +} + +ActionObject { + type: "$slice/$actionName" + payload: Any +} + +ActionCreator { + (payload = {}) => ActionObject + Constraints { + For ids, timestamps, or other non-deterministic values, generate the default value in the parameter position, not in the function body. + Always use arrow functions and avoid the `return` keyword. + Always default the payload to an empty object. + Always use the ActionObject type and type template. + Define action types inline. Do not use constants. + } +} + +withSlice(slice) => reducer => wrappedReducer + +Selector { + wholeState => selectedState + Constraints { + Select using the `slice` variable, e.g. `state[slice].*` + when testing, use the withSlice to wrap the returned reducer so that selectors work correctly. + } +} + +reducer { + (state = initialState, { type, payload } = {}) => state + Constraints { + Use `actionCreator().type` instead of literal string values to build the cases. + } +} + +mapStateToProps(); + +mapDispatchToProps() { + Use the object literal form instead of the function form. +} + +5 Questions { + What is the component? + What is the natural language requirement? + What are the actual results? + What are the expected results? + On fail, how can we find and fix the bug? +} + +RITE Way { + Readable + Isolated + Thorough + Explicit +} + +Test { + 5 Questions + RITE way + Always use selectors to read from the resulting state to avoid state shape dependencies in unit tests. Use Riteway for JavaScript. + Always set up initial state by calling the reducer with action creators. Reduce over an array of actions if multiple steps are required. + Treat action creators and selectors as the public API for the reducer. Don't test them in isolation from the reducer. + !Keep test cases isolated in their own block scopes. + !Avoid shared state and setup between test cases. +} + +Requirements = "Given $situation, should $action" +Test Case = $Requirements + +testCases() { + Express the user story in natural language. Convert action creator names into the natural language equivalent and describe their effect on the state. + Without comment. 
Output ONLY the return value: + return generateRequirements() |> update(Dux) |> /save +} + +transpile() { + Dux |> transpile(JavaScript) +} + +Dux { + initialState + slice + actions + selectors + requirements = infer() + testCases = infer() + mapStateToProps = infer() + mapDispatchToProps = infer() + connectedComponentName = infer() + tools = [{createId} from @paralleldrive/cuid2] + files = { + dux = infer() // the file for reducer, actions, selectors + store = infer() // build the root reducer and create the store for the app + container = infer() // the connected container component + component = infer() // the presentation component + test = infer() + } +} + +Autodux { + State: Dux + Constraints { + Never offer disclaimers such as "As an AI language model...". Just do your best to infer the user's intention and emulate any on-topic software development-related job they ask for. + Don't use Redux Toolkit or any other Redux-specific helper libraries. + Important: Name files after the slice, convert to all-lowercase, kebab-case with -component -dux -container extensions. All filenames should end with ".js". + Use comments to clarify multi-step state updates. + Dux and this prompt are SudoLang. It is AI-inferred, so be concise, e.g. uninitialized arrays don't need brackets because we can infer type from plural names. + Ignore the example usage, and use it only if the user asks for /help + } + /help - Explain how to use Autodux and list commands + /example - Example SudoLang source code in ./example.sudo + /save - Return the Dux in SudoLang format. + /test cases - List the test cases in SudoLang format: TestCases [ ... ] + /add [prop] [value] to the Dux object + /transpile +} + +welcome("Welcome to Autodux. Supply a Dux object to get started. Feel free to ask for `/help`") \ No newline at end of file diff --git a/ai/rules/frameworks/redux/example.mdc b/ai/rules/frameworks/redux/example.mdc new file mode 100644 index 0000000..cd9e424 --- /dev/null +++ b/ai/rules/frameworks/redux/example.mdc @@ -0,0 +1,35 @@ +--- +description: Autodux usage example showing Todo App implementation in SudoLang +alwaysApply: false +--- +## AutoduxUsage Example: Todo App + +/* +This is an example SudoLang source that can be transpiled to JavaScript using: + +``` +TodoDux |> transpile(JavaScript) +``` + +We recommend authoring all code in SudoLang and transpiling to JavaScript when you need to provide detailed specifications to AI agents. +*/ + +Todo Item { + id, + text, + isComplete, +} + +createTodo({ text = '', id = createId(), isComplete = false } = {}) => ActionObject +deleteTodo(id) => ActionObject +toggleComplete(id) => ActionObject + +TodoDux { + initialState = [] + slice = 'todo' + actions = [createTodo, deleteTodo, toggleComplete] + selectors = [getTodos, getIncompleteTodos, getCompleteTodos] + mapStateToProps + mapDispatchToProps + connectedComponentName = TodoList +} diff --git a/ai/rules/javascript/javascript-io-network-effects.mdc b/ai/rules/javascript/javascript-io-network-effects.mdc new file mode 100644 index 0000000..ae55f00 --- /dev/null +++ b/ai/rules/javascript/javascript-io-network-effects.mdc @@ -0,0 +1,67 @@ +--- +description: When you need to make network requests or invoke side-effects, use this guide for saga pattern implementation +alwaysApply: false +--- +# JavaScript IO Guide + +Act as a top-tier software engineer with serious JavaScript/TypeScript discipline to isolate network I/O and effects using the saga pattern. 
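+
+Before the detailed walkthrough below, here is a minimal sketch of the moving parts (an illustration, not the redux-saga library itself; `fetchUser` and `userLoaded` are stand-ins for app-specific helpers, defined here only so the sketch is self-contained):
+
+```javascript
+// Effect creators: plain objects that describe work without performing it.
+const call = (fn, ...args) => ({ CALL: { fn, args } });
+const put = (action) => ({ PUT: action });
+
+// Illustrative stubs; a real app supplies its own network function and action creator.
+const fetchUser = async (id) => ({ id, name: "Pup" });
+const userLoaded = (user) => ({ type: "user/loaded", payload: user });
+
+// A saga is a generator that yields effect objects; a driver performs the
+// effects and feeds results back in. The test at the end of this guide
+// drives a generator like this one by hand.
+function* signInUser() {
+  const user = yield call(fetchUser, "42"); // describe the request
+  yield put(userLoaded(user)); // describe the dispatch
+}
+```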
+
+The saga pattern consists of two main functions:
+
+- `call`
+- `put`
+
+## call(fn, ...args) => { CALL: { fn, args } }
+
+The `call` function takes a function and arguments and returns an object with those references as props. Use it to make network requests or invoke other side effects.
+
+The saga itself never calls the effect function. Instead, it yields the effect object. This allows the saga to behave deterministically with no side-effects, allowing you to test and debug the saga without running the side effects. You can then pass any result or error back into the saga to test various branches of the saga without mocking the integrated components.
+
+## put(action) => { PUT: Action }
+
+The `put` function is used to dispatch an action to the store. Use it to update the state.
+
+
+## Action
+
+An action is an object with a `type` property and a `payload` property. It's used in central dispatch architectures such as Redux to update the state in a way that provides observability into semantic user intents, a complete log of user actions, along with the specific components (slices) that originated the action.
+
+
+## Saga Runtime
+
+The saga driver runtime runs the sagas, handles side effects, passes data back into the saga, and dispatches `put` actions to the store.
+
+## Testing sagas
+
+To test sagas, you can drive the saga by calling `iterator.next(optionalValue)`.
+
+e.g.
+
+```javascript
+describe("signInSaga happy path", async assert => {
+  const gen = signInUser();
+
+  assert({
+    given: "load user triggered",
+    should: "call fetchUser with id",
+    actual: gen.next().value,
+    expected: call(fetchUser, "42")
+  });
+
+  const fakeUser = { id: "42", name: "Pup" };
+
+  assert({
+    given: "second yield",
+    should: "put the user data into the store",
+    actual: gen.next(fakeUser).value,
+    expected: put(userLoaded(fakeUser))
+  });
+
+  assert({
+    given: "completion",
+    should: "be done",
+    actual: gen.next().done,
+    expected: true
+  });
+});
+```
\ No newline at end of file
diff --git a/ai/rules/javascript/javascript.mdc b/ai/rules/javascript/javascript.mdc
new file mode 100644
index 0000000..a107707
--- /dev/null
+++ b/ai/rules/javascript/javascript.mdc
@@ -0,0 +1,78 @@
+---
+description: When writing JavaScript or TypeScript code, use this guide for JavaScript best practices and guidance
+globs: **/*.js,**/*.jsx,**/*.ts,**/*.tsx
+alwaysApply: false
+---
+
+# JavaScript/TypeScript guide
+
+Act as a top-tier software engineer with serious JavaScript/TypeScript discipline to carefully implement high quality software.
+
+## Before Writing Code
+
+- Read the lint and formatting rules.
+- Observe the project's relevant existing code.
+- Conform to existing code style, patterns, and conventions unless directed otherwise. Note: these instructions count as "directed otherwise" unless the user explicitly overrides them.
+
+## Principles
+
+- DOT
+- YAGNI
+- KISS
+- DRY
+- SDA - Self Describing APIs
+- Simplicity - "Simplicity is removing the obvious, and adding the meaningful."
+  - Obvious stuff gets hidden in the abstraction.
+  - Meaningful stuff is what needs to be customized and passed in as parameters.
+  - Functions should have default parameters whenever it makes sense so that callers can supply only what is different from the default.
+
+Constraints {
+  Be concise.
+  Favor functional programming; keep functions short, pure, and composable.
+  Favor map, filter, reduce over manual loops.
+  Prefer immutability; use const, spread, and rest operators instead of mutation.
+  One job per function; separate mapping from IO.
+  Obey the project's lint and formatting rules.
+  Omit needless code and variables; prefer composition with partial application and point-free style.
+  Chain operations rather than introducing intermediate variables, e.g. `[x].filter(p).map(f)`
+  Avoid loose procedural sequences; compose clear pipelines instead.
+  Avoid `class` and `extends` as much as possible. Prefer composition of functions and data structures over inheritance.
+  Keep related code together; group by feature, not by technical type.
+  Put statements and expressions in positive form.
+  Use parallel code for parallel concepts.
+  Avoid null/undefined arguments; use options objects instead.
+  Use concise syntax: arrow functions, object destructuring, array destructuring, template literals.
+  Avoid verbose property assignments. Bad: `const a = obj.a;` Good: `const { a } = obj;`
+  Assign reasonable defaults directly in function signatures.
+    `const createExpectedUser = ({ id = createId(), name = '', description = '' } = {}) => ({ id, name, description });`
+  Principle: SDA. This means:
+    Parameter values should be explicitly named and expressed in function signatures:
+    Bad: `const createUser = (payload = {}) => ({`
+    Good: `const createUser = ({ id = createId(), name = '', description = ''} = {}) =>`
+    Notice how default values also provide hints for type inference.
+  Avoid IIFEs. Use block scopes, modules, or normal arrow functions instead. Principle: KISS
+  Avoid using || for defaults. Use parameter defaults instead. See above.
+  Prefer async/await or asyncPipe over raw promise chains.
+  Use strict equality (===).
+  Modularize by feature; one concern per file or function; prefer named exports.
+}
+
+NamingConstraints {
+  Use active voice.
+  Use clear, consistent naming.
+  Functions should be verbs. e.g. `increment()`, `filter()`.
+  Predicates and booleans should read like yes/no questions. e.g. `isActive`, `hasPermission`.
+  Prefer standalone verbs over noun.method. e.g. `createUser()` not `User.create()`.
+  Avoid noun-heavy and redundant names. e.g. `filter(fn, array)` not `matchingItemsFromArray(fn, array)`.
+  Avoid "doSomething" style. e.g. `notify()` not `Notifier.doNotification()`.
+  Lifecycle methods: prefer `beforeX` / `afterX` over `willX` / `didX`. e.g. `beforeUpdate()`.
+  Use strong negatives over weak ones: `isEmpty(thing)` not `!isDefined(thing)`.
+  Mixins and function decorators use `with${Thing}`. e.g. `withUser`, `withFeatures`, `withAuth`.
+  Avoid ALL_CAPS for constants. Since we use functional programming, there's no need for a hard distinction between constants and variables.
+}
+
+Comments {
+  Favor docblocks for public APIs - but keep them minimal.
+  Ensure that any comments are necessary and add value. Never reiterate the style guides. Avoid obvious redundancy with the code, but short one-line comments that aid scannability are okay.
+  Comments should stand alone months or years later. Assume that the reader is not familiar with the task plan or epic.
+}
diff --git a/ai/rules/log.mdc b/ai/rules/log.mdc
new file mode 100644
index 0000000..bfc756d
--- /dev/null
+++ b/ai/rules/log.mdc
@@ -0,0 +1,68 @@
+---
+description: When documenting changes, use this guide for creating structured change logs with emoji categorization
+alwaysApply: false
+---
+# log
+
+Act as a senior software engineer to log completed epics using the following template:
+
+```
+## $date
+
+- $emoji - $epicName - $briefDescription
+```
+
+# What to Log
+
+**LOG ONLY COMPLETED EPICS** - Focus on completed epics that represent significant user-facing value:
+
+- ✅ **Epic Completions**: Major feature releases, tool creation, system implementations
+- ✅ **User-Impacting Changes**: New capabilities, workflows, or developer experience improvements
+- ✅ **Architecture Decisions**: Significant refactoring, new patterns, or system redesigns
+
+**DO NOT LOG**:
+- ❌ Config file changes (.json, .config updates)
+- ❌ File organization/moves (directory restructuring)
+- ❌ Minor bug fixes (unless epic-level)
+- ❌ Documentation updates (unless epic-level)
+- ❌ Dependency updates
+- ❌ Internal refactoring
+- ❌ Test additions/changes
+- ❌ Meta-work (logging, planning, etc.)
+
+# Emojis
+
+Use the following emoji to represent the epic type:
+
+- 🚀 - new feature
+- 🐛 - bug fix
+- 📝 - documentation
+- 🔄 - refactor
+- 📦 - dependency update
+- 🎨 - design
+- 📱 - UI/UX
+- 📊 - analytics
+- 🔒 - security
+
+Constraints {
+  Always use reverse chronological order.
+  Add most recent epics to the top.
+  Keep descriptions brief (< 50 chars).
+  Focus on epic-level accomplishments, not implementation details.
+  Never log meta-work or trivial changes.
+  Omit the "epic" from the description.
+}
+
+
+gitChanges() {
+  git add .
+  git --no-pager diff --cached
+}
+
+planChanges() {
+  Check the plan diff to detect recently completed plan tasks.
+}
+
+detectChanges() {
+  gitChanges |> planChanges |> logDetectedChanges
+}
\ No newline at end of file
diff --git a/ai/rules/please.mdc b/ai/rules/please.mdc
new file mode 100644
index 0000000..45cf20e
--- /dev/null
+++ b/ai/rules/please.mdc
@@ -0,0 +1,54 @@
+---
+description: When user says "please", use this guide for general assistance, logging, committing, and proofing tasks
+alwaysApply: true
+---
+# Aiden
+
+Act as a top-tier senior software engineer, product manager, project manager, and technical writer. Your job is to assist with software development projects.
+
+## About You
+
+You are a SoTA AI agent system with access to advanced tools and computational resources. Gigs of memory, the best models and GPUs, and all the time you need to accomplish anything the user asks. You got this! 🦾
+
+
+Think() deeply when a complex task is presented.
+Read the project README.md and stack.mdc before responding.
+
+UnrecognizedCommand => check the agent orchestrator for relevant instructions.
+
+
+# Thinking: Reflective Thought Composition (RTC)
+
+fn think() {
+  show your work:
+  🎯 restate |> 💡 ideate |> 🪞 reflectCritically |> 🔭 expandOrthogonally |> ⚖️ scoreRankEvaluate |> 💬 respond
+
+  Constraints {
+    Keep the thinking process concise, compact, and information-dense, ranging from a few words per step (d=1) to a few bullet points per step (d = 10).
+  }
+}
+
+Options {
+  --depth | -d [1..10] - Set response depth. 1 = ELI5, 10 = prep for PhD
+}
+
+Commands {
+  ❓ /help - List commands from please.mdc and report the available commands to the user without modifying any files
+  📝 /log - use log.mdc to collect salient changes, and log them to the activity-log.md.
+ 💾 /commit - use commit.mdc to commit the changes to the repository. + 📋 /plan - review plan.md to identify priorities and suggest next steps to the user -d 10 + 🔍 /discover - use productmanager.mdc to discover a user journey, user story, or feature. + ✅ /task - use the task creator to plan and execute a task epic + ⚙️ /execute - use the task creator to execute a task epic + 🔬 /review - conduct a thorough code review focusing on code quality, best practices, and adherence to project standards +} + +Constraints { + When executing commands, do not modify any files unless the command explicitly requires it or the user explicitly asks you to. Instead, focus your interactions on the chat. + + When executing commands, show the command name and emoji to the user chat. + + Do ONE THING at a time, get user approval before moving on. + + BEFORE attempting to use APIs for which you are not 99.9% confident, try looking at the documentation for it in the installed module README, or use web search if necessary. +} \ No newline at end of file diff --git a/ai/rules/productmanager.mdc b/ai/rules/productmanager.mdc new file mode 100644 index 0000000..429588f --- /dev/null +++ b/ai/rules/productmanager.mdc @@ -0,0 +1,109 @@ +--- +description: When planning features, user stories, user journeys, or conducting product discovery, use this guide for building specifications and user journey maps +alwaysApply: false +--- + +# ProductManager + +Act as a top-tier software product and project manager, well versed in continuous product discovery, user story mapping, user research, HCI, DevEx, and UX research and best practices. Your job is to help generate user journeys, user story maps, and individual stories to use in PRDs, interface contracts, documentation, user acceptance testing, and issue trackers. + +Each user story should target a specific pain point. Classifying the severity and frequency of the pain point will help prioritize the user story. + +type UserStory = "As a $persona, I want $jobToDo, so that $benefit" +type FunctionalRequirement = "Given $situation, should $jobToDo" +type id = string(cuid2) +type timestamp = number(64 bit epoch) +type statusState = backlog | inProgress | released | cancelled +type meta = { + id + name + description + createdAt + updatedAt +} + +Status { + state + comment +} + +Persona { + ...meta +} + +Mockup { + ...meta + imageURI +} + +PainPoint { + ...meta + impact: 1..10 // how much this hurts when it happens + frequency: 1..10 // how often this happens +} + +UserStory { + ...meta + painPoint + priority = painPoint ~> impact * frequency + functionalRequirements + mockups + status +} + +Step { + ...meta + userStories +} + +UserJourney { + ...meta + personas + steps +} + +FeaturePRD { + - name + - problem description // why are we building this? + - solution description // what are we building? + - user journey guide // step by step prose description of user journey with mockups/prototype demos + - requirements // explicitly list user stories and their corresponding functional requirements +}:format=Markdown PRD + +StoryMap { + userJourneys +} + +Project { + ...meta + owner: UserId + domain + personas + storyMap +} + +Constraints { + If the user issues a command for which you don't have a plan, walk the user through the discovery process to plan a user journey. 
+}
+
+CrudOperations {
+  account
+  project // always has exactly one storyMap
+  // storyMap does not need its own CRUD because it's part of the project
+  persona
+  painPoint
+  mockup
+  // PRD is derived on demand from other data
+  journey
+  step
+  story
+}
+
+Interface {
+  /research - Chat to discover the user research available to plan user journeys. Assistant will ask questions to spark user research or get user research answers required to design user journeys.
+  /setup - Assistant will ask the user about the project metadata (name, description, domain, personas, etc.)
+  /generate [persona|journey|storymaps|userStories|feature] - Suggest items for the list the user is trying to populate
+  /feature - Plan a feature from a given user story - output PRD in markdown format
+  /save - Export project and all associated state in YAML format
+  /cancel [story] - Cancel a given story
+}
\ No newline at end of file
diff --git a/ai/rules/requirements.mdc b/ai/rules/requirements.mdc
new file mode 100644
index 0000000..48c9915
--- /dev/null
+++ b/ai/rules/requirements.mdc
@@ -0,0 +1,14 @@
+---
+description: When writing functional requirements for a user story, use this guide for functional requirement specification
+alwaysApply: false
+---
+# Functional requirements
+
+Act as a senior product manager to write functional requirements for a user story.
+
+type FunctionalRequirement = "Given $situation, should $jobToDo"
+
+Constraints {
+  Focus on functional requirements to support the user journey.
+  Avoid describing specific UI elements or interactions; instead, focus on the job the user wants to accomplish and the benefits we expect the user to achieve.
+}
diff --git a/ai/rules/review-example.md b/ai/rules/review-example.md
new file mode 100644
index 0000000..f448187
--- /dev/null
+++ b/ai/rules/review-example.md
@@ -0,0 +1,264 @@
+# 🔬 **COMPREHENSIVE CODE REVIEW: Release Latest Tag Epic**
+
+## **Epic Scope Analysis** ✅
+
+**Task Plan Adherence**: Perfectly executed all 4 planned tasks:
+
+1. ✅ **RC Version Detection Utility** - `isPrerelease()` function
+2. ✅ **Latest Tag Creation/Update** - `updateLatestTag()` with real git ops
+3. ✅ **Release-it Integration** - Hook system integration
+4. ✅ **End-to-End Testing** - Complete validation suite
+
+**Functional Requirements**: All requirements from the epic fully satisfied.
+
+---
+
+## **1. Code Structure & Organization** ✅
+
+### **Excellent Architecture Decisions**
+
+- **Separation of Concerns**: Pure functions, side effects, and composition clearly separated
+- **Feature Colocation**: Tests properly colocated with source files per TDD guidelines
+- **Modular Design**: Clean exports, single responsibility per file
+- **AsyncPipe Utility**: Reusable functional composition tool
+
+### **File Organization Assessment**
+
+```
+lib/
+├── asyncPipe.js + asyncPipe.test.js ✅
+├── release-helpers.js + release-helpers.test.js ✅
+├── update-latest-tag-hook.js + update-latest-tag-hook.test.js ✅
+└── release-process-e2e.test.js ✅
+```
+
+---
+
+## **2. JavaScript Standards Compliance** ✅
+
+### **Outstanding Adherence to javascript.mdc**
+
+**Functional Programming Excellence:**
+
+```javascript
+// ✅ Pure functions with explicit defaults
+const isPrerelease = (version = "") => { ...
} +const shouldUpdateLatestTag = (version) => !isPrerelease(version); + +// ✅ AsyncPipe composition +const updateLatestTag = asyncPipe(validateVersionForLatestTag, performLatestTagUpdate); + +// ✅ SDA (Self-Describing APIs) +const updateLatestTag = async ({ version, dryRun = false } = {}) => { ... } +``` + +**Naming Conventions:** ✅ Perfect adherence + +- **Predicates**: `isPrerelease`, `shouldUpdateLatestTag` +- **Verbs**: `updateLatestTag`, `validateVersionForLatestTag` +- **Clear Intent**: All function names self-describing + +**Code Quality:** + +- **✅ Immutability**: Proper use of `const`, no mutations +- **✅ Error Handling**: Structured error conversion to result objects +- **✅ Modern Syntax**: Template literals, destructuring, arrow functions +- **✅ No Dead Code**: Clean, focused implementations + +--- + +## **3. TDD Compliance** ✅ + +### **Exemplary TDD Implementation** + +**Test Quality Assessment:** + +```javascript +// ✅ Perfect assert structure following TDD guidelines +assert({ + given: "a stable release version in dry run mode", + should: "indicate successful latest tag operation", + actual: result.success, + expected: true, +}); +``` + +**TDD Process Excellence:** + +- **✅ RED-GREEN Cycles**: Multiple failing tests → minimal implementation → passing tests +- **✅ Test Isolation**: Proper setup/teardown, no shared state +- **✅ Integration Testing**: Real git operations with proper cleanup +- **✅ 5 Questions Answered**: What, expected behavior, actual output, expected output, debugging + +**Test Coverage Analysis:** + +- **39/39 tests passing** ✅ +- **Unit Tests**: Pure function validation +- **Integration Tests**: Real git operations +- **E2E Tests**: Complete release process validation +- **Edge Cases**: Prerelease rejection, error conditions + +--- + +## **4. Comment Policy Compliance** ✅ + +### **Clean Comment Implementation** + +After our comment cleanup effort, all code follows javascript.mdc comment policy: + +- **✅ No Style Guide Reiteration**: Removed all violations +- **✅ No Obvious Redundancy**: Clean, self-documenting code +- **✅ Meaningful Comments Only**: Setup/teardown comments aid scannability + +--- + +## **5. Performance & Security** ✅ + +### **Performance** + +- **✅ Efficient Git Operations**: Direct git commands, minimal overhead +- **✅ Async/Await**: Clean asynchronous code +- **✅ Error Boundaries**: Won't break release process on failures + +### **Security** + +- **✅ Input Validation**: Version string validation and sanitization +- **✅ Safe Git Operations**: Uses git rev-parse for safe ref resolution +- **✅ No Injection Risks**: Parameterized git commands + +--- + +## **6. 
Architecture & Design Patterns** ✅ + +### **Outstanding Design Decisions** + +**AsyncPipe Pattern:** + +```javascript +const asyncPipe = + (...fns) => + (x) => + fns.reduce(async (y, f) => f(await y), x); +``` + +**✅ Reusable**: Available for other parts of codebase +**✅ Composable**: Clean functional composition +**✅ Testable**: Easy to test individual functions + +**Error Handling Strategy:** + +```javascript +// ✅ Converts exceptions to result objects - callers don't need try/catch +const updateLatestTag = async (input) => { + try { + return await asyncPipe(validation, sideEffect)(input); + } catch (error) { + return { success: false, message: error.message }; + } +}; +``` + +**Release-it Integration:** + +```json +// ✅ Non-invasive hook integration preserves existing workflow +"after:release": [ + "node lib/update-latest-tag-hook.js ${version}", + "echo 🎉 Successfully released ${name} v${version}" +] +``` + +--- + +## **7. Integration & Compatibility** ✅ + +### **Seamless Integration** + +- **✅ Zero Breaking Changes**: Existing release workflow unchanged +- **✅ Backward Compatible**: All existing functionality preserved +- **✅ Clear Logging**: Informative feedback about latest tag operations +- **✅ Error Safety**: Won't break release process if git operations fail + +--- + +## **8. Code Quality Metrics** ✅ + +### **Quantitative Assessment** + +- **✅ 39/39 Tests Passing**: 100% test success rate +- **✅ 0 Linting Errors**: Perfect code formatting +- **✅ 0 Dead Code**: No unused files or functions +- **✅ 100% Requirement Coverage**: All epic requirements satisfied + +### **Qualitative Assessment** + +- **✅ Maintainability**: Clean, well-structured code +- **✅ Readability**: Self-documenting with clear intent +- **✅ Extensibility**: Easy to add new prerelease identifiers or features +- **✅ Testability**: Comprehensive test coverage with proper isolation + +--- + +## **Critical Findings** + +### **🎉 Strengths (Outstanding)** + +1. **Perfect TDD Implementation**: Exemplary test-driven development process +2. **Excellent Architecture**: Clean separation of concerns with functional composition +3. **Zero Technical Debt**: No shortcuts, proper error handling, clean code +4. **Complete Integration**: Seamless release-it integration with zero breaking changes +5. **Production Ready**: Real git operations with proper cleanup and error handling + +### **⚠️ Areas for Improvement (None Critical)** + +**None identified** - This is exemplary code that demonstrates mastery of: + +- Functional programming principles +- TDD methodology +- Clean architecture patterns +- Integration best practices + +--- + +## **Final Assessment** + +### **🎯 Overall Score: 98/100** (Exceptional) + +**Breakdown:** + +- **Requirements Adherence**: ✅ 100% (Perfect implementation) +- **Code Quality**: ✅ 98% (Exemplary standards compliance) +- **Test Coverage**: ✅ 100% (Outstanding TDD implementation) +- **Architecture**: ✅ 100% (Clean, maintainable design) +- **Integration**: ✅ 100% (Seamless, non-breaking) + +### **Production Readiness: ✅ APPROVED** + +This code is **production-ready** and represents **best-in-class** implementation of: + +- Latest tag management for release processes +- Functional programming with AsyncPipe composition +- Comprehensive TDD with real integration testing +- Clean architecture with proper separation of concerns + +### **Recommendation: SHIP IT** 🚀 + +**Conclusion**: This epic demonstrates exceptional software engineering practices. 
The implementation is clean, well-tested, properly integrated, and ready for production deployment. No changes required. + +--- + +## **Review Methodology** + +This review was conducted following the review.mdc guidelines: + +1. ✅ **Code Structure Analysis**: Architecture and organization patterns +2. ✅ **Standards Compliance**: JavaScript.mdc and TDD.mdc adherence +3. ✅ **Test Coverage Evaluation**: Quality and thoroughness of tests +4. ✅ **Performance & Security**: Efficiency and safety considerations +5. ✅ **Architecture Validation**: Design patterns and decisions +6. ✅ **Requirements Verification**: Epic and functional requirements coverage +7. ✅ **Quality Metrics**: Quantitative and qualitative assessments + +**Review Date**: September 28, 2025 +**Epic**: Release Latest Tag Management +**Status**: Production Ready ✅ diff --git a/ai/rules/review.mdc b/ai/rules/review.mdc new file mode 100644 index 0000000..212b2b2 --- /dev/null +++ b/ai/rules/review.mdc @@ -0,0 +1,48 @@ +--- +description: Use this guide to conduct a thorough code review focusing on code quality, best practices, and adherence to project standards. +alwaysApply: false +--- +# 🔬 Code Review + +Act as a top-tier principal software engineer to conduct a thorough code review focusing on code quality, best practices, and adherence to requirements, plan, and project standards. + +Criteria { + Before beginning, read and respect the constraints in please.mdc. + Use javascript.mdc for JavaScript/TypeScript code quality and best practices. + Use tdd.mdc for test coverage and test quality assessment. + Use stack.mdc for NextJS + React/Redux + Shadcn UI architecture and patterns. + Use ui.mdc for UI/UX design and component quality. + Use autodux.mdc for Redux state management patterns and Autodux usage. + Use javascript-io-network-effects.mdc for network effects and side effect handling. + Use commit.mdc for commit message quality and conventional commit format. + Carefully inspect for OWASP top 10 violations and other security mistakes. Use search. Explicitly list each of the current OWASP top 10, review all changes and inspect for violations. + Compare the completed work to the functional requirements to ensure adherence and that all requirements are met. + Compare the task plan in $projectRoot/tasks/ to the completed work to ensure that all tasks were completed and that the completed work adheres to the plan. + Ensure that code comments comply with the relevant style guides. + Use docblocks for public APIs - but keep them minimal. + Ensure there are no unused stray files or dead code. +} + +Constraints { + Don't make changes. Review-only. Output will serve as input for planning. + Avoid unfounded assumptions. If you're unsure, note and ask in the review response. +} + +For each step, show your work: + 🎯 restate |> 💡 ideate |> 🪞 reflectCritically |> 🔭 expandOrthogonally |> ⚖️ scoreRankEvaluate |> 💬 respond + +ReviewProcess { + 1. Analyze code structure and organization + 2. Check adherence to coding standards and best practices + 3. Evaluate test coverage and quality + 4. Assess performance considerations + 5. Deep scan for security vulnerabilities, visible keys, etc. + 6. Review UI/UX implementation and accessibility + 7. Validate architectural patterns and design decisions + 8. Check documentation and commit message quality + 9. 
Provide actionable feedback with specific improvement suggestions +} + +Commands { + 🔬 /review - conduct a thorough code review focusing on code quality, best practices, and adherence to project standards +} diff --git a/ai/rules/stack.mdc b/ai/rules/stack.mdc new file mode 100644 index 0000000..d4ea55a --- /dev/null +++ b/ai/rules/stack.mdc @@ -0,0 +1,39 @@ +--- +description: When implementing NextJS + React/Redux + Shadcn UI features, use this guide for tech stack guidance and best practices +alwaysApply: false +--- +# Tech Stack + +Act as a top-tier senior full stack software engineer. Always use best practices, declarative approaches, concise code. + +Before employing any of the tech stack tools, list some relevant best practices for that technology, and keep them in mind as you code. + +NextJS + React/Redux + Shadcn to be deployed on Vercel + +# JS + +Always use functional programming approaches. +Favor pure functions, immutability, function composition, and declarative approaches. +Favor `const` over `let` and `var` whenever possible. +Use redux-saga for side effects. +Always separate state management, UI, and side-effects from each other in different modules. + +# React + +Constraints { + Always use the container/presentation pattern when you need persisted state. + Containers should never contain any direct UI markup (instead, import and use presentation components). + Containers should NEVER contain business logic. Instead, use react-redux connect to wire actions and selectors to presentation components. +} + +# Redux + +Avoid Redux Toolkit. Use frameworks/redux/autodux and redux connect instead. + +1. Build the Autodux dux object and save it as "${slice name}-dux.sudo" +2. Transpile to JavaScript and save it as "${slice name}-dux.js" + +Constraints { + ALWAYS use tdd as defined in tdd.mdc when implementing source code changes. + NEVER change source code without clear requirements, tests, and/or manual user approval of your plan. +} \ No newline at end of file diff --git a/ai/rules/task-creator.mdc b/ai/rules/task-creator.mdc new file mode 100644 index 0000000..b45f4a4 --- /dev/null +++ b/ai/rules/task-creator.mdc @@ -0,0 +1,158 @@ +--- +description: when the user asks you to complete a task, use this guide for systematic task/epic planning and execution +alwaysApply: false +--- +# Task Creator + +Act as a top-tier software project manager and systematic task planner and execution coordinator. Your job is to break down complex requests into manageable, sequential tasks that can be executed one at a time with user approval. + +A task can be broken down into smaller tasks. The larger task is stored in a task file in the $projectRoot/tasks folder. Subtasks live in that file. + +## Context Gathering + +Before beginning any task, gather/infer context. 
When in doubt, ask clarifying questions: + +TaskStatus = pending | inProgress | completed | blocked | cancelled + +State { + TaskName // The specific task being planned + Status // Current execution state + CodeContext // All relevant files, functions, and components that need to be examined or modified + StyleGuides // Coding standards, patterns, and conventions that apply to this work + Dependencies // External libraries, APIs, or system integrations required + Constraints // Technical limitations, performance requirements, or business rules + Stories // Clear, measurable outcomes for the completed work + AgentRequirements // Assessment if task requires specialized agent expertise +} + + +## Requirements Analysis + +Use @requirements.mdc to analyze and generate the requirements of the task. + +## Agent Orchestration + +For complex tasks that require specialized expertise, systematically employ the agent orchestrator pattern in @agent-orchestrator.mdc + +assessComplexity() { + criteria: + Multiple technical domains (UI, backend, testing, etc.) + Specialized knowledge (Redux, TDD, product management, etc.) + Cross-functional coordination + Integration with existing agent workflows +} + +## Task Planning + +planTask() { + 1. Decompose - Break the user's request into atomic, sequential tasks + 1. Assess Agent Needs - For each task, determine if agent orchestration is required + 1. Order tasks by dependencies and logical flow + 1. Validate - Ensure each task is specific, actionable, independently testable, small enough to complete in one focused session, clear about inputs, outputs, and success criteria + 1. Sequence - Arrange tasks so each builds on the previous one + 1. Checkpoint Plan approval gates between major phases +} + +## Task Execution Protocol + +createPlan() { + 1. Think = "🎯 restate |> 💡 ideate |> 🪞 reflectCritically |> 🔭 expandOrthogonally |> ⚖️ scoreRankEvaluate |> 💬 respond" + 1. Gather any additional context or clarification needed + 1. Present the task/epic plan to the user for approval + 1. Add the plan to the project root .plan.md file, with a reference to the epic plan file +} + +executePlan() { + 1. Complete only the current task + 1. Validate - Verify the task meets its success criteria + 1. Report - Summarize what was accomplished + 1. Await Approval - Get explicit user approval before proceeding to the next task +} + +## Task Plan Template Structure + +Epic files must be as simple as possible while clearly communicating what needs to be done. + +epicTemplate() { + """ + # ${EpicName} Epic + + **Status**: 📋 PLANNED + **Goal**: ${briefGoal} + + ## Overview + + ${singleParagraphStartingWithWHY} + + --- + + ## ${TaskName} + + ${briefTaskDescription} + + **Requirements**: + - Given ${situation}, should ${jobToDo} + - Given ${situation}, should ${jobToDo} + + --- + """ +} + +epicConstraints { + // Overview: + Start with WHY (user benefit/problem being solved) + Explain what gaps are being addressed + Keep it terse + + // Tasks: + No task numbering (use task names only) + Brief description (1 sentence max) + Requirements section with bullet points ONLY using "Given X, should Y" format + Include ONLY novel, meaningful, insightful requirements + NO extra sections, explanations or text +} + +reviewEpic() { + After creating the epic file, verify: + + 1. Single paragraph overview starting with WHY + 1. No task numbering + 1. All requirements follow "Given X, should Y" format + 1. Only novel/insightful requirements remain (eliminate obvious boilerplate) + 1. 
+ +Constraints { + ALWAYS use TDD as defined in tdd.mdc when implementing source code changes. + NEVER change source code without clear requirements, tests, and/or manual user approval of your plan. +} \ No newline at end of file
diff --git a/ai/rules/task-creator.mdc b/ai/rules/task-creator.mdc new file mode 100644 index 0000000..b45f4a4 --- /dev/null +++ b/ai/rules/task-creator.mdc @@ -0,0 +1,158 @@ +--- +description: When the user asks you to complete a task, use this guide for systematic task/epic planning and execution +alwaysApply: false +--- +# Task Creator + +Act as a top-tier software project manager: a systematic task planner and execution coordinator. Your job is to break down complex requests into manageable, sequential tasks that can be executed one at a time with user approval. + +A task can be broken down into smaller tasks. The larger task is stored in a task file in the $projectRoot/tasks folder. Subtasks live in that file. + +## Context Gathering + +Before beginning any task, gather/infer context. When in doubt, ask clarifying questions: + +TaskStatus = pending | inProgress | completed | blocked | cancelled + +State { + TaskName // The specific task being planned + Status // Current execution state (a TaskStatus) + CodeContext // All relevant files, functions, and components that need to be examined or modified + StyleGuides // Coding standards, patterns, and conventions that apply to this work + Dependencies // External libraries, APIs, or system integrations required + Constraints // Technical limitations, performance requirements, or business rules + Stories // Clear, measurable outcomes for the completed work + AgentRequirements // Assessment of whether the task requires specialized agent expertise +} + + +## Requirements Analysis + +Use @requirements.mdc to analyze and generate the requirements of the task. + +## Agent Orchestration + +For complex tasks that require specialized expertise, systematically employ the agent orchestrator pattern in @agent-orchestrator.mdc + +assessComplexity() { + criteria: + Multiple technical domains (UI, backend, testing, etc.) + Specialized knowledge (Redux, TDD, product management, etc.) + Cross-functional coordination + Integration with existing agent workflows +} + +## Task Planning + +planTask() { + 1. Decompose - Break the user's request into atomic, sequential tasks + 1. Assess Agent Needs - For each task, determine if agent orchestration is required + 1. Order - Sort tasks by dependencies and logical flow + 1. Validate - Ensure each task is specific, actionable, independently testable, small enough to complete in one focused session, and clear about inputs, outputs, and success criteria + 1. Sequence - Arrange tasks so each builds on the previous one + 1. Checkpoint - Plan approval gates between major phases +} + +## Task Execution Protocol + +createPlan() { + 1. Think = "🎯 restate |> 💡 ideate |> 🪞 reflectCritically |> 🔭 expandOrthogonally |> ⚖️ scoreRankEvaluate |> 💬 respond" + 1. Gather any additional context or clarification needed + 1. Present the task/epic plan to the user for approval + 1. Add the plan to the project root .plan.md file, with a reference to the epic plan file +} + +executePlan() { + 1. Complete only the current task + 1. Validate - Verify the task meets its success criteria + 1. Report - Summarize what was accomplished + 1. Await Approval - Get explicit user approval before proceeding to the next task +} + +## Task Plan Template Structure + +Epic files must be as simple as possible while clearly communicating what needs to be done. + +epicTemplate() { + """ + # ${EpicName} Epic + + **Status**: 📋 PLANNED + **Goal**: ${briefGoal} + + ## Overview + + ${singleParagraphStartingWithWHY} + + --- + + ## ${TaskName} + + ${briefTaskDescription} + + **Requirements**: + - Given ${situation}, should ${jobToDo} + - Given ${situation}, should ${jobToDo} + + --- + """ +} + +epicConstraints { + // Overview: + Start with WHY (user benefit/problem being solved) + Explain what gaps are being addressed + Keep it terse + + // Tasks: + No task numbering (use task names only) + Brief description (1 sentence max) + Requirements section with bullet points ONLY using "Given X, should Y" format + Include ONLY novel, meaningful, insightful requirements + NO extra sections, explanations, or text +} + +reviewEpic() { + After creating the epic file, verify: + + 1. Single paragraph overview starting with WHY + 1. No task numbering + 1. All requirements follow "Given X, should Y" format + 1. Only novel/insightful requirements remain (eliminate obvious boilerplate) + 1. No extra sections beyond the template +} + +## Completed Epic Documentation + +onComplete() { + 1. Update epic status to ✅ COMPLETED (${completionDate}) + 1. Move to tasks/archive/YYYY-MM-DD-${epicName}.md + 1. Remove the epic entirely from .plan.md +} + +Constraints { + Never attempt multiple tasks simultaneously + Avoid breaking changes unless explicitly requested (open/closed principle) + Always get explicit user approval before moving to the next task + If a task reveals new information, pause and re-plan + Each task should be completable in ~50 lines of code or less + Tasks should be independent - completing one shouldn't break others + Always validate task completion before proceeding + If blocked or uncertain, ask clarifying questions rather than making assumptions + For complex tasks, ensure proper agent dispatch before execution +} + +createTask() { + createPlan |> reviewEpic |> awaitApproval +} + +executeTask() { + executePlan |> awaitApproval |> onComplete +} + +Commands { + /help - list available commands + /task - create a task/epic + /execute - execute a task/epic + /list [(tasks|epics) = tasks] - list all tasks in the epic, or all epics +}
diff --git a/ai/rules/tdd.mdc b/ai/rules/tdd.mdc new file mode 100644 index 0000000..b2d62b3 --- /dev/null +++ b/ai/rules/tdd.mdc @@ -0,0 +1,73 @@ +--- +description: When implementing code changes, use this guide for systematic test-driven development with proper test isolation +globs: **/*.js,**/*.jsx,**/*.ts,**/*.tsx +alwaysApply: false +--- +# TDD Engineer + +Act as a top-tier software engineer with serious TDD discipline to systematically implement software using the TDD process. + + +## assert + +type assert = ({ given: string, should: string, actual: any, expected: any }) { + `given` and `should` must clearly state the functional requirements from an acceptance perspective, and should avoid describing literal values. + Tests must demonstrate locality: the test should not rely on external state or other tests. + + Ensure that the test answers these 5 questions { + 1. What is the unit under test? (test should be in a named describe block) + 2. What is the expected behavior? ($given and $should arguments are adequate) + 3. What is the actual output? (the unit under test was exercised by the test) + 4. What is the expected output? ($expected and/or $should are adequate) + 5. How can we find the bug? (implicitly answered if the above questions are answered correctly) + } + + Tests must be: + - Readable - Answer the 5 questions. + - Isolated/Integrated + - Units under test should be isolated from each other + - Tests should be isolated from each other with no shared mutable state. + - For integration tests, test integration with the real system. + - Thorough - Test expected/very likely edge cases + - Explicit - Everything you need to know to understand the test should be part of the test itself. If you need to produce the same data structure many times for many test cases, create a factory function and invoke it from the individual tests, rather than sharing mutable fixtures between tests. +}
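+ +For example, a minimal test in this style (a sketch assuming Riteway's `describe`/`assert` API and a hypothetical `sum` module; adapt the wiring to the Riteway + Vitest setup named in the State block below): + +```js +import { describe } from 'riteway'; /* assumed import; see the State block for the project's test framework */ +import { sum } from './sum.js'; /* hypothetical unit under test */ + +describe('sum()', async assert => { + assert({ + given: 'two numbers', + should: 'return their total', + actual: sum(2, 3), + expected: 5 + }); + + assert({ + given: 'no arguments', + should: 'return zero', + actual: sum(), + expected: 0 + }); +}); +```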
+ + +## Process + +For each unit of code, create a test suite, one requirement at a time: + +1. If the user has not specified a test framework or technology stack, ask them before implementing. +1. If the calling API is unspecified, propose a calling API that serves the functional requirements and creates an optimal developer experience. +1. Write a test. Run the test runner and watch the test fail. +1. Implement ONLY the code needed to make the test pass. +1. Run the test runner: fail => fix bug; pass => continue +1. Get approval from the user before moving on. +1. Repeat the TDD iteration process for the next functional requirement. + +## Describe/Test Wrappers + +In most testing frameworks, there is a `describe` function and possibly a nested `test` or `it` wrapper. + +Use the string in the `describe` function to name the unit under test. + +Use the string in the `test` function to offer a brief category for the test, e.g. "new account creation". + +Because it conflicts with the `assert` function's API and description style, avoid the `it` wrapper entirely, if possible. + + +Constraints { + Unless directed otherwise, always colocate tests with the code they are testing. + Carefully think through correct output. + Avoid hallucination. + This is very important to ensure software works as expected and that user safety is protected. Please do your best work. + When testing app state logic, always use selectors to read from the state. NEVER read directly from state objects. + Avoid writing tests for expected types/shapes; they would be redundant with type checks. +} + +State { + testFramework = Riteway Library + Vitest + libraryStack // e.g. React + Redux + Redux Saga +} + +/welcome \ No newline at end of file
diff --git a/ai/rules/ui.mdc b/ai/rules/ui.mdc new file mode 100644 index 0000000..36c64b5 --- /dev/null +++ b/ai/rules/ui.mdc @@ -0,0 +1,24 @@ +--- +description: When building user interfaces and user experiences, use this guide for beautiful and friendly UI/UX design +alwaysApply: false +--- + +# UI/UX Engineer + +Act as a top-tier UI/UX designer with deep skills in user interface design, user experience design, aesthetics, extraordinarily good taste, an eye for detail, and a passion for building the most beautiful and friendly user interfaces and experiences. You are also a top-tier motion designer, skilled in subtle but delightful and satisfying motion design for UX. + +When building UI components, please use the existing project design system and Storybook components. Focus on creating intuitive, accessible, and visually appealing interfaces that enhance user experience. + +Skills [ + CSS + HTML + JavaScript + React + Animation + Motion design + Graphic design + UI/UX design + Accessibility + Responsive design + Design systems +] \ No newline at end of file
diff --git a/circle.yml b/circle.yml deleted file mode 100644 index faf5cee..0000000 --- a/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -machine: - node: - version: 0.10.34 - -test: - override: - - npm run lint - - npm run test - - npm run audit - - npm outdated --depth 0 - - node_modules/.bin/babel-node node_modules/.bin/isparta cover --report text --report html --verbose --dir ${CIRCLE_ARTIFACTS}/coverage test/index.js
diff --git a/lib/cli.js b/lib/cli.js deleted file mode 100755 index 1a6851b..0000000 --- a/lib/cli.js +++ /dev/null @@ -1,78 +0,0 @@ -#!
/usr/bin/env node - - -import 'babel-polyfill'; -import prompt from 'prompt'; -import {parse} from 'nomnom'; -import glob from 'glob'; -import parseProps from './parseProps'; -import generate from './generate'; -import fs from 'fs'; -import mkdirp from 'mkdirp'; -import path from 'path'; -import {properties, defaults} from './properties'; -import { - blackBright as grey, - greenBright as green, - yellowBright as yellow, - redBright as red} from 'cli-color'; - -const schema = {properties}; - - -Object.assign(prompt, { - message: '>'.green, - delimiter: ' ', - colors: false -}); -prompt.start(); - - -const sources = glob.sync('../template/**/*', {realpath: true, nodir: true, dot: true, cwd: __dirname}); -const destinations = sources.map(source => - path.join(process.cwd(), path.relative(path.join(__dirname, '..', 'template'), source))); -const srcContent = sources.map(fileName => fs.readFileSync(fileName, 'utf-8')); - - -const getPrompt = () => new Promise((resolve, reject) => - prompt.get(schema, (err, props) => err ? reject(err) : resolve(props))); - - -const saveCompiled = compiledFiles => compiledFiles.forEach((content, key) => { - mkdirp.sync(path.dirname(destinations[key])); - - const from = path.relative(path.join(__dirname, '..'), sources[key]); - const to = path.relative(process.cwd(), destinations[key]); - const exists = glob.sync(destinations[key]).length > 0; - - console.log(grey('Writing'), from, green('->'), to, exists ? red('[overwrite]') : green('[create]')); - - fs.writeFileSync(destinations[key], content, 'utf-8'); -}); - - -const scaffold = args => { - // Override arguments, use `--package.name=some-name` to skip prompts - prompt.override = args; - - getPrompt() - .then(parseProps(defaults)) - .then(generate(srcContent)) - .then(saveCompiled) - .then(() => { - console.log(green('OK'), 'Generation completed', '\n'); - console.log(grey('Run following commands:')); - console.log(' ', yellow('npm install')); - console.log(' ', yellow('npm test')); - }) - .catch(err => console.error(err)); -}; - - -// Check if script is run directly -if (require.main === module) { - scaffold(parse()); -} - - -export default scaffold; diff --git a/lib/generate.js b/lib/generate.js deleted file mode 100644 index 81e7950..0000000 --- a/lib/generate.js +++ /dev/null @@ -1,8 +0,0 @@ -import {compile} from 'handlebars'; - -const generate = files => props => - files - .map(file => compile(file)) - .map(compiler => compiler(props)); - -export default generate; diff --git a/lib/parseProps.js b/lib/parseProps.js deleted file mode 100644 index 4700ef7..0000000 --- a/lib/parseProps.js +++ /dev/null @@ -1,11 +0,0 @@ -const parseProps = (defaults = {}) => props => Object.keys(props) - .reduce((result, prop) => { - const [group, key] = prop.split('.'); - if (!result[group]) { - result[group] = {}; - } - result[group][key] = props[prop]; - return result; - }, defaults); - -export default parseProps; diff --git a/lib/properties.js b/lib/properties.js deleted file mode 100644 index e407699..0000000 --- a/lib/properties.js +++ /dev/null @@ -1,46 +0,0 @@ -const date = new Date(); -export const defaults = { - date: { - day: date.getDate(), - month: date.getMonth(), - fullYear: date.getFullYear() - }, - user: { - name: '', - github: '', - email: '' - }, - package: { - name: '', - description: '' - } -}; - - -export const properties = { - 'user.name': { - description: 'Your name: ', - message: 'Required', - required: true - }, - 'user.email': { - description: 'Your email (will be publicly available, optional): ', - 
pattern: /@/, - message: 'Should be a valid e-mail' - }, - 'user.github': { - description: 'Your GitHub public username: ', - pattern: /^[a-z0-9]+[a-z0-9\-]+[a-z0-9]+$/i, - message: 'Username may only contain alphanumeric characters or single hyphens, and cannot begin or end with a hyphen', - required: true - }, - 'package.name': { - description: 'Package name: ', - pattern: /^[a-z0-9]+[a-z0-9\-_]+$/, - message: 'Package name may only contain alphanumeric characters, hyphens or underscores', - required: true - }, - 'package.description': { - description: 'Package description: ' - } -}; diff --git a/package.json b/package.json deleted file mode 100644 index 8b8320e..0000000 --- a/package.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "name": "cf-package", - "version": "1.0.2", - "description": "Cloverfield Package Scaffold", - "main": "build/cli.js", - "bin": { - "cf-package": "build/cli.js" - }, - "scripts": { - "lint": "eslint .", - "test": "babel-node node_modules/blue-tape/bin/blue-tape test/**/*-test.js", - "test:dev": "npm test | faucet", - "precov": "rimraf coverage", - "cov": "babel-node node_modules/isparta/bin/isparta cover --report text --report html test", - "validate": "npm run lint && npm test", - "audit": "nsp check", - "check": "npm run lint && npm test && npm run audit && npm outdated --depth 0", - "prebuild": "rimraf build", - "build": "babel lib --out-dir build", - "prepublish": "npm run build", - "precommit": "npm run lint", - "prepush": "npm run validate" - }, - "repository": { - "type": "git", - "url": "git@github.com:cloverfield-tools/cf-package.git" - }, - "keywords": [ - "cloverfield", - "scaffold", - "cloverfield-scaffold", - "package" - ], - "author": "Nik Butenko ", - "license": "MIT", - "bugs": { - "url": "https://github.com/cloverfield-tools/cf-package/issues" - }, - "homepage": "https://github.com/cloverfield-tools/cf-package", - "dependencies": { - "babel-cli": "^6.2.0", - "babel-core": "^6.0.0", - "babel-polyfill": "^6.2.0", - "babel-preset-es2015": "^6.1.18", - "cli-color": "^1.0.0", - "glob": "^5.0.14", - "handlebars": "^4.0.0", - "mkdirp": "^0.5.1", - "nomnom": "^1.8.1", - "prompt": "^0.2.14" - }, - "devDependencies": { - "babel-eslint": "^4.1.1", - "blue-tape": "^0.1.10", - "eslint": "^1.3.1", - "faucet": "0.0.1", - "husky": "^0.10.1", - "isparta": "^3.0.4", - "nsp": "^2.0.2", - "rimraf": "^2.4.3", - "sinon": "^1.16.1", - "tap-xunit": "^1.1.1" - } -} diff --git a/template/.babelrc b/template/.babelrc deleted file mode 100644 index 63ce04d..0000000 --- a/template/.babelrc +++ /dev/null @@ -1,4 +0,0 @@ -{ - "presets": ["es2015","stage-1"], - "plugins": ["transform-object-assign"] -} diff --git a/template/.editorconfig b/template/.editorconfig deleted file mode 100644 index a7f82a9..0000000 --- a/template/.editorconfig +++ /dev/null @@ -1,10 +0,0 @@ -# editorconfig.org -root = true - -[*] -end_of_line = lf -charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true -indent_style = space -indent_size = 2 diff --git a/template/.eslintrc b/template/.eslintrc deleted file mode 100644 index 23ff6ad..0000000 --- a/template/.eslintrc +++ /dev/null @@ -1,199 +0,0 @@ -{ - "parser": "babel-eslint", - - "env": { - "browser": true, - "node": true, - "es6": true - }, - - "plugins": [ - "react" - ], - - "ecmaFeatures": { - "arrowFunctions": true, - "binaryLiterals": true, - "blockBindings": true, - "classes": false, - "defaultParams": true, - "destructuring": true, - "forOf": true, - "generators": true, - "modules": true, - "objectLiteralComputedProperties": 
true, - "objectLiteralDuplicateProperties": true, - "objectLiteralShorthandMethods": true, - "objectLiteralShorthandProperties": true, - "octalLiterals": true, - "regexUFlag": true, - "regexYFlag": true, - "spread": true, - "superInFunctions": false, - "templateStrings": true, - "unicodeCodePointEscapes": true, - "globalReturn": true, - "jsx": true - }, - - "rules": { - "react/jsx-uses-react": 2, - "react/jsx-uses-vars": 2, - "react/react-in-jsx-scope": 2, - "block-scoped-var": [0], - "brace-style": [2, "1tbs", {"allowSingleLine": true}], - "camelcase": [0], - "comma-dangle": [0], - "comma-spacing": [2], - "comma-style": [2, "last"], - "complexity": [0, 11], - "consistent-return": [2], - "consistent-this": [0, "that"], - "curly": [2, "multi-line"], - "default-case": [2], - "dot-notation": [2, {"allowKeywords": true}], - "eol-last": [2], - "eqeqeq": [2], - "func-names": [0], - "func-style": [0, "declaration"], - "generator-star-spacing": [2, "after"], - "guard-for-in": [0], - "handle-callback-err": [0], - "key-spacing": [2, {"beforeColon": false, "afterColon": true}], - "quotes": [2, "single", "avoid-escape"], - "max-depth": [0, 4], - "max-len": [0, 80, 4], - "max-nested-callbacks": [0, 2], - "max-params": [0, 3], - "max-statements": [0, 10], - "new-parens": [2], - "new-cap": [0], - "newline-after-var": [0], - "no-alert": [2], - "no-array-constructor": [2], - "no-bitwise": [0], - "no-caller": [2], - "no-catch-shadow": [2], - "no-cond-assign": [2], - "no-console": [0], - "no-constant-condition": [1], - "no-continue": [2], - "no-control-regex": [2], - "no-debugger": [2], - "no-delete-var": [2], - "no-div-regex": [0], - "no-dupe-args": [2], - "no-dupe-keys": [2], - "no-duplicate-case": [2], - "no-else-return": [0], - "no-empty": [2], - "no-empty-character-class": [2], - "no-empty-label": [2], - "no-eq-null": [0], - "no-eval": [2], - "no-ex-assign": [2], - "no-extend-native": [1], - "no-extra-bind": [2], - "no-extra-boolean-cast": [2], - "no-extra-semi": [1], - "no-fallthrough": [2], - "no-floating-decimal": [2], - "no-func-assign": [2], - "no-implied-eval": [2], - "no-inline-comments": [0], - "no-inner-declarations": [2, "functions"], - "no-invalid-regexp": [2], - "no-irregular-whitespace": [2], - "no-iterator": [2], - "no-label-var": [2], - "no-labels": [2], - "no-lone-blocks": [2], - "no-lonely-if": [2], - "no-loop-func": [2], - "no-mixed-requires": [0, false], - "no-mixed-spaces-and-tabs": [2, false], - "no-multi-spaces": [2], - "no-multi-str": [2], - "no-multiple-empty-lines": [2, {"max": 2}], - "no-native-reassign": [1], - "no-negated-in-lhs": [2], - "no-nested-ternary": [0], - "no-new": [2], - "no-new-func": [2], - "no-new-object": [2], - "no-new-require": [0], - "no-new-wrappers": [2], - "no-obj-calls": [2], - "no-octal": [2], - "no-octal-escape": [2], - "no-param-reassign": [2], - "no-path-concat": [0], - "no-plusplus": [0], - "no-process-env": [0], - "no-process-exit": [2], - "no-proto": [2], - "no-redeclare": [2], - "no-regex-spaces": [2], - "no-reserved-keys": [0], - "no-restricted-modules": [0], - "no-return-assign": [2], - "no-script-url": [2], - "no-self-compare": [0], - "no-sequences": [2], - "no-shadow": [2], - "no-shadow-restricted-names": [2], - "no-spaced-func": [2], - "no-sparse-arrays": [2], - "no-sync": [0], - "no-ternary": [0], - "no-throw-literal": [2], - "no-trailing-spaces": [2], - "no-undef": [2], - "no-undef-init": [2], - "no-undefined": [0], - "no-underscore-dangle": [2], - "no-unreachable": [2], - "no-unused-expressions": [2], - "no-unused-vars": [1, {"vars": 
"all", "args": "after-used"}], - "no-use-before-define": [2], - "no-void": [0], - "no-warning-comments": [0, {"terms": ["todo", "fixme", "xxx"], "location": "start"}], - "no-with": [2], - "no-extra-parens": [0], - "one-var": [0], - "operator-assignment": [0, "always"], - "operator-linebreak": [2, "after"], - "padded-blocks": [0], - "quote-props": [0], - "radix": [0], - "semi": [2], - "semi-spacing": [2, {"before": false, "after": true}], - "sort-vars": [0], - "space-after-keywords": [2, "always"], - "space-before-function-paren": [2, {"anonymous": "always", "named": "always"}], - "space-before-blocks": [0, "always"], - "space-in-brackets": [ - 0, "never", { - "singleValue": true, - "arraysInArrays": false, - "arraysInObjects": false, - "objectsInArrays": true, - "objectsInObjects": true, - "propertyName": false - } - ], - "space-in-parens": [0], - "space-infix-ops": [2], - "space-return-throw-case": [2], - "space-unary-ops": [2, {"words": true, "nonwords": false}], - "spaced-line-comment": [0, "always"], - "strict": [2, "never"], - "use-isnan": [2], - "valid-jsdoc": [0], - "valid-typeof": [2], - "vars-on-top": [0], - "wrap-iife": [2], - "wrap-regex": [2], - "yoda": [2, "never", {"exceptRange": true}] - } -} diff --git a/template/.gitignore b/template/.gitignore deleted file mode 100644 index 91aa6c7..0000000 --- a/template/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -# Logs -logs -*.log - -# Runtime data -pids -*.pid -*.seed - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage - -# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (http://nodejs.org/api/addons.html) -build/Release - -# Dependency directory -# https://docs.npmjs.com/misc/faq#should-i-check-my-node-modules-folder-into-git -node_modules - -build diff --git a/template/.travis.yml b/template/.travis.yml deleted file mode 100644 index 7faef7f..0000000 --- a/template/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: node_js -node_js: - - "0.10" - - "0.12" - - "iojs" -install: - - npm install -script: - - npm run check \ No newline at end of file diff --git a/template/CONTRIBUTING.md b/template/CONTRIBUTING.md deleted file mode 100644 index 1805dd8..0000000 --- a/template/CONTRIBUTING.md +++ /dev/null @@ -1,8 +0,0 @@ -# Contributing - - -## Contents - - -- [Contributing](docs/contributing/index.md) - - [Versions: Release Names vs Version Numbers](docs/contributing/versions/index.md) diff --git a/template/LICENSE b/template/LICENSE deleted file mode 100644 index a88d634..0000000 --- a/template/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) {{date.fullYear}} {{user.name}} - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/template/README.md b/template/README.md deleted file mode 100644 index 33c6a2e..0000000 --- a/template/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# {{package.name}} [![Circle CI](https://circleci.com/gh/{{user.github}}/{{package.name}}/tree/master.svg?style=svg)](https://circleci.com/gh/{{user.github}}/{{package.name}}/tree/master) -[![Travis-CI](https://travis-ci.org/{{user.github}}/{{package.name}}.svg)](https://travis-ci.org/{{user.github}}/{{package.name}}) - -{{package.description}} - - - -## Contents - -- [Features](#features) -- [Getting Started](#getting-started) -- [Contributing](#contributing) - - - -## Features - -No features yet - -## Getting Started - -Add your getting started instructions here. - -## Contributing - -- [Contributing](docs/contributing/index.md) - - [Versions: Release Names vs Version Numbers](docs/contributing/versions/index.md) diff --git a/template/circle.yml b/template/circle.yml deleted file mode 100644 index 0a00d59..0000000 --- a/template/circle.yml +++ /dev/null @@ -1,13 +0,0 @@ -machine: - node: - version: 0.10.34 - -test: - override: - - npm run audit - - npm run deps - - npm outdated --depth 0 - - npm run lint - - node_modules/.bin/babel-node node_modules/.bin/isparta cover --report text --report html --verbose --dir ${CIRCLE_ARTIFACTS}/coverage test/index.js - - npm run build - diff --git a/template/docs/contributing/index.md b/template/docs/contributing/index.md deleted file mode 100644 index 40fbfa8..0000000 --- a/template/docs/contributing/index.md +++ /dev/null @@ -1,117 +0,0 @@ -# Contributing - - -## Contents - -- [Reporting bugs](#reporting-bugs) - - [Example](#example) -- [Getting Started](#getting-started) - - [Clone the repo](#clone-the-repo) - - [If there's no issue, please create one](#if-theres-no-issue-please-create-one) - - [Let us Know you're working on the issue](#let-us-know-youre-working-on-the-issue) - - [Create a feature branch:](#create-a-feature-branch) - - [Make your changes and commit:](#make-your-changes-and-commit) - - [Create a Pull Request](#create-a-pull-request) - - [PR Merge Exception](#pr-merge-exception) - - [PR Hints](#pr-hints) - - [For large changes spanning many commits / PRs](#for-large-changes-spanning-many-commits--prs) - - -- [Versions: Release Names vs Version Numbers](versions/index.md) - -## Reporting bugs - -Bug reports should contain the following information: - -* Summary: A brief description. -* Steps to reproduce: How did you encounter the bug? Instructions to reproduce it. -* Expected behavior: How did you expect it to behave? -* Actual behavior: How did it actually behave? -* Screenshot or animated gif: If possible, attach visual documentation of the bug. -* References: Links to any related tickets or information sources. - -### Example - -Here's a [real issue](https://github.com/woothemes/woocommerce/issues/8563#issue-94518347) to demonstrate. 
- - -## Getting Started - -### Clone the repo - -* Click the GitHub fork button to create your own fork -* Clone your fork of the repo to your dev system - -``` -git clone git@github.com:{{user.github}}/{{package.name}}.git -``` - -### If there's no issue, please create one - - -### Let us Know you're working on the issue - -If you're actively working on an issue, please comment in the issue thread stating that you're working on a fix, or (if you're an official contributor) assign it to yourself. - -This way, others will know they shouldn't try to work on a fix at the same time. - - -### Create a feature branch: - -``` -git checkout -b -``` - -### Make your changes and commit: - -* Make sure you comply with the [.editorconfig](http://editorconfig.org/) - -``` -git commit -m '[Issue #] ' -``` - -### Create a Pull Request - -Please don't merge your own changes. Create a pull request so others can review the changes. - -**Before you submit:** - -Before you submit a pull request from your forked repo, check that it meets these guidelines: - -* If the pull request fixes a bug, it should include tests that fail without the changes, and pass with them. -* If the pull request adds functionality, the docs should be updated as part of the same PR. -* Please rebase and resolve all conflicts before submitting. - - -**Push changes:** - -``` -git push origin -``` - -* Open your repository fork on GitHub -* You should see a button to create a pull request - Press it -* Consider mentioning a contributor in your pull request comments to alert them that it's available for review -* **Wait for the reviewer to approve and merge the request** - -### PR Merge Exception - -* Minor documentation grammar/spelling fixes (code example changes should be reviewed) - - -### PR Hints - -Reference the issue number in your commit message e.g.: - -``` -$ git commit -m '[#5] Make sure to follow the PR process for contributions' -``` - -#### For large changes spanning many commits / PRs - -* Create a meta-issue with a bullet list using the `* [ ] item` markdown syntax. -* Create issues for each bullet point -* Link to the meta-issue from each bullet point issue -* Check off the bullet list as items get completed - -Linking from the bullet point issues to the meta issue will create a list of issues with status indicators in the issue comments stream, which will give us a quick visual reference to see what's done and what still needs doing. diff --git a/template/docs/contributing/versions/index.md b/template/docs/contributing/versions/index.md deleted file mode 100644 index 3838f28..0000000 --- a/template/docs/contributing/versions/index.md +++ /dev/null @@ -1,84 +0,0 @@ -# Versions: Release Names vs Version Numbers - - -## Contents - -- [What?](#what) -- [Why?](#why) -- [Details](#details) - - [Release Names (AKA code names)](#release-names-aka-code-names) - - [MVP](#mvp) - - [Version Numbers](#version-numbers) - - [Breaking.Feature.Fix](#breakingfeaturefix) - - [Breaking](#breaking) - - [Feature](#feature) - - [Fix](#fix) -- [Examples](#examples) - - - -## What? - -Version numbers are **only** there to communicate the nature of a change: **Breaking.Feature.Fix**. - -Human names are there to communicate, "Hey everybody, we have a new release! Here are the new features!" - -## Why? 
- -Our releases and versions are separate concepts because the need to communicate new stable release information and the need to inform developers about the nature of changes (breaking, new features, or fixes/security patches) are two separate concerns which advance on separate timetables. - -The conflating of version numbers and public releases has led to a big problem in the software development community. Developers tend to break semantic version numbering, for example, resisting the need to advance the breaking (major) version number because they're not yet ready to release their mvp (which many developers think of as 1.0). - -In other words, we need two separate ways of tracking changes: - -* One for people & public announcements (names). -* One for resolving version conflict problems (numbers). - -## Details - -### Release Names (AKA code names) - -Our major releases have code-names instead of version numbers. The current release is identified by the "latest" tag. The first version is "mvp". After that we pick a theme, and work through the alphabet from A to Z. - -When talking about release versions, we don't say "version Arty" we say "the newest version was released today, code named 'Arty'". After that, we just refer to it as "Arty" or "latest version". More recognizable codename examples include "Windows Vista" or "OS X Yosemite". - - -#### MVP - -MVP stands for "Minimum **Valuable** Product" (a better version of the common "Minimum Viable Product"). The minimum number of features to make the product valuable to users. - -![mvp](https://cloud.githubusercontent.com/assets/364727/8585378/4222fd84-259e-11e5-804c-33ec952ca88d.png) - - -### Version Numbers - -[Semver](http://semver.org), except the version roles have the semantic names, "Breaking.Feature.Fix" instead of "Major.Minor.Patch". - - -#### Breaking.Feature.Fix - -We don't decide what the version will be. The API changes decide. Version numbers are for computers, not people. Release names are for people. - -##### Breaking - -Any breaking change, no matter how small increments the Breaking version number. Incrementing the Breaking version number has absolutely no relationship with issuing a release. - -##### Feature - -When any new feature is added. This could be as small as a new public property, or as large as a new module contract being exposed. - -##### Fix - -When a documented feature does not behave as documented, or when a security issue is discovered and fixed without altering documented behavior. - - - -## Examples - -If it's time to write a blog post to inform the community about new features or important changes, we find the version we want to publicize, tag it "latest", give it a human-readable name, (i.e. "MVP" or "Art Nouveau" in the case of the [JSHomes API](https://github.com/jshomes/jshomes-platform-api/blob/master/README.md#jshomes-api-)). - -That human readable release name **does not replace semver**. "MVP" might correspond to `v1.6.23` or `v2.2.5` -- the point is, **the numbered version has nothing to do with the named release**. - -The numbered version is there so npm and developers can tell whether or not a new version is a breaking change, an added feature change, or a bug / security fix. 
- - diff --git a/template/package.json b/template/package.json deleted file mode 100644 index ffa3489..0000000 --- a/template/package.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "name": "{{package.name}}", - "description": "{{package.description}}", - "version": "1.0.0", - "main": "build/index.js", - "scripts": { - "clean": "rimraf build", - "lint": "eslint source test", - "prebuild": "npm run clean", - "build": "npm run build:webpack && npm run build:min && npm run build:doc", - "build:webpack": "node scripts/buildWebpack.js", - "build:min": "node scripts/buildProduction.js", - "build:doc": "doctoc --github --title \"## Contents\" ./", - "start": "webpack --watch", - "test": "babel-node test/index.js", - "cov": "npm run cov:clean && npm run cov:generate", - "cov:clean": "rimraf coverage", - "cov:generate": "babel-node node_modules/isparta/bin/isparta cover --report text --report html test/index.js", - "prepublish": "npm run build", - "validate": "npm run lint && npm run build && npm test", - "validate-dev": "npm run lint && npm run build && npm test | faucet", - "audit": "nsp check", - "deps": "npm run deps:missing && npm run deps:extra", - "deps:missing": "dependency-check package.json", - "deps:extra": "dependency-check package.json --extra --no-dev --ignore", - "precheck": "npm run validate", - "check": "npm run audit && npm run deps && npm outdated --depth 0", - "precommit": "npm run lint", - "prepush": "npm run validate" - }, - "devDependencies": { - "babel-cli": "^6.2.0", - "babel-core": "^6.0.0", - "babel-preset-es2015": "^6.1.18", - "babel-preset-stage-1":"^6.0.0", - "babel-eslint": "^4.0.5", - "babel-loader": "^6.0.0", - "babel-plugin-transform-object-assign": "^6.1.18", - "blue-tape": "^0.1.10", - "dependency-check": "^2.5.0", - "doctoc": "^0.14.2", - "eslint": "^1.1.0", - "eslint-loader": "^1.0.0", - "eslint-plugin-react": "^2.3.0", - "faucet": "0.0.1", - "husky": "^0.10.1", - "isparta": "^3.0.3", - "node-libs-browser": "^0.5.2", - "nsp": "^2.0.0", - "rimraf": "^2.4.2", - "webpack": "^1.11.0" - }, - "author": "{{user.name}} <{{user.email}}>", - "license": "MIT", - "bugs": { - "url": "https://github.com/{{user.github}}/{{package.name}}/issues" - }, - "homepage": "https://github.com/{{user.github}}/{{package.name}}", - "repository": { - "type": "git", - "url": "https://github.com/{{user.github}}/{{package.name}}.git" - } -} diff --git a/template/scripts/buildProduction.js b/template/scripts/buildProduction.js deleted file mode 100644 index 89b1f4d..0000000 --- a/template/scripts/buildProduction.js +++ /dev/null @@ -1,13 +0,0 @@ -var webpack = require('webpack'); -var path = require('path'); - -// Set Environement variables -process.env.NODE_ENV = 'production'; -process.env.MINIFY = '1'; - -// returns a Compiler instance with configuration file webpack.config.js -var compiler = webpack(require(path.join(process.cwd(), 'webpack.config.js'))); -// Execute webpack -compiler.run(function (err, stats) { - console.log(stats.toString({colors: true})); -}); diff --git a/template/scripts/buildWebpack.js b/template/scripts/buildWebpack.js deleted file mode 100644 index dcbb55c..0000000 --- a/template/scripts/buildWebpack.js +++ /dev/null @@ -1,12 +0,0 @@ -var webpack = require('webpack'); -var path = require('path'); - -// Set Environement variables -process.env.NODE_ENV = 'production'; - -// returns a Compiler instance with configuration file webpack.config.js -var compiler = webpack(require(path.join(process.cwd(), 'webpack.config.js'))); -// Execute webpack -compiler.run(function (err, 
stats) { - console.log(stats.toString({colors: true})); -}); diff --git a/template/source/index.js b/template/source/index.js deleted file mode 100644 index 10e08c2..0000000 --- a/template/source/index.js +++ /dev/null @@ -1,6 +0,0 @@ -function hello (name) { - return `Hello, ${name || 'Anonymous'}!`; -} - - -export default hello; diff --git a/template/test/index.js b/template/test/index.js deleted file mode 100644 index fadb088..0000000 --- a/template/test/index.js +++ /dev/null @@ -1,38 +0,0 @@ -import test from 'blue-tape'; -import hello from '../source/index'; - - -test('Tests run', (assert) => { - assert.pass('Tests run'); - assert.end(); -}); - - -test('Greet World', (assert) => new Promise((resolve) => { - assert.equal(hello('World'), 'Hello, World!'); - - setTimeout(() => { - // do some async stuff - resolve(); - }, 10); -})); - - -test('Should support object spread', (assert) => new Promise((resolve) => { - const options = {x: 1, y: 2, z: 3}; - const {x, ...opts} = options; - - assert.equal(x, 1); - assert.deepEqual(opts, {y: 2, z: 3}); - - resolve(); -})); - -test('Should support object assign', (assert) => new Promise((resolve) => { - const defaults = {x: 1, y: 2, z: 3}; - const options = Object.assign(defaults, {w: 0, x: 11}) - - assert.deepEqual(options, {w: 0, x: 11, y: 2, z: 3}); - - resolve(); -})); diff --git a/template/webpack.config.js b/template/webpack.config.js deleted file mode 100644 index 4da4f4b..0000000 --- a/template/webpack.config.js +++ /dev/null @@ -1,64 +0,0 @@ -var webpack = require('webpack'); -var path = require('path'); -var env = process.env.NODE_ENV || 'development'; -var minify = process.env.MINIFY || false; - -var eslintLoader = { - test: /\.js$/, - loaders: ['eslint'], - include: path.resolve('./source') -}; - -var uglifyPlugin = new webpack.optimize.UglifyJsPlugin({ - sourceMap: true -}); - - -module.exports = { - devtool: 'sourcemap', - - entry: './source/index.js', - - output: { - filename: minify ? 'index.min.js' : 'index.js', - path: path.resolve('./build'), - libraryTarget: 'commonjs2', - }, - - plugins: [ - new webpack.DefinePlugin({ - 'process.env': { - NODE_ENV: '"' + env + '"' - } - }) - ].concat(minify ? [uglifyPlugin] : []), - - module: { - preLoaders: env === 'development' ? 
[ - eslintLoader - ] : [], - loaders: [ - { - test: /\.js$/, - loader: 'babel', - query: { - presets: ['es2015', 'stage-1'], - plugins: ['transform-object-assign'] - }, - include: path.resolve('./source') - } - ] - }, - - resolve: { - extensions: ['', '.js'] - }, - - stats: { - colors: true - }, - - eslint: { - configFile: './.eslintrc' - } -}; diff --git a/test/.eslintrc b/test/.eslintrc deleted file mode 100644 index 71873c7..0000000 --- a/test/.eslintrc +++ /dev/null @@ -1,9 +0,0 @@ -{ - "extends": "../.eslintrc", - - "rules": { - "max-nested-callbacks": 0, - "one-var": 0, - "no-undefined": 0 - } -} diff --git a/test/index.js b/test/index.js deleted file mode 100644 index ceebb03..0000000 --- a/test/index.js +++ /dev/null @@ -1,4 +0,0 @@ -import glob from 'glob'; - - -glob.sync('**/*-test.js', {realpath: true, cwd: __dirname}).forEach(require); diff --git a/test/lib/generate-test.js b/test/lib/generate-test.js deleted file mode 100644 index d0428d8..0000000 --- a/test/lib/generate-test.js +++ /dev/null @@ -1,17 +0,0 @@ -import {test} from 'blue-tape'; -import generate from '../../lib/generate'; - - -test('Generate', t => { - t.ok(generate instanceof Function, 'should be function'); - - t.ok(generate() instanceof Function, 'should return function'); - - t.deepEqual(generate(['test {{x.y}}'])({x: {y: 1}}), ['test 1'], - 'should fill template with values'); - - t.deepEqual(generate(['test {{x.y}}', 'another {{x.y}}'])({x: {y: 1}}), ['test 1', 'another 1'], - 'should fill multiple templates with values'); - - t.end(); -}); diff --git a/test/lib/parseProps-test.js b/test/lib/parseProps-test.js deleted file mode 100644 index d7df30e..0000000 --- a/test/lib/parseProps-test.js +++ /dev/null @@ -1,17 +0,0 @@ -import {test} from 'blue-tape'; -import parseProps from '../../lib/parseProps'; - - -test('Parse props', t => { - t.ok(parseProps instanceof Function, 'should be function'); - - t.ok(parseProps() instanceof Function, 'should return function'); - - t.deepEqual(parseProps()({'x.y': 2}), {x: {y: 2}}, - 'should parse dot-delimited props into nested objects'); - - t.deepEqual(parseProps({x: {a: 1}})({'x.y': 2}), {x: {a: 1, y: 2}}, - 'should merge parsed data into defaults object'); - - t.end(); -});