diff --git a/.cursor/rules/core/fair-witness-agent.mdc b/.cursor/rules/core/fair-witness-agent.mdc deleted file mode 100644 index 4f9fe2d..0000000 --- a/.cursor/rules/core/fair-witness-agent.mdc +++ /dev/null @@ -1,99 +0,0 @@ ---- -description: Fair Witness agent using 5 epistemological functions for topic analysis with adjustable tone/complexity -globs: -alwaysApply: false ---- -# Fair Witness Agent - -## Critical Rules - -- Use all 5 epistemological functions (observer, evaluator, analyst, synthesist, communicator) -- Include validated internet sources with format "[Title] (Author, YYYY)" -- Sources must be from reputable institutions and directly related to topic -- Support adjustable complexity (low/moderate/high) and tone (dry/engaging/vivid) -- Default: moderate complexity, dry tone -- Use E-Prime style where appropriate -- Be concise unless length specified - -## Epistemological Functions - -1. **Observer** - Factual observations without judgment -2. **Evaluator** - Assess validity/quality based on evidence -3. **Analyst** - Examine relationships, patterns, components -4. **Synthesist** - Combine information into cohesive understanding -5. 
**Communicator** - Present information clearly and effectively - - -name: fair-witness-agent -description: Fair Witness agent using 5 epistemological functions for topic analysis -version: 1.0 -severity: suggestion -mode: ask|agent -filters: - - type: event - pattern: "chat_start|chat_response" - - type: content - pattern: "(explain|analyze|describe|review|fair witness|assess|compare|evaluate)" -actions: - - type: transform - content: | - {{ - const complexity = input.match(/complexity[:\s]+(low|moderate|high)/i)?.[1] || "moderate"; - const tone = input.match(/tone[:\s]+(dry|engaging|vivid)/i)?.[1] || "dry"; - const functions = input.match(/functions[:\s]+([a-z,\s]+)/i)?.[1]?.split(',').map(f => f.trim()) || - ['observer', 'evaluator', 'analyst', 'synthesist', 'communicator']; - const topics = input.match(/topics[:\s]+([^,]+(?:,\s*[^,]+)+)/i)?.[1]?.split(',').map(t => t.trim()) || - [extractSubject(input)]; - - const config = `emulation: - type: Fair Witness Bot - framework: Function-Epistemic Hybrid Framework - epistemic_functions: ${functions.join(', ')} - output: {detail_level: ${complexity}, style: ${tone}}`; - - return topics.length > 1 ? 
- `## Fair Witness Comparison\n${config}\n${generateComparisonTable(topics, functions)}` : - `## Fair Witness Analysis\n${config}\n${processWithFunctions(input, functions)}`; - }} - -functions: - processWithFunctions: | - function processWithFunctions(input, functions) { - const subject = extractSubject(input); - return functions.map(f => `### ${f.charAt(0).toUpperCase() + f.slice(1)} Function\n[Analysis content]`).join('\n\n'); - } - - function generateComparisonTable(topics, functions) { - let table = "| Criteria | " + topics.join(" | ") + " |\n|----------|" + topics.map(() => "----------").join("|") + "|\n"; - return table + functions.map(f => `| ${f.charAt(0).toUpperCase() + f.slice(1)} | ` + topics.map(() => "Analysis").join(" | ") + " |").join('\n'); - } - - function extractSubject(input) { return "the requested subject"; } - -examples: - - description: "REST vs GraphQL APIs comparison" - input: "Use Fair Witness to explain the difference between REST and GraphQL APIs." - output: | - ## Fair Witness Analysis - emulation: - type: Fair Witness Bot - framework: Function-Epistemic Hybrid Framework - epistemic_functions: observer, analyst, evaluator, communicator - output: {detail_level: moderate, style: dry} - - ### Observer Function - REST emerged ~2000, GraphQL developed by Facebook in 2015. - - ### Analyst Function - REST uses multiple endpoints, GraphQL uses single endpoint with queries. - - ### Evaluator Function - REST simpler for basic CRUD, GraphQL better for complex data requirements. - - ### Communicator Function - Choose REST for simple APIs, GraphQL for complex data relationships. 
- -metadata: - priority: high - version: 1.0 - \ No newline at end of file diff --git a/.cursor/rules/templates/user-story.md b/.cursor/rules/templates/user-story.md index 5731ca2..41ca77e 100644 --- a/.cursor/rules/templates/user-story.md +++ b/.cursor/rules/templates/user-story.md @@ -2,7 +2,7 @@ Status: { Draft | Review | Blocked | ToDo | In Progress | Done | Dropped } -This is a user story template that you can use to document user stories for your software development projects. User stories follow the "As a [role], I want [an action or feature], so that [a reason or benefit]" format, and they help define the functionality and requirements of a feature or improvement. +User stories follow the "As a [role], I want [an action or feature], so that [a reason or benefit]" format, and they help define the functionality and requirements of a feature or improvement. ### Story Definition diff --git a/.cursor/rules/utils/git-commit-push-agent.mdc b/.cursor/rules/utils/git-commit-push-agent.mdc deleted file mode 100644 index 130f119..0000000 --- a/.cursor/rules/utils/git-commit-push-agent.mdc +++ /dev/null @@ -1,105 +0,0 @@ ---- -description: This rule enforces Git commit standards and conventions while committing and pushing code in Agent Mode. This rule is crucial for keeping track of changes made by cursor agents, and should run whenever the user expresses they want to commit or push code changes made by the agent. This rule should be followed when: 1. the user wants to commit code, the git aliases, 'gpu' or 'gc', 'gcm' were submitted by the user. This rule will ask the user if they would prefer to autoRun the commit command going forward. All agents should adhere to the standards set out in this rule. 
-alwaysApply: false ---- -# Git Commit Standards - -## Critical Rules - -- All AI agents MUST follow this rule -- Use `git status` to check modified or staged files before committing -- Never overwrite or push untracked files -- Commits MUST follow the conventional-commit standard -- Always run `git add ` from repo root -- Use present tense in commit subject ("add" not "added") -- Subject ≀ 72 chars, imperative mood, blank line before body -- Reference issue/ticket with # if used -- Describe what or why, not how changes were made; fixes must state what was fixed -- Keep descriptions brief and focusedβ€”avoid verbose explanations -- Include agent name in commit footer ("Commit written by ") - -### Commit Format -``` -(): (#) - -[optional body] - -[optional footer] - -Commit written by -``` - -- If agent not specified, use "cursor agent" -- Types: feat, fix, docs, style, refactor, perf, test, chore, ci, security, rename - -### Post-Commit Actions - -- Do NOT include post-commit commands (e.g., `npm run lint`, `git push`) in commit message -- Suggest these actions separately after commit - -### Agent Enforcement - -- Always check `git status` before commit -- Never use generic messages ("update code", "commit changes") -- Identify change type per conventional commit -- Commit body should be concise and brief, and should not exceed 20 words -- Prefer single-line descriptions when possible; only use body for essential context -- Use meaningful scope (codebase area) -- Clear, concise, imperative subject -- Add agent name as author -- Convert user commit instructions to proper format - - -name: git-commit -version: 1.0 -description: Enforces conventional commit standards for all agent commits and pushes. Invoked on commit/push requests or git aliases (gc, gcm, gpu). Prompts for or infers proper message if user input is vague. 
-filters: - - type: file_extension - pattern: "COMMIT_EDITMSG$" - - type: event - pattern: "(pre_commit|post_commit)" - - type: content - pattern: "(commit|push|gc|gcm|gpu)" - -examples: - - input: | - added new feature - output: | - feat(api): add user authentication endpoint (#123) - - JWT-based authentication with env var configuration - - Commit written by KawaiiSamurai - - input: | - fixing bug - output: | - fix(database): resolve connection timeout issue (#456) - - Increase timeout to 30s and add retry logic - - Commit written by SageDaddy - - input: | - update docs - output: | - docs(readme): update installation instructions (#789) - - Clarify setup steps for new contributors - - Commit written by Spellchuck - -tests: - - input: "updated code" - output: "feat(core): update code processing logic (#567)\n\nCommit written by BasicBitchDeveloper" - - input: "fix error" - output: "fix(validation): resolve input sanitization error (#890)\n\nCommit written by ThirstySimp" - - input: "add docs" - output: "docs(api): add OpenAPI documentation (#345)\n\nCommit written by Spellchuck" - - input: "long verbose description that should be shortened" - output: "feat(module): add new feature\n\nBrief essential context only\n\nCommit written by cursor agent" - - input: "commit" - output: "When the user simply types 'commit', the agent must:\n1. Check git status\n2. Determine the appropriate type and scope based on changed files\n3. Generate a proper conventional commit message\n4. 
Present it to the user for approval (post-commit actions are suggested separately)" - -metadata: - priority: high - version: 1.0 - \ No newline at end of file diff --git a/.cursor/rules/utils/release-version-bump-auto.mdc b/.cursor/rules/utils/release-version-bump-auto.mdc index 1b760c6..ecbea44 100644 --- a/.cursor/rules/utils/release-version-bump-auto.mdc +++ b/.cursor/rules/utils/release-version-bump-auto.mdc @@ -1,5 +1,5 @@ --- -description: +description: Apply when the user is creating or preparing a release, version bump, tag, publish, or deploy. Validates semantic version bumps against change analysis (breaking/feat/fix), blocks inappropriate bumps, and guides major/minor/patch or custom version choice. Use for release workflows, package versioning, and publish/deploy preparation. globs: alwaysApply: false --- diff --git a/.cursor/rules/workflows/dev-spike.mdc b/.cursor/rules/workflows/dev-spike.mdc deleted file mode 100644 index 2aefb57..0000000 --- a/.cursor/rules/workflows/dev-spike.mdc +++ /dev/null @@ -1,221 +0,0 @@ ---- -description: This rule provides the guiding principles that developer and architect agents must follow for time-boxed technical investigations. -globs: -alwaysApply: false ---- -# Technical Spike Workflow - -A guide for conducting time-boxed technical investigations (spikes) to reduce uncertainty and risk in development decisions. ⚑ - -## Critical Rules - -- Spikes MUST be time-boxed (typically 1-2 hours) -- Spikes MUST have clear investigation goals -- Spikes MUST produce documented findings -- Spike documents containing information about the spike MUST be saved in `.cursor/.ai/spikes/` with format `spike--story-<#>.md` where the digits represent the user story number. -- Spike outcomes MUST inform architectural or implementation decisions -- The agent will check out a spike branch formatted `spike/spike-story-name` and use that branch to commit and push code changes. 
-- Code produced during spikes MUST be treated as disposable code for proving whether an approach is viable. -- Findings MUST be shared with the team and updated on the `.cursor/.ai/architecture.md`. -- The user story for the spike must be updated with the decision selected by the user. -- Any web search results that were referenced should be included as links in the spike document. - - -name: dev-spike -description: Standards for conducting and documenting technical spikes -version: 1.0 -filters: - - type: file_extension - pattern: "\\.md$" - - type: path - pattern: "\\.cursor/\\.ai/spikes/\\d{4}-.*\\.md" - - type: content - pattern: "## Investigation Goals" - -metadata: - priority: high - version: 1.0 - - -## Spike Template - -```markdown -# Technical Spike: [Title] - -[link-to-user-story](./link-to-user-story) -[link-to-arch](./link-to-arch) - -## Investigation Goals -- [ ] Primary question to answer -- [ ] Secondary questions -- [ ] Technical constraints to validate - -## Timeline -- Start Date: [YYYY-MM-DD] -- End Date: [YYYY-MM-DD] -- Time Box: [X] days - -## Investigation Areas -1. [Area 1] - - Approach: - - Tools/Technologies: - - Success Criteria: - -2. [Area 2] - - Approach: - - Tools/Technologies: - - Success Criteria: - -## Findings -### What Worked -- Finding 1 -- Finding 2 - -### What Didn't Work -- Challenge 1 -- Challenge 2 - -### Open Questions -- Question 1 -- Question 2 - -## Recommendations -- [ ] Primary recommendation -- [ ] Alternative approaches -- [ ] Risk assessment - -## Code Artifacts -- Repository: [link] -- Branch: [name] -- Key Files: - - `path/to/file`: Purpose - -## Next Steps -- [ ] Action item 1 -- [ ] Action item 2 - -## Team Impact -- Frontend: -- Backend: -- Infrastructure: -- Testing: - -## Resources -- Documentation: [links] -- References: [links] -``` - -## Spike Process - -### 1. Initiation 🌱 -1. **BasicDev**, **ThirstySimp** or **KawaiiSamurai** identifies technical uncertainty -2. 
**SageDaddy** reviews and advises on investigation scope -3. **SailorScrum** approves time allocation - -### 2. Planning πŸ“ -1. Create spike document using template -2. Define clear investigation goals -3. Set time box and success criteria -4. Identify required resources - -### 3. Investigation πŸ” -1. Create disposable branch for experiments -2. Document findings as they emerge -3. Regular check-ins with **SageDaddy** -4. Capture both successes and failures - -### 4. Review & Documentation ✨ -1. **Spellchuck** ensures clear documentation -2. **qwoof** reviews testing implications -3. **Godmode** assesses infrastructure impact -4. Team reviews findings - -### 5. Decision & Next Steps 🎯 -1. Present findings to team -2. Document architectural decisions -3. Update technical documentation -4. Archive spike code appropriately - -## Examples - - -# Technical Spike: GraphQL vs REST for API Architecture - -## Investigation Goals -- [ ] Compare performance characteristics of GraphQL and REST for our use case -- [ ] Evaluate developer experience and tooling ecosystem -- [ ] Assess impact on frontend development workflow -- [ ] Validate GraphQL schema design approaches - -## Timeline -- Start Date: 2024-02-01 -- End Date: 2024-02-05 -- Time Box: 5 days - -## Investigation Areas -1. Performance - - Approach: Build equivalent endpoints in both technologies - - Tools: Apollo Server, Express, k6 for load testing - - Success Criteria: Clear metrics on response times and payload sizes - -2. 
Developer Experience - - Approach: Implement common use cases in both - - Tools: GraphQL Codegen, Swagger, TypeScript - - Success Criteria: Documented developer workflow and pain points - -## Findings -### What Worked -- GraphQL reduced over-fetching by 40% -- Code generation improved type safety -- Schema documentation was more maintainable - -### What Didn't Work -- File upload handling was complex -- Caching required additional setup -- Learning curve steeper than expected - -## Recommendations -- [ ] Proceed with GraphQL for new services -- [ ] Keep REST for file operations -- [ ] Implement incremental adoption strategy - -## Code Artifacts -- Repository: github.com/org/graphql-spike -- Branch: spike/graphql-investigation -- Key Files: - - `schema/`: GraphQL schema definitions - - `resolvers/`: Query implementations - - `load-tests/`: Performance test scripts - -## Next Steps -- [ ] Create ADR for API architecture -- [ ] Plan training sessions -- [ ] Define schema design guidelines - -## Team Impact -- Frontend: Requires Apollo Client integration -- Backend: New tooling and patterns needed -- Infrastructure: Additional GraphQL gateway -- Testing: New approach for API testing - - -## Quality Assurance - -Before completing a spike: - -1. **Documentation Quality** - - Clear investigation goals - - Well-documented findings - - Actionable recommendations - -2. **Technical Rigor** - - Methodical investigation - - Evidence-based findings - - Considered alternatives - -3. **Risk Assessment** - - Technical risks documented - - Mitigation strategies proposed - - Resource implications clear - -Remember, spikes are about reducing uncertainty through focused investigation. Document everything - both successes and failures contribute to the learning! 
⚑ \ No newline at end of file diff --git a/.cursor/rules/workflows/pm-story.mdc b/.cursor/rules/workflows/pm-story.mdc deleted file mode 100644 index 747a215..0000000 --- a/.cursor/rules/workflows/pm-story.mdc +++ /dev/null @@ -1,257 +0,0 @@ ---- -description: This workflow guides the process of discovering, drafting and validating user stories which will be written by SailorScrum with input from the rest of the team. -glob: -alwaysApply: false ---- -# User Story Discovery Workflow - -In the name of the Moon, this workflow guides the process of discovering and crafting user stories that will power your project's success! πŸŒ™βœ¨ - -## Critical Rules - -- The PM SailorScrum will gather and clarify requirements from your primary stakeholder, the user. -- Every user story MUST follow the standard format: "As a [role], I want [goal] so that [benefit]" -- Stories MUST be independent, negotiable, valuable, estimable, small, and testable -- Every story MUST have clear requirements and acceptance criteria -- The user story MUST be saved in `.cursor/.ai/` with the format `story-title.md` -- The story should be updated as new insights, constraints and decisions -- Stories MUST be discoverable through semantic search -- Stories MUST link to related architecture decisions, spike documents or technical constraints -- Stories MUST include effort estimation (S, M, L, XL) -- All agents can use Tavily to perform web search. - - -name: pm-story -description: Standards for discovering and documenting user stories -version: 1.0 -filters: - - type: file_extension - pattern: "\\.md$" - - type: content - pattern: "As a .*, I want .* so that .*" - - type: path - pattern: "\\.cursor/\\.ai/backlog/\\d{4}-.*\\.md" - -metadata: - priority: high - version: 1.0 - - -## Discovery Process - -### 1. Story Initiation 🌱 - -When a new story is needed: -1. SailorScrum leads a discovery session and asks the user to clarify requirements and business needs. -2. 
KawaiiSamurai or BasicDev provides technical insights -3. SageDaddy reviews for architectural implications offers recommendations on approach or technology choices -4. qwoof confirms user goals and ensures testability - -### 2. Story Creation Template πŸ“ - -```markdown -# User Story: [Title] - -## Story -As a [role], -I want [goal/feature] -so that [benefit/value] - -## Background -[Context and additional information] - -## Acceptance Criteria -- [ ] Given [context], when [action], then [result] -- [ ] Given [context], when [action], then [result] -- [ ] Given [context], when [action], then [result] - -## Technical Notes -- Dependencies: -- Architecture considerations: -- Security implications: -- Unknowns - -## Effort Estimation -Size: [S/M/L/XL] -Confidence: [High/Medium/Low] - -## Related -- Architecture Decision Records: [links] -- Technical Documentation: [links] -- Dependencies: [story links] - -## Team Assignment -- Frontend: [team member] -- Backend: [team member] -- Devops: [team member] -- QA: [team member] - -## Timeline -- Created: [date] -- Target: [milestone] -``` - -### 3. Story Refinement Process πŸ’« - -1. **Initial Draft** - - Use the template above - - Focus on user value - - Keep it solution-agnostic - -2. **Technical Review** - - SageDaddy reviews architecture impact - - KawaiiSamurai checks frontend feasibility - - BasicDev validates backend requirements - - Godmode assesses infrastructure needs - -Checkpoint: the user is asked to review the document progress so far, suggest or provide changes before the agent is to move onto the next step. - -3. **Quality Check** - - qwoof reviews testability - - Spellchuck ensures clarity and completeness of documentation - - ThirstySimp checks market relevance - -4. 
**Final Approval** - - SailorScrum validates INVEST criteria - - Team consensus on estimation - - Story is ready for sprint planning and status is updated to "Status: ToDo" - - Blocked, Rejected or Dropped stories should have Status updated to `Dropped` and moved into `.cursor/.ai/backlog/`. The story should be updated with a 1 sentence description of why it could not proceed to development. - -## Story States - -Stories can be in the following states: -- πŸ“ Draft - Initial creation and refinement -- πŸ” Review - Under technical review and discussion -- ✨ Ready - Approved and ready for development -- β›” Blocked/Rejected/Dropped - Has blockers or deemed not viable for development - -## Agent Responsibilities - -### SailorScrum -- Leads discovery sessions -- Ensures business value is clear -- Manages story state transitions - -### KawaiiSamurai -- Reviews frontend implications -- Suggests UI/UX improvements -- Estimates frontend effort -- Identifies design dependencies - -### BasicDev -- Validates technical feasibility -- Identifies backend dependencies -- Estimates development effort -- Suggests implementation approach - -### SageDaddy -- Reviews architectural impact -- Identifies technical constraints -- Suggests scalable solutions -- Validates technical approach - -### Spellchuck -- Ensures clear documentation -- Refines acceptance criteria -- Maintains consistent terminology -- Links related documentation - -### ThirstySimp -- Researches market trends -- Suggests modern approaches -- Identifies competitive features -- Validates user experience - -### qwoof -- Ensures testability -- Defines test scenarios -- Identifies edge cases -- Validates acceptance criteria - -### Godmode -- Assesses infrastructure needs -- Identifies scaling requirements -- Reviews security implications -- Validates deployment approach - -## Examples - - -# User Story: Enable Social Login - -## Story -As a new user, -I want to sign up using my Google account -so that I can start using 
the app quickly without creating another password - -## Acceptance Criteria -- [ ] Given I'm on the login page, when I click "Sign in with Google", then I'm redirected to Google OAuth -- [ ] Given I authorize the app, when Google redirects back, then my account is created/linked -- [ ] Given I'm logged in, when I view my profile, then I see my Google profile picture - -## Technical Notes -- Dependencies: Google OAuth API -- Security: OAuth 2.0 implementation -- Data: User profile mapping strategy - -## Effort Estimation -Size: M -Confidence: High - -## Related -- ADR: 0003-oauth-implementation.md -- Docs: auth-flow.md - - - -# User Story: Automated Deployment Pipeline - -## Story -As a developer, -I want my code to be automatically deployed when merged to main -so that I can deliver features faster with confidence - -## Acceptance Criteria -- [ ] Given a PR is merged to main, when CI passes, then code is deployed to staging -- [ ] Given deployment succeeds, when health checks pass, then notification is sent -- [ ] Given deployment fails, when error occurs, then team is notified with logs - -## Technical Notes -- Dependencies: GitHub Actions, AWS -- Security: Deployment credentials -- Infrastructure: K8s clusters - -## Effort Estimation -Size: L -Confidence: Medium - -## Related -- ADR: 0005-ci-cd-strategy.md -- Docs: deployment-guide.md - - -## Quality Assurance - -Before a story is marked as ready: - -1. **Clarity Check** - - Story follows template - - Requirements and Acceptance criteria are clear - -2. **Value Check** - - Business value is clear - - ROI is justified - - User benefit is explicit - -3. **Technical Check** - - Dependencies identified - - Architecture reviewed - - Security considered - -4. 
**Test Check** - - Acceptance criteria are testable - - Main use cases and scenarios identified - - Edge cases identified - - Test approach defined - -Remember, in the name of the Moon, we shall write stories that bring value to our users and joy to our developers! πŸŒ™βœ¨ \ No newline at end of file diff --git a/.cursor/commands/architecture.md b/.cursor/skills/architecture/SKILL.md similarity index 98% rename from .cursor/commands/architecture.md rename to .cursor/skills/architecture/SKILL.md index d0ed21c..7720854 100644 --- a/.cursor/commands/architecture.md +++ b/.cursor/skills/architecture/SKILL.md @@ -1,3 +1,9 @@ +--- +name: architecture +description: Architecture Design Command πŸ—οΈ +disable-model-invocation: true +--- + # Architecture Design Command πŸ—οΈ This command guides the AI architect agent to analyze user stories and propose multiple architectural solutions following a structured workflow. diff --git a/.cursor/commands/changelog.md b/.cursor/skills/changelog/SKILL.md similarity index 93% rename from .cursor/commands/changelog.md rename to .cursor/skills/changelog/SKILL.md index ce01916..62b75e7 100644 --- a/.cursor/commands/changelog.md +++ b/.cursor/skills/changelog/SKILL.md @@ -1,3 +1,9 @@ +--- +name: changelog +description: Changelog Generation Command πŸ“ +disable-model-invocation: true +--- + # Changelog Generation Command πŸ“ Generate a changelog.md by analyzing git history and tags. 
diff --git a/.cursor/rules/core/dependency-analysis-agent.mdc b/.cursor/skills/dependency-analysis-agent/SKILL.md similarity index 95% rename from .cursor/rules/core/dependency-analysis-agent.mdc rename to .cursor/skills/dependency-analysis-agent/SKILL.md index 1a55743..56deca9 100644 --- a/.cursor/rules/core/dependency-analysis-agent.mdc +++ b/.cursor/skills/dependency-analysis-agent/SKILL.md @@ -1,22 +1,17 @@ --- +name: dependency-analysis description: This rule automatically analyzes dependencies before they're installed to provide insights about maintenance frequency, security vulnerabilities, and popularity in the developer ecosystem. -globs: -alwaysApply: false --- + # Dependency Analysis ## Critical Rules -- Before a dependency is installed (npm install, yarn add, pnpm add), automatically analyze the package +- Before a dependency is installed (npm install, yarn add, pnpm add), automatically analyze the package using socket.io to get the package information from the npm registry and GitHub. 
- Check maintenance frequency by examining last release date, commit activity, and issue response times - Scan for known security vulnerabilities using npm audit and security databases - Assess popularity through download counts, GitHub stars, and community adoption -- Provide actionable recommendations for dependency selection -- Flag potentially problematic dependencies with clear explanations -- Suggest alternatives for deprecated or poorly maintained packages -- Maintain a local cache of analysis results to avoid repeated API calls -- Respect rate limits when querying external APIs -- Provide analysis in a clear, actionable format with severity levels +- Provide actionable recommendations for dependency selection in a clear, actionable format with severity levels ## Analysis Categories @@ -314,6 +309,11 @@ npm audit - Analyzes pnpm-lock.yaml - Respects pnpm's dependency resolution +### bun +- Compatible with `bun add` and `bun audit` +- Analyzes bun.lockb +- Respects bun's dependency resolution + ## Configuration Options ### Analysis Depth @@ -347,5 +347,3 @@ npm audit - Provide clear warnings for security vulnerabilities - Suggest secure alternatives when available - Integrate with existing security scanning tools - -Remember: A well-analyzed dependency is a secure dependency! πŸ”’βœ¨ diff --git a/.cursor/rules/workflows/dev-workflow.mdc b/.cursor/skills/dev-workflow/SKILL.md similarity index 98% rename from .cursor/rules/workflows/dev-workflow.mdc rename to .cursor/skills/dev-workflow/SKILL.md index 9a6ce08..623dbfc 100644 --- a/.cursor/rules/workflows/dev-workflow.mdc +++ b/.cursor/skills/dev-workflow/SKILL.md @@ -1,7 +1,6 @@ --- +name: dev-workflow description: This rule defines the workflow for AI agents to follow when implementing features from user stories, focusing on lean development principles and agile methodologies. 
-globs: -alwaysApply: false --- # Lean and Agile Development Workflow Standards @@ -179,4 +178,4 @@ I'll start by updating the submission handler to include better error handling." - Explain technical decisions in simple terms - Ask clarifying questions when requirements are ambiguous - Suggest alternatives when faced with implementation challenges -- Celebrate incremental wins to maintain motivation \ No newline at end of file +- Celebrate incremental wins to maintain motivation diff --git a/.cursor/skills/git-commit-push-agent/SKILL.md b/.cursor/skills/git-commit-push-agent/SKILL.md new file mode 100644 index 0000000..91d01ef --- /dev/null +++ b/.cursor/skills/git-commit-push-agent/SKILL.md @@ -0,0 +1,50 @@ +--- +name: git-commit-push +description: Enforces Git commit standards and conventions when committing and pushing code in Agent Mode. This rule is crucial for keeping track of changes made by agents, and should run whenever the user expresses they want to commit or push code changes made by the agent. This rule should be followed when: 1. the user wants to commit code, the git aliases, 'gpu' or 'gc', 'gcm' were submitted by the user. This rule will ask the user if they would prefer to auto-run the commit command going forward. 
+--- + +# Git Commit Standards + +## Critical Rules + +- Use `git status` to check modified or staged files before committing +- Never overwrite or push untracked files +- Commits MUST follow the conventional-commit standard +- Always run `git add ` from repo root +- Use present tense in commit subject ("add" not "added") +- Subject ≀ 72 chars, imperative mood, blank line before body +- Reference issue/ticket with # if used +- Describe what or why, not how changes were made; fixes must state what was fixed +- Keep descriptions brief and focusedβ€”avoid verbose explanations +- Include agent name in commit footer ("Commit written by ") + +### Commit Format +``` +(): (#) + +[optional body] + +[optional footer] + +Commit written by +``` + +- If agent not specified, use "cursor agent" +- Types: feat, fix, docs, style, refactor, perf, test, chore, ci, security, rename + +### Post-Commit Actions + +- Do NOT include post-commit commands (e.g., `npm run lint`, `git push`) in commit message +- Suggest these actions separately after commit + +### Agent Enforcement + +- Always check `git status` before commit +- Never use generic messages ("update code", "commit changes") +- Identify change type per conventional commit +- Commit body should be concise and brief, and should not exceed 20 words +- Prefer single-line descriptions when possible; only use body for essential context +- Use meaningful scope (codebase area) +- Clear, concise, imperative subject +- Add agent name as (co-)author +- Convert user commit instructions to proper format diff --git a/.cursor/skills/intent-discovery/SKILL.md b/.cursor/skills/intent-discovery/SKILL.md new file mode 100644 index 0000000..1377e95 --- /dev/null +++ b/.cursor/skills/intent-discovery/SKILL.md @@ -0,0 +1,51 @@ +--- +name: intent-discovery +description: This skill guides a user to draft and clarify the intent and scope of a change or feature to be implemented. 
This skill can be used to create user stories, tasks, plans or other artifacts that will be used to implement code changes. +--- + +# Intent Discovery Workflow + +This workflow uncovers user intent to craft plans for changes or features based on intent and outcomes. + +## Critical Rules + +### User Story Format + +If a user story is created, it should follow the standard format: "As a [role], I want [goal] so that [benefit]" +- Stories MUST be independent, negotiable, valuable, estimable, small, and testable +- Every story MUST have clear requirements and acceptance criteria. A story should be a single, self-contained unit of work that can be completed in 2 days or less. +- If a story or plan is created, it should be saved in `.cursor/plans/` with the format `.md` +- The plan should be updated as new insights, constraints and decisions are discovered. The user should be asked to review the plan and suggest or provide changes before the agent is to move onto the next step. + +### User Story Template + +```markdown +# User Story: [Title] + +## Story +As a [role], +I want [goal/feature] +so that [benefit/value] + +## Background + +[Context and additional information] + +## Acceptance Criteria +- [ ] Given [context], when [action], then [result] + +## Technical Notes +- Dependencies: +- Architecture considerations: +- Security implications: +- Unknowns + +## Related +- Architecture Decision Records: [links] +- Technical Documentation: [links] +- Dependencies: [story links] or Jira links + +Checkpoint: the user is asked to review the document progress so far, suggest or provide changes before the agent is to move onto the next step. 
+ + +## Examples diff --git a/.cursor/rules/utils/refactor-agent.mdc b/.cursor/skills/refactor-agent/SKILL.md similarity index 99% rename from .cursor/rules/utils/refactor-agent.mdc rename to .cursor/skills/refactor-agent/SKILL.md index 8bb85c7..d59150a 100644 --- a/.cursor/rules/utils/refactor-agent.mdc +++ b/.cursor/skills/refactor-agent/SKILL.md @@ -1,7 +1,6 @@ --- +name: refactor-agent description: Agent rule for refactoring existing code blocks or a feature by identifying language and applying only when refactor/improvement is requested -globs: -alwaysApply: false --- ## Critical Rules diff --git a/.cursor/commands/testing-pyramid.md b/.cursor/skills/testing-pyramid/SKILL.md similarity index 98% rename from .cursor/commands/testing-pyramid.md rename to .cursor/skills/testing-pyramid/SKILL.md index a997bef..7be533c 100644 --- a/.cursor/commands/testing-pyramid.md +++ b/.cursor/skills/testing-pyramid/SKILL.md @@ -1,10 +1,16 @@ +--- +name: testing-pyramid +description: Test Distribution Analysis Command πŸ§ͺ +disable-model-invocation: true +--- + # Test Distribution Analysis Command πŸ§ͺ This command guides the AI agent to analyze test distribution across unit, integration, and E2E tests, and provide recommendations for increasing test coverage and improving test distribution in line with business and technical strategy. 
## Critical Rules -- Always start by identifying the test framework and available coverage tools +- Start by identifying the test framework and available coverage tools - Analyze test distribution using framework-specific commands to list all test files - Classify tests accurately as unit, integration, or E2E based on multiple indicators - Target distribution: 70% unit tests, 20% integration tests, 10% E2E tests (this is a guideline at early stages of a project, not a strict requirement) diff --git a/docs/fair-witness-examples.md b/docs/fair-witness-examples.md deleted file mode 100644 index 0be2746..0000000 --- a/docs/fair-witness-examples.md +++ /dev/null @@ -1,592 +0,0 @@ - -# Examples Using the Fair Witness Framework ✨ πŸ“œ - -## **Invoke the Framework** - -Ask Fair Witness to explain your topic in Ask or Agent mode. - -``` - -Use Fair Witness to... - -Provide a Fair Witness analysis of... - -``` - -## **Include web search results** - -Use `@Web` or `Include sources.` to include web search results. - -## **Comparing topics** - -``` -Use Fair Witness to analyze how RAG differs from fine-tuning LLMs. Provide examples. @Web -``` - -
-Response from Gemini Flash Preview 04-17 - - -## Fair Witness Analysis -``` -emulation: - type: Fair Witness Bot - framework: Function-Epistemic Hybrid Framework - epistemic_functions: - - observer - - analyst - - evaluator - - synthesist - - communicator - constraints: - natural_language: - style: E-Prime - output: - type: natural language - detail_level: moderate - length: moderate - complexity: moderate - style: dry - includes_sources: true -``` - -### Observer Function - -Retrieval-Augmented Generation (RAG) and fine-tuning are two methods for enhancing Large Language Models (LLMs). RAG involves enabling an LLM to access and utilize external, up-to-date information from knowledge bases without requiring retraining. Fine-tuning involves further training an existing LLM on a specific dataset to improve its performance on domain-specific tasks or to incorporate new knowledge. - -**Sources:** -- [18 real world Examples of Using RAG vs. Fine-Tuning (AI Rabbit, 2024)](https://medium.com/@jacktheflipper/20-real-world-examples-of-using-rag-vs-fine-tuning-e24056000251) -- [Retrieval-Augmented Generation vs Fine-Tuning: What’s Right for You? (Oren Ezra, 2025)](https://www.k2view.com/blog/retrieval-augmented-generation-vs-fine-tuning/) - -### Analyst Function - -The core difference lies in how external information integrates into the LLM's response generation. RAG adds a data retrieval component that fetches relevant information at query time, allowing the LLM to use this dynamic data. Fine-tuning modifies the LLM's parameters by training it on a static dataset, embedding the new knowledge directly into the model. RAG is generally more cost-effective for incorporating dynamic, frequently updated information, while fine-tuning is more suitable for deepening domain-specific understanding and requires more labeled data and computational resources. RAG tends to exhibit less proneness to hallucinations due to grounding responses in retrieved data. 
- -**Sources:** -- [Retrieval-Augmented Generation vs Fine-Tuning: What’s Right for You? (Oren Ezra, 2025)](https://www.k2view.com/blog/retrieval-augmented-generation-vs-fine-tuning/) - -### Evaluator Function - -RAG demonstrates advantages when dealing with dynamic data and scenarios requiring up-to-date information. Examples include customer service chatbots needing current product details, financial reports based on real-time market data, and technical support systems accessing the latest documentation. Fine-tuning proves more effective for specialized tasks demanding deep domain knowledge. Examples include sentiment analysis with nuanced interpretation, named-entity recognition for specialized terminology (e.g., legal terms), and providing technical support for unique products. - -**Sources:** -- [18 real world Examples of Using RAG vs. Fine-Tuning (AI Rabbit, 2024)](https://medium.com/@jacktheflipper/20-real-world-examples-of-using-rag-vs-fine-tuning-e24056000251) - -### Synthesist Function - -Choosing between RAG and fine-tuning depends on the specific use case requirements. If the need involves accessing and incorporating the latest, dynamic information from internal or external sources, RAG presents a more appropriate and cost-efficient solution. If the objective centers on enhancing the LLM's understanding and performance on a static, domain-specific task where subtle nuances are critical, fine-tuning offers a more tailored approach. - -**Sources:** -- [18 real world Examples of Using RAG vs. Fine-Tuning (AI Rabbit, 2024)](https://medium.com/@jacktheflipper/20-real-world-examples-of-using-rag-vs-fine-tuning-e24056000251) -- [Retrieval-Augmented Generation vs Fine-Tuning: What’s Right for You? (Oren Ezra, 2025)](https://www.k2view.com/blog/retrieval-augmented-generation-vs-fine-tuning/) - -### Communicator Function - -RAG enhances LLMs by providing access to external, dynamic data at query time, suitable for applications requiring current information. 
Fine-tuning adapts LLMs to specific tasks by training them on static datasets, improving domain-specific performance. The optimal method depends on whether the priority lies in utilizing up-to-date data (RAG) or deepening specialized knowledge (fine-tuning). - -**Sources:** -- [18 real world Examples of Using RAG vs. Fine-Tuning (AI Rabbit, 2024)](https://medium.com/@jacktheflipper/20-real-world-examples-of-using-rag-vs-fine-tuning-e24056000251) -- [Retrieval-Augmented Generation vs Fine-Tuning: What’s Right for You? (Oren Ezra, 2025)](https://www.k2view.com/blog/retrieval-augmented-generation-vs-fine-tuning/) - -
-
-Response from o3-mini - - -Below is a concise Fair Witness analysis outlining key differences between Retrieval-Augmented Generation (RAG) and fine-tuning LLMs, along with concrete examples. - -───────────────────────────── -● Observer: -β€’ Fine-tuning adjusts the internal parameters of an LLM to specialize its behavior on a given dataset. For example, as discussed in [Anyscale’s analysis](https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2), fine-tuning (whether full-parameter or via methods like LoRA) recalibrates the model to perform better on tasks like SQL query generation. -β€’ RAG, by contrast, keeps the model’s parameters unchanged and supplements its responses by retrieving external documents or data in real time, thereby integrating fresh or domain-specific information. - -───────────────────────────── -● Evaluator: -β€’ Fine-tuning can lead to a more β€œinternalized” expertise but is resource-intensive, requiring careful management of optimizer states and potentially large checkpoint sizes. -β€’ RAG systems, being parameter-efficient, allow the base model to remain unchanged while adapting to dynamic contexts through a retrieval step, which tends to be more flexible and scalable in contexts where up-to-date information is essential. - -───────────────────────────── -● Analyst: -β€’ In a fine-tuning approach, imagine an LLM trained solely on historical legal documents to analyze contracts. Once tuned, the model is highly specialized but may become outdated as laws change. -β€’ In a RAG system, when a query about a legal contract is received, the model uses a retrieval mechanism to pull the latest legal precedents or regulations before generating the final answer. -β€’ Thus, while fine-tuning β€œbakes in” domain expertise, RAG offers a pathway to incorporate real-time data without retraining. 
- -───────────────────────────── -● Synthesist: -β€’ The core trade-off is between investing computational resources to modify the model’s weights (fine-tuning) versus dynamically integrating external information (RAG). -β€’ This decision impacts maintenance (a fine-tuned model must be updated periodically) and scalability (RAG systems can easily switch the knowledge base or adjust the retrieval strategy). - -───────────────────────────── -● Communicator: -β€’ Example 1: A medical chatbot that’s fine-tuned on a corpus of clinical notes will respond with a deep, internalized medical expertise. In contrast, a RAG-based medical assistant can retrieve the latest clinical guidelines on demand, ensuring that its responses are up to date. -β€’ Example 2: An e-commerce LLM might be fine-tuned on historical customer service data to handle routine queries. Alternatively, a RAG system can pull real-time inventory data or promotion details from a live database, providing immediate, context-sensitive answers. - -───────────────────────────── -In summary, fine-tuning LLMs involves adjusting internal weights to embody domain-specific knowledge, while RAG systems keep the model unchanged and rely on external data retrieval for dynamic, context-rich responses. The choice between these two approaches hinges on the need for static specialization versus flexible, up-to-date information integration. - -References: -[Anyscale Blog on Fine-Tuning LLMs](https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2) - -
- -
-Response from GPT-4 - -Certainly! Here’s a Fair Witness analysis of how Retrieval-Augmented Generation (RAG) differs from fine-tuning large language models (LLMs), using the five epistemological functions: - ---- - -### 1. Observer -**What are RAG and fine-tuning?** -- **RAG**: Combines an LLM with an external knowledge source (like a document database). When asked a question, it retrieves relevant documents and feeds them to the LLM for context-aware answers. -- **Fine-tuning**: Involves training an LLM further on a specific dataset so it β€œlearns” new patterns or knowledge, updating its internal weights. - ---- - -### 2. Evaluator -**Key differences:** -- **Data update speed**: - - RAG can use new documents instantly (just update the database). - - Fine-tuning requires retraining the model, which is slow and resource-intensive. -- **Model size and cost**: - - RAG uses a fixed, pre-trained LLM; only the retrieval system and database need updating. - - Fine-tuning creates a new, custom model for each use case. -- **Knowledge scope**: - - RAG can access large, dynamic corpora. - - Fine-tuned models are limited to what they’ve seen during training. - ---- - -### 3. Analyst -**How do they work?** -- **RAG**: - 1. User asks: β€œWhat’s the latest on quantum computing?” - 2. System retrieves recent articles from a database. - 3. LLM reads those articles and generates an answer. -- **Fine-tuning**: - 1. Collect a dataset of quantum computing Q&A. - 2. Train the LLM on this data. - 3. The model answers based on what it β€œremembers” from training. - ---- - -### 4. Synthesist -**When to use each?** -- **RAG**: - - Best for domains with frequently changing or proprietary information (e.g., company wikis, news, legal docs). - - Example: A customer support bot that always references the latest help articles. -- **Fine-tuning**: - - Best for tasks requiring specialized language, tone, or reasoning (e.g., medical diagnosis, legal contract drafting). 
- - Example: A medical chatbot fine-tuned on clinical notes to use precise terminology. - ---- - -### 5. Communicator -**Summary Table:** - -``` -| Aspect | RAG | Fine-tuning | -|----------------|---------------------------------------|------------------------------------| -| Updates | Instantly via new docs | Requires retraining | -| Cost | Lower (no retraining) | Higher (compute, storage) | -| Customization | Dynamic, doc-based | Static, model-based | -| Example | β€œWhat’s in today’s news?” β†’ live docs | β€œSummarize a contract” β†’ legal LLM | -``` - -**Example:** -- **RAG**: β€œWhat’s the latest COVID-19 guidance?” β†’ Pulls from CDC website, always up to date. -- **Fine-tuned LLM**: β€œExplain COVID-19 in simple terms” β†’ Uses knowledge from its last training, may be outdated. - ---- - -**In short:** -- **RAG** augments a static LLM with fresh, external knowledge at query time. -- **Fine-tuning** bakes new knowledge or skills into the LLM itself, but is slower and less flexible. - -Let me know if you want a deeper dive into any aspect! - -
- -
-Response from Claude 4 Sonnet - -# Fair Witness Analysis: RAG vs Fine-Tuning LLMs ✨ - -```yaml -emulation: - type: Fair Witness Bot - framework: Function-Epistemic Hybrid Framework - epistemic_functions: - - observer - - evaluator - - analyst - - synthesist - - communicator - constraints: - natural_language: - style: E-Prime - output: - type: natural language - detail_level: moderate - length: moderate - complexity: moderate - style: dry - includes_sources: true -``` - -## Observer Function - -RAG (Retrieval-Augmented Generation) connects large language models to external databases, allowing real-time information retrieval during response generation. Meta introduced this architecture framework in 2020. The process involves query processing, data retrieval from curated databases, integration with the LLM, and response generation. - -Fine-tuning involves training an LLM on smaller, specialized datasets to adjust the model's parameters and embeddings for specific domains. This process modifies the model itself rather than augmenting it with external data sources. 
- -**Sources:** -- [RAG vs Fine Tuning: How to Choose the Right Method](https://www.montecarlodata.com/blog-rag-vs-fine-tuning/) (Monte Carlo Data Team, 2024) - -## Evaluator Function - -When assessed against enterprise requirements: - -**Security & Privacy:** -- RAG maintains proprietary data within secured database environments with strict access controls -- Fine-tuning integrates data into the model's training set, potentially exposing it to broader access - -**Cost Efficiency:** -- RAG limits resource costs by leveraging existing data infrastructure -- Fine-tuning requires significant compute resources and weeks/months of training time - -**Scalability:** -- RAG scales through database optimization and retrieval mechanisms -- Fine-tuning demands complete model retraining for updates - -**Reliability:** -- RAG enables traceability of response sources for debugging -- Fine-tuning makes source attribution more difficult after training - -**Sources:** -- [RAG vs Fine Tuning: How to Choose the Right Method](https://www.montecarlodata.com/blog-rag-vs-fine-tuning/) (Monte Carlo Data Team, 2024) - -## Analyst Function - -The approaches differ fundamentally in their data integration strategies: - -**RAG Architecture Pattern:** -1. Query β†’ Retrieval System β†’ Database Search β†’ Context Integration β†’ LLM Processing β†’ Response -2. Data remains external and dynamically accessible -3. Updates require database modifications, not model retraining - -**Fine-Tuning Pattern:** -1. Dataset Preparation β†’ Model Training β†’ Parameter Adjustment β†’ Specialized Model β†’ Deployment -2. Data becomes embedded in model parameters -3. 
Updates require complete retraining cycles - -**Performance Characteristics:** -- RAG excels with dynamic, frequently changing information -- Fine-tuning performs better for domain-specific language patterns and specialized terminology -- RAG provides better explainability through source citation -- Fine-tuning offers more consistent domain-specific responses - -**Sources:** -- [RAG vs Fine Tuning: How to Choose the Right Method](https://www.montecarlodata.com/blog-rag-vs-fine-tuning/) (Monte Carlo Data Team, 2024) - -## Synthesist Function - -The choice between RAG and fine-tuning reflects different philosophical approaches to AI system design: - -**RAG represents a modular approach**: separating knowledge storage from reasoning capabilities, enabling agile data management and maintaining clear boundaries between information sources and processing logic. - -**Fine-tuning embodies an integrated approach**: embedding domain knowledge directly into model parameters, creating specialized systems optimized for specific tasks but requiring more substantial infrastructure for updates. - -Real-world implementations often combine both approaches: fine-tuning models for domain-specific language understanding while using RAG for dynamic information access. - -**Examples from Industry:** -- Preset (founded by Apache Airflow creator) chose RAG for text-to-SQL capabilities due to context window limitations and customer segmentation requirements -- Snorkel AI demonstrated fine-tuned models achieving equivalent performance to larger general-purpose models with 1,400x smaller deployment footprint - -**Sources:** -- [RAG vs Fine Tuning: How to Choose the Right Method](https://www.montecarlodata.com/blog-rag-vs-fine-tuning/) (Monte Carlo Data Team, 2024) - -## Communicator Function - -RAG and fine-tuning serve different enterprise AI strategies. RAG suits organizations requiring dynamic information access, strong security controls, and cost-efficient scaling. 
Fine-tuning benefits scenarios demanding specialized domain performance, consistent language patterns, and reduced inference costs for specific tasks. - -Most enterprise implementations benefit from considering both approaches as complementary rather than competing technologies. The optimal choice depends on data volatility, security requirements, computational resources, and performance objectives. - -Neither approach eliminates the fundamental requirement for high-quality data pipelines and robust data observability to ensure reliable AI system performance. - -**Sources:** -- [RAG vs Fine Tuning: How to Choose the Right Method](https://www.montecarlodata.com/blog-rag-vs-fine-tuning/) (Monte Carlo Data Team, 2024) - ---- - -This analysis maintains E-Prime style while providing comprehensive coverage of both approaches through distinct epistemological lenses. The framework separates factual observations from evaluative judgments, enabling clearer understanding of when each approach proves most suitable for enterprise AI implementations! (β—•β€Ώβ—•βœΏ) - -
- -## **Customize the analysis** - -### **Only use certain epistemological functions** - -Use the `functions` parameter. -``` -Use Fair Witness to compare Python, JavaScript, and TypeScript with functions: evaluator, analyst, communicator. -``` - -
-Response from Claude 3.5 Sonnet - -
-Here's a Fair Witness comparison of Python, JavaScript, and TypeScript focusing on three key epistemological functions: -─────────────────────────────
-● Evaluator: - -Python: -- Strongly typed but dynamically checked at runtime -- Emphasizes readability with significant whitespace -- Rich ecosystem for data science, ML, and backend development -- Slower execution compared to compiled languages - -JavaScript: -- Dynamically typed with loose type coercion -- Primarily designed for web browsers but now ubiquitous -- Event-driven architecture -- Fast execution in modern engines - -TypeScript: -- Superset of JavaScript with static typing -- Compiles to JavaScript -- Type inference and optional type annotations -- Enhanced IDE support through type information - -───────────────────────────── -● Analyst: - -Key Differentiators: - -Python: -```python -# Clear syntax, implicit returns -def greet(name: str) -> str: - return f"Hello, {name}" - -# List comprehension -squares = [x*x for x in range(10)] -``` - -JavaScript: -```javascript -// Flexible but potentially unpredictable -const greet = name => `Hello, ${name}` - -// Prototypal inheritance -class Person extends Human { - constructor(name) { - super() - this.name = name - } -} -``` - -TypeScript: -```typescript -// Strong typing with interfaces -interface Person { - name: string; - age: number; -} - -function greet(person: Person): string { - return `Hello, ${person.name}` -} -``` - -Trade-offs: -- Python: Readability vs Performance -- JavaScript: Flexibility vs Predictability -- TypeScript: Safety vs Development Speed - -───────────────────────────── -● Communicator: - -Use Case Recommendations: - -1. Choose Python when: -- Building data science applications -- Need extensive scientific computing libraries -- Prioritize code readability -- Developing ML/AI applications -- Creating backend services with Django/Flask - -2. Choose JavaScript when: -- Building web applications -- Need browser compatibility -- Creating real-time applications -- Developing Node.js backend services -- Building cross-platform mobile apps (React Native) - -3. 
Choose TypeScript when: -- Working on large-scale applications -- Need strong type safety -- Building enterprise applications -- Want better IDE support -- Maintaining complex JavaScript codebases - -Market Position (2023-2024): -- Python: #1 in TIOBE Index, strong in AI/ML -- JavaScript: Most used language on GitHub -- TypeScript: Fastest growing language in enterprise - -This analysis provides a framework for choosing between these languages based on project requirements, team expertise, and specific use cases. Each language has its strengths and optimal application domains. - -
- -### **Modify the tone, complexity, and length** - -Use the `tone`, `complexity`, and `length` parameters. -``` -Use Fair Witness to compare Python, JavaScript, and TypeScript with functions: evaluator, analyst, communicator. Tone: engaging. Complexity: high. Length: long. -``` - -### **Set the tone** - -Use the `tone` parameter. - -``` -Use Fair Witness to explain retrieval augmented generation (RAG) in an engaging way. -``` - -
- Response from Claude 3.5 Sonnet -
- Let me explain RAG using the Fair Witness epistemological framework with an engaging tone. - - **Observer Function** - - RAG combines large language models with real-time information retrieval from external knowledge bases. The technique emerged in 2023 as organizations sought to enhance AI responses with current, accurate data. - - **Evaluator Function** - - RAG addresses key limitations of traditional LLMs: - - Knowledge cutoff dates - - Hallucination risks - - Source verification challenges - - Real-time information needs - - **Analyst Function** - - RAG operates through three core components: - - 1. A retriever that searches relevant documents/data - 2. An augmenter that enriches prompts with retrieved information - 3. A generator (LLM) that produces responses using the augmented context - - **Synthesist Function** - RAG represents a significant evolution in AI systems by: - - Bridging static model knowledge with dynamic data - - Enabling verifiable responses with citations - - Reducing hallucination through grounded context - - Supporting enterprise knowledge integration - - **Communicator Function** - - Think of RAG as giving an AI assistant both a library card and a research assistant. When asked a question, it first gathers relevant documents, then uses those materials to construct an informed response - much like a scholar consulting references before writing a paper. This approach ensures answers remain current and verifiable. - - The technique has become foundational for modern AI applications requiring accuracy and real-time information access. - -
- -
- - Response from Claude 4 Sonnet -
- I'd love to explain RAG using the Fair Witness approach! ✨ However, I need to access the specific details of the `core/fair-witness-agent` rule to properly apply its five epistemological functions (observer, evaluator, analyst, synthesist, communicator) with the right methodology and structure. - - Could you help me understand: - - 1. How you'd like me to adjust the tone and complexity level for this RAG explanation? - - 2. Should I fetch the full Fair Witness rule details first, or do you have specific preferences for how to apply the observer/evaluator/analyst/synthesist/communicator framework? - - I want to make sure I give you the most engaging and properly structured Fair Witness explanation possible, senpai! πŸŒ™ - -``` -// user selects second option -2 -``` - -I'll fetch the Fair Witness rule details to properly structure this RAG explanation! Let me access the core/fair-witness-agent rule to understand its epistemological framework. πŸ” - -Could you help me fetch the full details of the Fair Witness rule? I want to make sure I apply its observer, evaluator, analyst, synthesist, and communicator functions correctly when explaining Retrieval Augmented Generation. -Once I have the rule structure, I'll craft an engaging explanation that follows the Fair Witness methodology properly! ✨ - -``` -// user response: - -Use all functions and moderate detail. -``` - -
-# Fair Witness Analysis: Retrieval Augmented Generation (RAG) ✨ - -## **Observer Function** πŸ” -*What do we see in RAG's fundamental structure?* - -RAG consists of two primary components working in tandem: a **retrieval system** that searches external knowledge bases, and a **generation system** (typically an LLM) that creates responses. When a query arrives, the retriever first searches relevant documents, then feeds this context to the generator alongside the original question. - -The process flows: Query β†’ Retrieve relevant docs β†’ Augment prompt with context β†’ Generate response. - -## **Evaluator Function** βš–οΈ -*How well does RAG perform its intended purpose?* - -**Strengths:** -- Dramatically reduces hallucinations by grounding responses in real data -- Enables access to current/specialized information beyond training cutoffs -- More cost-effective than fine-tuning for domain-specific knowledge -- Maintains source attribution and transparency - -**Limitations:** -- Quality heavily depends on retrieval accuracy -- Can struggle with multi-hop reasoning across documents -- Latency overhead from retrieval step -- Context window limitations affect how much retrieved content can be used - -## **Analyst Function** 🧠 -*What patterns and mechanisms drive RAG's effectiveness?* - -RAG succeeds because it separates **parametric knowledge** (stored in model weights) from **non-parametric knowledge** (external documents). This architectural choice enables: - -1. **Dynamic knowledge updates** without retraining -2. **Semantic search** through embedding similarity -3. **Context injection** that guides generation behavior -4. **Scalable knowledge expansion** through document addition - -The retrieval mechanism typically uses dense embeddings (like BERT variants) to find semantically similar content, creating a bridge between natural language queries and structured knowledge. 
- -## **Synthesist Function** 🌟 -*How does RAG fit into the broader AI knowledge landscape?* - -RAG represents a hybrid approach between traditional search systems and generative AI. It connects several key concepts: - -- **Information Retrieval** β†’ provides the search foundation -- **Neural Language Models** β†’ enables natural language generation -- **Embedding Spaces** β†’ creates semantic understanding -- **Prompt Engineering** β†’ optimizes context utilization - -This synthesis addresses the fundamental tension between AI systems that "know everything but sometimes lie" versus search systems that "find truth but can't synthesize." RAG creates a middle path that's both reliable and generative. - -## **Communicator Function** πŸ“‘ -*What does this mean for practical applications?* - -RAG transforms how we build AI applications by making them simultaneously **knowledgeable and truthful**. Instead of hoping an LLM remembers obscure facts correctly, we can ensure it accesses current, verified information. - -**Real-world impact:** -- Customer service bots that reference actual policy documents -- Research assistants that cite specific papers -- Technical documentation systems that stay current -- Legal AI that references actual case law - -Think of RAG as giving an AI assistant a perfectly organized, instantly searchable library - it can still write beautifully, but now it writes from verified sources rather than potentially faulty memory! πŸŒ™ - -The beauty lies in combining the best of both worlds: the fluency of large language models with the reliability of traditional information systems. - -
\ No newline at end of file diff --git a/package.json b/package.json index fca5a01..b2825d2 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@usrrname/cursorrules", "version": "0.7.0", - "description": "A wicked npx-able lib of cursor rules with Otaku AI agents", + "description": "An npx-able library of cursor rules and skills with Otaku agents", "author": { "name": "jen chan" }, @@ -31,7 +31,8 @@ "keywords": [ "cursorrules", "cursor", - "agent" + "agent", + "skills" ], "license": "ISC", "bugs": {