commit fe38508201c2893bddad451ade98f6e7e3216d09 Author: allagroup <> Date: Fri Apr 17 07:21:17 2026 +0000 Initial commit diff --git a/.agents/skills/drizzle/SKILL.md b/.agents/skills/drizzle/SKILL.md new file mode 100644 index 0000000..2eaa8eb --- /dev/null +++ b/.agents/skills/drizzle/SKILL.md @@ -0,0 +1,205 @@ +--- +name: drizzle +description: Drizzle ORM schema and database guide. Use when working with database schemas (src/database/schemas/*), defining tables, creating migrations, or database model code. Triggers on Drizzle schema definition, database migrations, or ORM usage questions. +--- + +# Drizzle ORM Schema Style Guide + +## Configuration + +- Config: `drizzle.config.ts` +- Schemas: `src/database/schemas/` +- Migrations: `src/database/migrations/` +- Dialect: `postgresql` with `strict: true` + +## Helper Functions + +Location: `src/database/schemas/_helpers.ts` + +- `timestamptz(name)`: Timestamp with timezone +- `createdAt()`, `updatedAt()`, `accessedAt()`: Standard timestamp columns +- `timestamps`: Object with all three for easy spread + +## Naming Conventions + +- **Tables**: Plural snake_case (`users`, `session_groups`) +- **Columns**: snake_case (`user_id`, `created_at`) + +## Column Definitions + +### Primary Keys + +```typescript +id: text('id') + .primaryKey() + .$defaultFn(() => idGenerator('agents')) + .notNull(), +``` + +ID prefixes make entity types distinguishable. For internal tables, use `uuid`. 
+ +### Foreign Keys + +```typescript +userId: text('user_id') + .references(() => users.id, { onDelete: 'cascade' }) + .notNull(), +``` + +### Timestamps + +```typescript +...timestamps, // Spread from _helpers.ts +``` + +### Indexes + +```typescript +// Return array (object style deprecated) +(t) => [uniqueIndex('client_id_user_id_unique').on(t.clientId, t.userId)], +``` + +## Type Inference + +```typescript +export const insertAgentSchema = createInsertSchema(agents); +export type NewAgent = typeof agents.$inferInsert; +export type AgentItem = typeof agents.$inferSelect; +``` + +## Example Pattern + +```typescript +export const agents = pgTable( + 'agents', + { + id: text('id') + .primaryKey() + .$defaultFn(() => idGenerator('agents')) + .notNull(), + slug: varchar('slug', { length: 100 }) + .$defaultFn(() => randomSlug(4)) + .unique(), + userId: text('user_id') + .references(() => users.id, { onDelete: 'cascade' }) + .notNull(), + clientId: text('client_id'), + chatConfig: jsonb('chat_config').$type(), + ...timestamps, + }, + (t) => [uniqueIndex('client_id_user_id_unique').on(t.clientId, t.userId)], +); +``` + +## Common Patterns + +### Junction Tables (Many-to-Many) + +```typescript +export const agentsKnowledgeBases = pgTable( + 'agents_knowledge_bases', + { + agentId: text('agent_id') + .references(() => agents.id, { onDelete: 'cascade' }) + .notNull(), + knowledgeBaseId: text('knowledge_base_id') + .references(() => knowledgeBases.id, { onDelete: 'cascade' }) + .notNull(), + userId: text('user_id') + .references(() => users.id, { onDelete: 'cascade' }) + .notNull(), + enabled: boolean('enabled').default(true), + ...timestamps, + }, + (t) => [primaryKey({ columns: [t.agentId, t.knowledgeBaseId] })], +); +``` + +## Query Style + +**Always use `db.select()` builder API. Never use `db.query.*` relational API** (`findMany`, `findFirst`, `with:`). + +The relational API generates complex lateral joins with `json_build_array` that are fragile and hard to debug. 
+ +### Select Single Row + +```typescript +// ✅ Good +const [result] = await this.db + .select() + .from(agents) + .where(eq(agents.id, id)) + .limit(1); +return result; + +// ❌ Bad: relational API +return this.db.query.agents.findFirst({ + where: eq(agents.id, id), +}); +``` + +### Select with JOIN + +```typescript +// ✅ Good: explicit select + leftJoin +const rows = await this.db + .select({ + runId: agentEvalRunTopics.runId, + score: agentEvalRunTopics.score, + testCase: agentEvalTestCases, + topic: topics, + }) + .from(agentEvalRunTopics) + .leftJoin(agentEvalTestCases, eq(agentEvalRunTopics.testCaseId, agentEvalTestCases.id)) + .leftJoin(topics, eq(agentEvalRunTopics.topicId, topics.id)) + .where(eq(agentEvalRunTopics.runId, runId)) + .orderBy(asc(agentEvalRunTopics.createdAt)); + +// ❌ Bad: relational API with `with:` +return this.db.query.agentEvalRunTopics.findMany({ + where: eq(agentEvalRunTopics.runId, runId), + with: { testCase: true, topic: true }, +}); +``` + +### Select with Aggregation + +```typescript +// ✅ Good: select + leftJoin + groupBy +const rows = await this.db + .select({ + id: agentEvalDatasets.id, + name: agentEvalDatasets.name, + testCaseCount: count(agentEvalTestCases.id).as('testCaseCount'), + }) + .from(agentEvalDatasets) + .leftJoin(agentEvalTestCases, eq(agentEvalDatasets.id, agentEvalTestCases.datasetId)) + .groupBy(agentEvalDatasets.id); +``` + +### One-to-Many (Separate Queries) + +When you need a parent record with its children, use two queries instead of relational `with:`: + +```typescript +// ✅ Good: two simple queries +const [dataset] = await this.db + .select() + .from(agentEvalDatasets) + .where(eq(agentEvalDatasets.id, id)) + .limit(1); + +if (!dataset) return undefined; + +const testCases = await this.db + .select() + .from(agentEvalTestCases) + .where(eq(agentEvalTestCases.datasetId, id)) + .orderBy(asc(agentEvalTestCases.sortOrder)); + +return { ...dataset, testCases }; +``` + +## Database Migrations + +See the 
`db-migrations` skill for the detailed migration guide. diff --git a/.agents/skills/elysiajs/SKILL.md b/.agents/skills/elysiajs/SKILL.md new file mode 100644 index 0000000..d707a64 --- /dev/null +++ b/.agents/skills/elysiajs/SKILL.md @@ -0,0 +1,475 @@ +--- +name: elysiajs +description: Create backend with ElysiaJS, a type-safe, high-performance framework. +--- + +# ElysiaJS Development Skill + +Always consult [elysiajs.com/llms.txt](https://elysiajs.com/llms.txt) for code examples and latest API. + +## Overview + +ElysiaJS is a TypeScript framework for building Bun-first (but not limited to Bun) type-safe, high-performance backend servers. This skill provides comprehensive guidance for developing with Elysia, including routing, validation, authentication, plugins, integrations, and deployment. + +## When to Use This Skill + +Trigger this skill when the user asks to: +- Create or modify ElysiaJS routes, handlers, or servers +- Setup validation with TypeBox or other schema libraries (Zod, Valibot) +- Implement authentication (JWT, session-based, macros, guards) +- Add plugins (CORS, OpenAPI, Static files, JWT) +- Integrate with external services (Drizzle ORM, Better Auth, Next.js, Eden Treaty) +- Setup WebSocket endpoints for real-time features +- Create unit tests for Elysia instances +- Deploy Elysia servers to production + +## Quick Start +Quick scaffold: +```bash +bun create elysia app +``` + +### Basic Server +```typescript +import { Elysia, t, status } from 'elysia' + +const app = new Elysia() + .get('/', () => 'Hello World') + .post('/user', ({ body }) => body, { + body: t.Object({ + name: t.String(), + age: t.Number() + }) + }) + .get('/id/:id', ({ params: { id } }) => { + if(id > 1_000_000) return status(404, 'Not Found') + + return id + }, { + params: t.Object({ + id: t.Number({ + minimum: 1 + }) + }), + response: { + 200: t.Number(), + 404: t.Literal('Not Found') + } + }) + .listen(3000) +``` + +## Basic Usage + +### HTTP Methods +```typescript +import { 
Elysia } from 'elysia' + +new Elysia() + .get('/', 'GET') + .post('/', 'POST') + .put('/', 'PUT') + .patch('/', 'PATCH') + .delete('/', 'DELETE') + .options('/', 'OPTIONS') + .head('/', 'HEAD') +``` + +### Path Parameters +```typescript +.get('/user/:id', ({ params: { id } }) => id) +.get('/post/:id/:slug', ({ params }) => params) +``` + +### Query Parameters +```typescript +.get('/search', ({ query }) => query.q) +// GET /search?q=elysia → "elysia" +``` + +### Request Body +```typescript +.post('/user', ({ body }) => body) +``` + +### Headers +```typescript +.get('/', ({ headers }) => headers.authorization) +``` + +## TypeBox Validation + +### Basic Types +```typescript +import { Elysia, t } from 'elysia' + +.post('/user', ({ body }) => body, { + body: t.Object({ + name: t.String(), + age: t.Number(), + email: t.String({ format: 'email' }), + website: t.Optional(t.String({ format: 'uri' })) + }) +}) +``` + +### Nested Objects +```typescript +body: t.Object({ + user: t.Object({ + name: t.String(), + address: t.Object({ + street: t.String(), + city: t.String() + }) + }) +}) +``` + +### Arrays +```typescript +body: t.Object({ + tags: t.Array(t.String()), + users: t.Array(t.Object({ + id: t.String(), + name: t.String() + })) +}) +``` + +### File Upload +```typescript +.post('/upload', ({ body }) => body.file, { + body: t.Object({ + file: t.File({ + type: 'image', // image/* mime types + maxSize: '5m' // 5 megabytes + }), + files: t.Files({ // Multiple files + type: ['image/png', 'image/jpeg'] + }) + }) +}) +``` + +### Response Validation +```typescript +.get('/user/:id', ({ params: { id } }) => ({ + id, + name: 'John', + email: 'john@example.com' +}), { + params: t.Object({ + id: t.Number() + }), + response: { + 200: t.Object({ + id: t.Number(), + name: t.String(), + email: t.String() + }), + 404: t.String() + } +}) +``` + +## Standard Schema (Zod, Valibot, ArkType) + +### Zod +```typescript +import { z } from 'zod' + +.post('/user', ({ body }) => body, { + body: 
z.object({ + name: z.string(), + age: z.number().min(0), + email: z.string().email() + }) +}) +``` + +## Error Handling + +```typescript +.get('/user/:id', ({ params: { id }, status }) => { + const user = findUser(id) + + if (!user) { + return status(404, 'User not found') + } + + return user +}) +``` + +## Guards (Apply to Multiple Routes) + +```typescript +.guard({ + params: t.Object({ + id: t.Number() + }) +}, app => app + .get('/user/:id', ({ params: { id } }) => id) + .delete('/user/:id', ({ params: { id } }) => id) +) +``` + +## Macro + +```typescript +.macro({ + hi: (word: string) => ({ + beforeHandle() { console.log(word) } + }) +}) +.get('/', () => 'hi', { hi: 'Elysia' }) +``` + +### Project Structure (Recommended) +Elysia takes an unopinionated approach but based on user request. But without any specific preference, we recommend a feature-based and domain driven folder structure where each feature has its own folder containing controllers, services, and models. + +``` +src/ +├── index.ts # Main server entry +├── modules/ +│ ├── auth/ +│ │ ├── index.ts # Auth routes (Elysia instance) +│ │ ├── service.ts # Business logic +│ │ └── model.ts # TypeBox schemas/DTOs +│ └── user/ +│ ├── index.ts +│ ├── service.ts +│ └── model.ts +└── plugins/ + └── custom.ts + +public/ # Static files (if using static plugin) +test/ # Unit tests +``` + +Each file has its own responsibility as follows: +- **Controller (index.ts)**: Handle HTTP routing, request validation, and cookie. +- **Service (service.ts)**: Handle business logic, decoupled from Elysia controller if possible. +- **Model (model.ts)**: Define the data structure and validation for the request and response. + +## Best Practice +Elysia is unopinionated on design pattern, but if not provided, we can relies on MVC pattern pair with feature based folder structure. 
- Controller:
  - Prefer Elysia as the controller for HTTP-dependent controllers
  - For non-HTTP-dependent logic, prefer a service instead unless explicitly asked
  - Use `onError` to handle local custom errors
  - Register models on the Elysia instance via `Elysia.models({ ...models })` and prefix model names by namespace with `Elysia.prefix('model', 'Namespace.')`
  - Prefer referencing a model by the name registered on the Elysia instance instead of using an actual `Model.name`
- Service:
  - Prefer a class (or abstract class if possible)
  - Prefer interfaces/types derived from `Model`
  - Return `status` (`import { status } from 'elysia'`) for errors
  - Prefer `return Error` instead of `throw Error`
- Models:
  - Always export the validation model and the type of the validation model
  - Custom Errors should be contained in the Model

## Elysia Key Concepts
Elysia has a few very important concepts/rules to understand before use.

## Encapsulation - Isolates by Default

Lifecycles (hooks, middleware) **don't leak** between instances unless scoped.

**Scope levels:**
- `local` (default) - current instance + descendants
- `scoped` - parent + current + descendants
- `global` - all instances

```ts
.onBeforeHandle(() => {}) // only local instance
.onBeforeHandle({ as: 'global' }, () => {}) // exports to all
```

## Method Chaining - Required for Types

**Must chain**. Each method returns a new type reference.

❌ Don't:
```ts
const app = new Elysia()
app.state('build', 1) // loses type
app.get('/', ({ store }) => store.build) // build doesn't exist
```

✅ Do:
```ts
new Elysia()
    .state('build', 1)
    .get('/', ({ store }) => store.build)
```

## Explicit Dependencies

Each instance is independent.
**Declare what you use.** + +```ts +const auth = new Elysia() + .decorate('Auth', Auth) + .model(Auth.models) + +new Elysia() + .get('/', ({ Auth }) => Auth.getProfile()) // Auth doesn't exists + +new Elysia() + .use(auth) // must declare + .get('/', ({ Auth }) => Auth.getProfile()) +``` + +**Global scope when:** +- No types added (cors, helmet) +- Global lifecycle (logging, tracing) + +**Explicit when:** +- Adds types (state, models) +- Business logic (auth, db) + +## Deduplication + +Plugins re-execute unless named: + +```ts +new Elysia() // rerun on `.use` +new Elysia({ name: 'ip' }) // runs once across all instances +``` + +## Order Matters + +Events apply to routes **registered after** them. + +```ts +.onBeforeHandle(() => console.log('1')) +.get('/', () => 'hi') // has hook +.onBeforeHandle(() => console.log('2')) // doesn't affect '/' +``` + +## Type Inference + +**Inline functions only** for accurate types. + +For controllers, destructure in inline wrapper: + +```ts +.post('/', ({ body }) => Controller.greet(body), { + body: t.Object({ name: t.String() }) +}) +``` + +Get type from schema: +```ts +type MyType = typeof MyType.static +``` + +## Reference Model +Model can be reference by name, especially great for documenting an API +```ts +new Elysia() + .model({ + book: t.Object({ + name: t.String() + }) + }) + .post('/', ({ body }) => body.name, { + body: 'book' + }) +``` + +Model can be renamed by using `.prefix` / `.suffix` +```ts +new Elysia() + .model({ + book: t.Object({ + name: t.String() + }) + }) + .prefix('model', 'Namespace') + .post('/', ({ body }) => body.name, { + body: 'Namespace.Book' + }) +``` + +Once `prefix`, model name will be capitalized by default. 
## Technical Terms
The following are technical terms used by Elysia:
- `OpenAPI Type Gen` - function name `fromTypes` from `@elysiajs/openapi` for generating OpenAPI from types, see `plugins/openapi.md`
- `Eden`, `Eden Treaty` - end-to-end type-safe RPC client for sharing types from backend to frontend

## Resources
Use the following references as needed.

It's recommended to check out `route.md` first, as it contains the most important foundation building blocks with examples.

`plugin.md` and `validation.md` are important as well but can be checked as needed.

### references/
Detailed documentation split by topic:
- `bun-fullstack-dev-server.md` - Bun Fullstack Dev Server with HMR. React without bundler.
- `cookie.md` - Detailed documentation on cookies
- `deployment.md` - Production deployment guide / Docker
- `eden.md` - end-to-end type-safe RPC client for sharing types from backend to frontend
- `guard.md` - Setting validation/lifecycle all at once
- `macro.md` - Compose multiple schema/lifecycle as a reusable Elysia via key-value (recommended for complex setup, eg.
authentication, authorization, Role-based Access Check)
- `plugin.md` - Decouple part of Elysia into a standalone component
- `route.md` - Elysia foundation building block: Routing, Handler and Context
- `testing.md` - Unit tests with examples
- `validation.md` - Setup input/output validation and list of all custom validation rules
- `websocket.md` - Real-time features

### plugins/
Detailed documentation, usage and configuration reference for official Elysia plugins:
- `bearer.md` - Add bearer capability to Elysia (`@elysiajs/bearer`)
- `cors.md` - Out-of-the-box configuration for CORS (`@elysiajs/cors`)
- `cron.md` - Run cron jobs with access to the Elysia context (`@elysiajs/cron`)
- `graphql-apollo.md` - Integration with GraphQL Apollo (`@elysiajs/graphql-apollo`)
- `graphql-yoga.md` - Integration with GraphQL Yoga (`@elysiajs/graphql-yoga`)
- `html.md` - HTML and JSX plugin setup and usage (`@elysiajs/html`)
- `jwt.md` - JWT / JWK plugin (`@elysiajs/jwt`)
- `openapi.md` - OpenAPI documentation and OpenAPI Type Gen / OpenAPI from types (`@elysiajs/openapi`)
- `opentelemetry.md` - OpenTelemetry, instrumentation, and record span utilities (`@elysiajs/opentelemetry`)
- `server-timing.md` - Server Timing metric for debugging (`@elysiajs/server-timing`)
- `static.md` - Serve static files/folders for the Elysia server (`@elysiajs/static`)

### integrations/
Guide to integrate Elysia with external libraries/runtimes:
- `ai-sdk.md` - Using Vercel AI SDK with Elysia
- `astro.md` - Elysia in Astro API route
- `better-auth.md` - Integrate Elysia with better-auth
- `cloudflare-worker.md` - Elysia on Cloudflare Worker adapter
- `deno.md` - Elysia on Deno
- `drizzle.md` - Integrate Elysia with Drizzle ORM
- `expo.md` - Elysia in Expo API route
- `nextjs.md` - Elysia in Next.js API route
- `nodejs.md` - Run Elysia on Node.js
- `nuxt.md` - Elysia on Nuxt API route
- `prisma.md` - Integrate Elysia with Prisma
- `react-email.md` - Create and Send Email with React and Elysia
- 
`sveltekit.md` - Run Elysia on Svelte Kit API route +- `tanstack-start.md` - Run Elysia on Tanstack Start / React Query +- `vercel.md` - Deploy Elysia to Vercel + +### examples/ (optional) +- `basic.ts` - Basic Elysia example +- `body-parser.ts` - Custom body parser example via `.onParse` +- `complex.ts` - Comprehensive usage of Elysia server +- `cookie.ts` - Setting cookie +- `error.ts` - Error handling +- `file.ts` - Returning local file from server +- `guard.ts` - Setting mulitple validation schema and lifecycle +- `map-response.ts` - Custom response mapper +- `redirect.ts` - Redirect response +- `rename.ts` - Rename context's property +- `schema.ts` - Setup validation +- `state.ts` - Setup global state +- `upload-file.ts` - File upload with validation +- `websocket.ts` - Web Socket for realtime communication + +### patterns/ (optional) +- `patterns/mvc.md` - Detail guideline for using Elysia with MVC patterns diff --git a/.agents/skills/elysiajs/examples/basic.ts b/.agents/skills/elysiajs/examples/basic.ts new file mode 100644 index 0000000..61c8d14 --- /dev/null +++ b/.agents/skills/elysiajs/examples/basic.ts @@ -0,0 +1,9 @@ +import { Elysia, t } from 'elysia' + +new Elysia() + .get('/', 'Hello Elysia') + .post('/', ({ body: { name } }) => name, { + body: t.Object({ + name: t.String() + }) + }) diff --git a/.agents/skills/elysiajs/examples/body-parser.ts b/.agents/skills/elysiajs/examples/body-parser.ts new file mode 100644 index 0000000..533c7bf --- /dev/null +++ b/.agents/skills/elysiajs/examples/body-parser.ts @@ -0,0 +1,33 @@ +import { Elysia, t } from 'elysia' + +const app = new Elysia() + // Add custom body parser + .onParse(async ({ request, contentType }) => { + switch (contentType) { + case 'application/Elysia': + return request.text() + } + }) + .post('/', ({ body: { username } }) => `Hi ${username}`, { + body: t.Object({ + id: t.Number(), + username: t.String() + }) + }) + // Increase id by 1 from body before main handler + .post('/transform', ({ 
body }) => body, { + transform: ({ body }) => { + body.id = body.id + 1 + }, + body: t.Object({ + id: t.Number(), + username: t.String() + }), + detail: { + summary: 'A' + } + }) + .post('/mirror', ({ body }) => body) + .listen(3000) + +console.log('🦊 Elysia is running at :8080') diff --git a/.agents/skills/elysiajs/examples/complex.ts b/.agents/skills/elysiajs/examples/complex.ts new file mode 100644 index 0000000..436eda0 --- /dev/null +++ b/.agents/skills/elysiajs/examples/complex.ts @@ -0,0 +1,112 @@ +import { Elysia, t, file } from 'elysia' + +const loggerPlugin = new Elysia() + .get('/hi', () => 'Hi') + .decorate('log', () => 'A') + .decorate('date', () => new Date()) + .state('fromPlugin', 'From Logger') + .use((app) => app.state('abc', 'abc')) + +const app = new Elysia() + .onRequest(({ set }) => { + set.headers = { + 'Access-Control-Allow-Origin': '*' + } + }) + .onError(({ code }) => { + if (code === 'NOT_FOUND') + return 'Not Found :(' + }) + .use(loggerPlugin) + .state('build', Date.now()) + .get('/', 'Elysia') + .get('/tako', file('./example/takodachi.png')) + .get('/json', () => ({ + hi: 'world' + })) + .get('/root/plugin/log', ({ log, store: { build } }) => { + log() + + return build + }) + .get('/wildcard/*', () => 'Hi Wildcard') + .get('/query', () => 'Elysia', { + beforeHandle: ({ query }) => { + console.log('Name:', query?.name) + + if (query?.name === 'aom') return 'Hi saltyaom' + }, + query: t.Object({ + name: t.String() + }) + }) + .post('/json', async ({ body }) => body, { + body: t.Object({ + name: t.String(), + additional: t.String() + }) + }) + .post('/transform-body', async ({ body }) => body, { + beforeHandle: (ctx) => { + ctx.body = { + ...ctx.body, + additional: 'Elysia' + } + }, + body: t.Object({ + name: t.String(), + additional: t.String() + }) + }) + .get('/id/:id', ({ params: { id } }) => id, { + transform({ params }) { + params.id = +params.id + }, + params: t.Object({ + id: t.Number() + }) + }) + .post('/new/:id', async ({ body, 
params }) => body, { + params: t.Object({ + id: t.Number() + }), + body: t.Object({ + username: t.String() + }) + }) + .get('/trailing-slash', () => 'A') + .group('/group', (app) => + app + .onBeforeHandle(({ query }) => { + if (query?.name === 'aom') return 'Hi saltyaom' + }) + .get('/', () => 'From Group') + .get('/hi', () => 'HI GROUP') + .get('/elysia', () => 'Welcome to Elysian Realm') + .get('/fbk', () => 'FuBuKing') + ) + .get('/response-header', ({ set }) => { + set.status = 404 + set.headers['a'] = 'b' + + return 'A' + }) + .get('/this/is/my/deep/nested/root', () => 'Hi') + .get('/build', ({ store: { build } }) => build) + .get('/ref', ({ date }) => date()) + .get('/response', () => new Response('Hi')) + .get('/error', () => new Error('Something went wrong')) + .get('/401', ({ set }) => { + set.status = 401 + + return 'Status should be 401' + }) + .get('/timeout', async () => { + await new Promise((resolve) => setTimeout(resolve, 2000)) + + return 'A' + }) + .all('/all', () => 'hi') + .listen(8080, ({ hostname, port }) => { + console.log(`🦊 Elysia is running at http://${hostname}:${port}`) + }) diff --git a/.agents/skills/elysiajs/examples/cookie.ts b/.agents/skills/elysiajs/examples/cookie.ts new file mode 100644 index 0000000..9a42720 --- /dev/null +++ b/.agents/skills/elysiajs/examples/cookie.ts @@ -0,0 +1,45 @@ +import { Elysia, t } from 'elysia' + +const app = new Elysia({ + cookie: { + secrets: 'Fischl von Luftschloss Narfidort', + sign: ['name'] + } +}) + .get( + '/council', + ({ cookie: { council } }) => + (council.value = [ + { + name: 'Rin', + affilation: 'Administration' + } + ]), + { + cookie: t.Cookie({ + council: t.Array( + t.Object({ + name: t.String(), + affilation: t.String() + }) + ) + }) + } + ) + .get('/create', ({ cookie: { name } }) => (name.value = 'Himari')) + .get( + '/update', + ({ cookie: { name } }) => { + name.value = 'seminar: Rio' + name.value = 'seminar: Himari' + name.maxAge = 86400 + + return name.value + }, + { + cookie: 
t.Cookie({ + name: t.Optional(t.String()) + }) + } + ) + .listen(3000) diff --git a/.agents/skills/elysiajs/examples/error.ts b/.agents/skills/elysiajs/examples/error.ts new file mode 100644 index 0000000..2c2f126 --- /dev/null +++ b/.agents/skills/elysiajs/examples/error.ts @@ -0,0 +1,38 @@ +import { Elysia, t } from 'elysia' + +class CustomError extends Error { + constructor(public name: string) { + super(name) + } +} + +new Elysia() + .error({ + CUSTOM_ERROR: CustomError + }) + // global handler + .onError(({ code, error, status }) => { + switch (code) { + case "CUSTOM_ERROR": + return status(401, { message: error.message }) + + case "NOT_FOUND": + return "Not found :(" + } + }) + .post('/', ({ body }) => body, { + body: t.Object({ + username: t.String(), + password: t.String(), + nested: t.Optional( + t.Object({ + hi: t.String() + }) + ) + }), + // local handler + error({ error }) { + console.log(error) + } + }) + .listen(3000) diff --git a/.agents/skills/elysiajs/examples/file.ts b/.agents/skills/elysiajs/examples/file.ts new file mode 100644 index 0000000..504cad7 --- /dev/null +++ b/.agents/skills/elysiajs/examples/file.ts @@ -0,0 +1,10 @@ +import { Elysia, file } from 'elysia' + +/** + * Example of handle single static file + * + * @see https://github.com/elysiajs/elysia-static + */ +new Elysia() + .get('/tako', file('./example/takodachi.png')) + .listen(3000) diff --git a/.agents/skills/elysiajs/examples/guard.ts b/.agents/skills/elysiajs/examples/guard.ts new file mode 100644 index 0000000..2fe158f --- /dev/null +++ b/.agents/skills/elysiajs/examples/guard.ts @@ -0,0 +1,34 @@ +import { Elysia, t } from 'elysia' + +new Elysia() + .state('name', 'salt') + .get('/', ({ store: { name } }) => `Hi ${name}`, { + query: t.Object({ + name: t.String() + }) + }) + // If query 'name' is not preset, skip the whole handler + .guard( + { + query: t.Object({ + name: t.String() + }) + }, + (app) => + app + // Query type is inherited from guard + .get('/profile', ({ query 
}) => `Hi`) + // Store is inherited + .post('/name', ({ store: { name }, body, query }) => name, { + body: t.Object({ + id: t.Number({ + minimum: 5 + }), + username: t.String(), + profile: t.Object({ + name: t.String() + }) + }) + }) + ) + .listen(3000) diff --git a/.agents/skills/elysiajs/examples/map-response.ts b/.agents/skills/elysiajs/examples/map-response.ts new file mode 100644 index 0000000..8cd4be4 --- /dev/null +++ b/.agents/skills/elysiajs/examples/map-response.ts @@ -0,0 +1,15 @@ +import { Elysia } from 'elysia' + +const prettyJson = new Elysia() + .mapResponse(({ response }) => { + if (response instanceof Object) + return new Response(JSON.stringify(response, null, 4)) + }) + .as('scoped') + +new Elysia() + .use(prettyJson) + .get('/', () => ({ + hello: 'world' + })) + .listen(3000) diff --git a/.agents/skills/elysiajs/examples/redirect.ts b/.agents/skills/elysiajs/examples/redirect.ts new file mode 100644 index 0000000..28171b0 --- /dev/null +++ b/.agents/skills/elysiajs/examples/redirect.ts @@ -0,0 +1,6 @@ +import { Elysia } from 'elysia' + +new Elysia() + .get('/', () => 'Hi') + .get('/redirect', ({ redirect }) => redirect('/')) + .listen(3000) diff --git a/.agents/skills/elysiajs/examples/rename.ts b/.agents/skills/elysiajs/examples/rename.ts new file mode 100644 index 0000000..361f06f --- /dev/null +++ b/.agents/skills/elysiajs/examples/rename.ts @@ -0,0 +1,32 @@ +import { Elysia, t } from 'elysia' + +// ? 
Elysia#83 | Proposal: Standardized way of renaming third party plugin-scoped stuff +// this would be a plugin provided by a third party +const myPlugin = new Elysia() + .decorate('myProperty', 42) + .model('salt', t.String()) + +new Elysia() + .use( + myPlugin + // map decorator, rename "myProperty" to "renamedProperty" + .decorate(({ myProperty, ...decorators }) => ({ + renamedProperty: myProperty, + ...decorators + })) + // map model, rename "salt" to "pepper" + .model(({ salt, ...models }) => ({ + ...models, + pepper: t.String() + })) + // Add prefix + .prefix('decorator', 'unstable') + ) + .get( + '/mapped', + ({ unstableRenamedProperty }) => unstableRenamedProperty + ) + .post('/pepper', ({ body }) => body, { + body: 'pepper', + // response: t.String() + }) diff --git a/.agents/skills/elysiajs/examples/schema.ts b/.agents/skills/elysiajs/examples/schema.ts new file mode 100644 index 0000000..db79300 --- /dev/null +++ b/.agents/skills/elysiajs/examples/schema.ts @@ -0,0 +1,61 @@ +import { Elysia, t } from 'elysia' + +const app = new Elysia() + .model({ + name: t.Object({ + name: t.String() + }), + b: t.Object({ + response: t.Number() + }), + authorization: t.Object({ + authorization: t.String() + }) + }) + // Strictly validate response + .get('/', () => 'hi') + // Strictly validate body and response + .post('/', ({ body, query }) => body.id, { + body: t.Object({ + id: t.Number(), + username: t.String(), + profile: t.Object({ + name: t.String() + }) + }) + }) + // Strictly validate query, params, and body + .get('/query/:id', ({ query: { name }, params }) => name, { + query: t.Object({ + name: t.String() + }), + params: t.Object({ + id: t.String() + }), + response: { + 200: t.String(), + 300: t.Object({ + error: t.String() + }) + } + }) + .guard( + { + headers: 'authorization' + }, + (app) => + app + .derive(({ headers }) => ({ + userId: headers.authorization + })) + .get('/', ({ userId }) => 'A') + .post('/id/:id', ({ query, body, params, userId }) => body, { + 
params: t.Object({ + id: t.Number() + }), + transform({ params }) { + params.id = +params.id + } + }) + ) + .listen(3000) diff --git a/.agents/skills/elysiajs/examples/state.ts b/.agents/skills/elysiajs/examples/state.ts new file mode 100644 index 0000000..8bcc993 --- /dev/null +++ b/.agents/skills/elysiajs/examples/state.ts @@ -0,0 +1,6 @@ +import { Elysia } from 'elysia' + +new Elysia() + .state('counter', 0) + .get('/', ({ store }) => store.counter++) + .listen(3000) diff --git a/.agents/skills/elysiajs/examples/upload-file.ts b/.agents/skills/elysiajs/examples/upload-file.ts new file mode 100644 index 0000000..4af5a19 --- /dev/null +++ b/.agents/skills/elysiajs/examples/upload-file.ts @@ -0,0 +1,20 @@ +import { Elysia, t } from 'elysia' + +const app = new Elysia() + .post('/single', ({ body: { file } }) => file, { + body: t.Object({ + file: t.File({ + maxSize: '1m' + }) + }) + }) + .post( + '/multiple', + ({ body: { files } }) => files.reduce((a, b) => a + b.size, 0), + { + body: t.Object({ + files: t.Files() + }) + } + ) + .listen(3000) diff --git a/.agents/skills/elysiajs/examples/websocket.ts b/.agents/skills/elysiajs/examples/websocket.ts new file mode 100644 index 0000000..f97e47b --- /dev/null +++ b/.agents/skills/elysiajs/examples/websocket.ts @@ -0,0 +1,25 @@ +import { Elysia } from 'elysia' + +const app = new Elysia() + .state('start', 'here') + .ws('/ws', { + open(ws) { + ws.subscribe('asdf') + console.log('Open Connection:', ws.id) + }, + close(ws) { + console.log('Closed Connection:', ws.id) + }, + message(ws, message) { + ws.publish('asdf', message) + ws.send(message) + } + }) + .get('/publish/:publish', ({ params: { publish: text } }) => { + app.server!.publish('asdf', text) + + return text + }) + .listen(3000, (server) => { + console.log(`http://${server.hostname}:${server.port}`) + }) diff --git a/.agents/skills/elysiajs/integrations/ai-sdk.md b/.agents/skills/elysiajs/integrations/ai-sdk.md new file mode 100644 index 0000000..99f5409 --- 
/dev/null +++ b/.agents/skills/elysiajs/integrations/ai-sdk.md @@ -0,0 +1,92 @@ +# AI SDK Integration + +## What It Is +Seamless integration with Vercel AI SDK via response streaming. + +## Response Streaming +Return `ReadableStream` or `Response` directly: +```typescript +import { streamText } from 'ai' +import { openai } from '@ai-sdk/openai' + +new Elysia().get('/', () => { + const stream = streamText({ + model: openai('gpt-5'), + system: 'You are Yae Miko from Genshin Impact', + prompt: 'Hi! How are you doing?' + }) + + return stream.textStream // ReadableStream + // or + return stream.toUIMessageStream() // UI Message Stream +}) +``` + +Elysia auto-handles stream. + +## Server-Sent Events +Wrap `ReadableStream` with `sse`: +```typescript +import { sse } from 'elysia' + +.get('/', () => { + const stream = streamText({ /* ... */ }) + + return sse(stream.textStream) + // or + return sse(stream.toUIMessageStream()) +}) +``` + +Each chunk → SSE. + +## As Response +Return stream directly (no Eden type safety): +```typescript +.get('/', () => { + const stream = streamText({ /* ... */ }) + + return stream.toTextStreamResponse() + // or + return stream.toUIMessageStreamResponse() // Uses SSE +}) +``` + +## Manual Streaming +Generator function for control: +```typescript +import { sse } from 'elysia' + +.get('/', async function* () { + const stream = streamText({ /* ... */ }) + + for await (const data of stream.textStream) + yield sse({ data, event: 'message' }) + + yield sse({ event: 'done' }) +}) +``` + +## Fetch for Unsupported Models +Direct fetch with streaming proxy: +```typescript +.get('/', () => { + return fetch('https://api.openai.com/v1/chat/completions', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${process.env.OPENAI_API_KEY}` + }, + body: JSON.stringify({ + model: 'gpt-5', + stream: true, + messages: [ + { role: 'system', content: 'You are Yae Miko' }, + { role: 'user', content: 'Hi! How are you doing?' 
} + ] + }) + }) +}) +``` + +Elysia auto-proxies fetch response with streaming. diff --git a/.agents/skills/elysiajs/integrations/astro.md b/.agents/skills/elysiajs/integrations/astro.md new file mode 100644 index 0000000..41cd451 --- /dev/null +++ b/.agents/skills/elysiajs/integrations/astro.md @@ -0,0 +1,59 @@ +# Astro Integration - SKILLS.md + +## What It Is +Run Elysia on Astro via Astro Endpoint. + +## Setup +1. Set output to server: +```javascript +// astro.config.mjs +export default defineConfig({ + output: 'server' +}) +``` + +2. Create `pages/[...slugs].ts` +3. Define Elysia server + export handlers: +```typescript +// pages/[...slugs].ts +import { Elysia, t } from 'elysia' + +const app = new Elysia() + .get('/api', () => 'hi') + .post('/api', ({ body }) => body, { + body: t.Object({ name: t.String() }) + }) + +const handle = ({ request }: { request: Request }) => app.handle(request) + +export const GET = handle +export const POST = handle +``` + +WinterCG compliance - works normally. + +Recommended: Run Astro on Bun (Elysia designed for Bun). + +## Prefix for Non-Root +If placed in `pages/api/[...slugs].ts`, set prefix: +```typescript +// pages/api/[...slugs].ts +const app = new Elysia({ prefix: '/api' }) + .get('/', () => 'hi') + +const handle = ({ request }: { request: Request }) => app.handle(request) + +export const GET = handle +export const POST = handle +``` + +Ensures routing works in any location. + +## Benefits +Co-location of frontend + backend. End-to-end type safety with Eden. + +## pnpm +Manual install: +```bash +pnpm add @sinclair/typebox openapi-types +``` diff --git a/.agents/skills/elysiajs/integrations/better-auth.md b/.agents/skills/elysiajs/integrations/better-auth.md new file mode 100644 index 0000000..0dfa3af --- /dev/null +++ b/.agents/skills/elysiajs/integrations/better-auth.md @@ -0,0 +1,117 @@ +# Better Auth Integration +Elysia + Better Auth integration guide + +## What It Is +Framework-agnostic TypeScript auth/authz. 
Comprehensive features + plugin ecosystem. + +## Setup +```typescript +import { betterAuth } from 'better-auth' +import { Pool } from 'pg' + +export const auth = betterAuth({ + database: new Pool() +}) +``` + +## Handler Mounting +```typescript +import { auth } from './auth' + +new Elysia() + .mount(auth.handler) // http://localhost:3000/api/auth + .listen(3000) +``` + +### Custom Endpoint +```typescript +// Mount with prefix +.mount('/auth', auth.handler) // http://localhost:3000/auth/api/auth + +// Customize basePath +export const auth = betterAuth({ + basePath: '/api' // http://localhost:3000/auth/api +}) +``` + +Cannot set `basePath` to empty or `/`. + +## OpenAPI Integration +Extract docs from Better Auth: +```typescript +import { openAPI } from 'better-auth/plugins' + +let _schema: ReturnType +const getSchema = async () => (_schema ??= auth.api.generateOpenAPISchema()) + +export const OpenAPI = { + getPaths: (prefix = '/auth/api') => + getSchema().then(({ paths }) => { + const reference: typeof paths = Object.create(null) + + for (const path of Object.keys(paths)) { + const key = prefix + path + reference[key] = paths[path] + + for (const method of Object.keys(paths[path])) { + const operation = (reference[key] as any)[method] + operation.tags = ['Better Auth'] + } + } + + return reference + }) as Promise, + components: getSchema().then(({ components }) => components) as Promise +} as const +``` + +Apply to Elysia: +```typescript +new Elysia().use(openapi({ + documentation: { + components: await OpenAPI.components, + paths: await OpenAPI.getPaths() + } +})) +``` + +## CORS +```typescript +import { cors } from '@elysiajs/cors' + +new Elysia() + .use(cors({ + origin: 'http://localhost:3001', + methods: ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS'], + credentials: true, + allowedHeaders: ['Content-Type', 'Authorization'] + })) + .mount(auth.handler) +``` + +## Macro for Auth +Use macro + resolve for session/user: +```typescript +const betterAuth = new Elysia({ 
name: 'better-auth' }) + .mount(auth.handler) + .macro({ + auth: { + async resolve({ status, request: { headers } }) { + const session = await auth.api.getSession({ headers }) + + if (!session) return status(401) + + return { + user: session.user, + session: session.session + } + } + } + }) + +new Elysia() + .use(betterAuth) + .get('/user', ({ user }) => user, { auth: true }) +``` + +Access `user` and `session` in all routes. diff --git a/.agents/skills/elysiajs/integrations/cloudflare-worker.md b/.agents/skills/elysiajs/integrations/cloudflare-worker.md new file mode 100644 index 0000000..4245c1a --- /dev/null +++ b/.agents/skills/elysiajs/integrations/cloudflare-worker.md @@ -0,0 +1,95 @@ + +# Cloudflare Worker Integration + +## What It Is +**Experimental** Cloudflare Worker adapter for Elysia. + +## Setup +1. Install Wrangler: +```bash +wrangler init elysia-on-cloudflare +``` + +2. Apply adapter + compile: +```typescript +import { Elysia } from 'elysia' +import { CloudflareAdapter } from 'elysia/adapter/cloudflare-worker' + +export default new Elysia({ + adapter: CloudflareAdapter +}) + .get('/', () => 'Hello Cloudflare Worker!') + .compile() // Required +``` + +3. Set compatibility date (min `2025-06-01`): +```json +// wrangler.json +{ + "name": "elysia-on-cloudflare", + "main": "src/index.ts", + "compatibility_date": "2025-06-01" +} +``` + +4. Dev server: +```bash +wrangler dev +# http://localhost:8787 +``` + +No `nodejs_compat` flag needed. + +## Limitations +1. `Elysia.file` + Static Plugin don't work (no `fs` module) +2. OpenAPI Type Gen doesn't work (no `fs` module) +3. Cannot define Response before server start +4. 
Cannot inline values: +```typescript +// ❌ Throws error +.get('/', 'Hello Elysia') + +// ✅ Works +.get('/', () => 'Hello Elysia') +``` + +## Static Files +Use Cloudflare's built-in static serving: +```json +// wrangler.json +{ + "assets": { "directory": "public" } +} +``` + +Structure: +``` +├─ public +│ ├─ kyuukurarin.mp4 +│ └─ static/mika.webp +``` + +Access: +- `http://localhost:8787/kyuukurarin.mp4` +- `http://localhost:8787/static/mika.webp` + +## Binding +Import env from `cloudflare:workers`: +```typescript +import { env } from 'cloudflare:workers' + +export default new Elysia({ adapter: CloudflareAdapter }) + .get('/', () => `Hello ${await env.KV.get('my-key')}`) + .compile() +``` + +## AoT Compilation +As of Elysia 1.4.7, AoT works with Cloudflare Worker. Drop `aot: false` flag. + +Cloudflare now supports Function compilation during startup. + +## pnpm +Manual install: +```bash +pnpm add @sinclair/typebox openapi-types +``` diff --git a/.agents/skills/elysiajs/integrations/deno.md b/.agents/skills/elysiajs/integrations/deno.md new file mode 100644 index 0000000..28687d5 --- /dev/null +++ b/.agents/skills/elysiajs/integrations/deno.md @@ -0,0 +1,34 @@ +# Deno Integration +Run Elysia on Deno + +## What It Is +Run Elysia on Deno via Web Standard Request/Response. + +## Setup +Wrap `Elysia.fetch` in `Deno.serve`: +```typescript +import { Elysia } from 'elysia' + +const app = new Elysia() + .get('/', () => 'Hello Elysia') + .listen(3000) + +Deno.serve(app.fetch) +``` + +Run: +```bash +deno serve --watch src/index.ts +``` + +## Port Config +```typescript +Deno.serve(app.fetch) // Default +Deno.serve({ port: 8787 }, app.fetch) // Custom port +``` + +## pnpm +[Inference] pnpm doesn't auto-install peer deps. 
Manual install required: +```bash +pnpm add @sinclair/typebox openapi-types +``` diff --git a/.agents/skills/elysiajs/integrations/drizzle.md b/.agents/skills/elysiajs/integrations/drizzle.md new file mode 100644 index 0000000..779db4e --- /dev/null +++ b/.agents/skills/elysiajs/integrations/drizzle.md @@ -0,0 +1,258 @@ +# Drizzle Integration +Elysia + Drizzle integration guide + +## What It Is +Headless TypeScript ORM. Convert Drizzle schema → Elysia validation models via `drizzle-typebox`. + +## Flow +``` +Drizzle → drizzle-typebox → Elysia validation → OpenAPI + Eden Treaty +``` + +## Installation +```bash +bun add drizzle-orm drizzle-typebox +``` + +### Pin TypeBox Version +Prevent Symbol conflicts: +```bash +grep "@sinclair/typebox" node_modules/elysia/package.json +``` + +Add to `package.json`: +```json +{ + "overrides": { + "@sinclair/typebox": "0.32.4" + } +} +``` + +## Drizzle Schema +```typescript +// src/database/schema.ts +import { pgTable, varchar, timestamp } from 'drizzle-orm/pg-core' +import { createId } from '@paralleldrive/cuid2' + +export const user = pgTable('user', { + id: varchar('id').$defaultFn(() => createId()).primaryKey(), + username: varchar('username').notNull().unique(), + password: varchar('password').notNull(), + email: varchar('email').notNull().unique(), + salt: varchar('salt', { length: 64 }).notNull(), + createdAt: timestamp('created_at').defaultNow().notNull() +}) + +export const table = { user } as const +export type Table = typeof table +``` + +## drizzle-typebox +```typescript +import { t } from 'elysia' +import { createInsertSchema } from 'drizzle-typebox' +import { table } from './database/schema' + +const _createUser = createInsertSchema(table.user, { + email: t.String({ format: 'email' }) // Replace with Elysia type +}) + +new Elysia() + .post('/sign-up', ({ body }) => {}, { + body: t.Omit(_createUser, ['id', 'salt', 'createdAt']) + }) +``` + +## Type Instantiation Error +**Error**: "Type instantiation is possibly 
infinite" + +**Cause**: Circular reference when nesting drizzle-typebox into Elysia schema. + +**Fix**: Explicitly define type between them: +```typescript +// ✅ Works +const _createUser = createInsertSchema(table.user, { + email: t.String({ format: 'email' }) +}) +const createUser = t.Omit(_createUser, ['id', 'salt', 'createdAt']) + +// ❌ Infinite loop +const createUser = t.Omit( + createInsertSchema(table.user, { email: t.String({ format: 'email' }) }), + ['id', 'salt', 'createdAt'] +) +``` + +Always declare variable for drizzle-typebox then reference it. + +## Utility Functions +Copy as-is for simplified usage: +```typescript +// src/database/utils.ts +/** + * @lastModified 2025-02-04 + * @see https://elysiajs.com/recipe/drizzle.html#utility + */ + +import { Kind, type TObject } from '@sinclair/typebox' +import { + createInsertSchema, + createSelectSchema, + BuildSchema, +} from 'drizzle-typebox' + +import { table } from './schema' +import type { Table } from 'drizzle-orm' + +type Spread< + T extends TObject | Table, + Mode extends 'select' | 'insert' | undefined, +> = + T extends TObject + ? { + [K in keyof Fields]: Fields[K] + } + : T extends Table + ? Mode extends 'select' + ? BuildSchema< + 'select', + T['_']['columns'], + undefined + >['properties'] + : Mode extends 'insert' + ? BuildSchema< + 'insert', + T['_']['columns'], + undefined + >['properties'] + : {} + : {} + +/** + * Spread a Drizzle schema into a plain object + */ +export const spread = < + T extends TObject | Table, + Mode extends 'select' | 'insert' | undefined, +>( + schema: T, + mode?: Mode, +): Spread => { + const newSchema: Record = {} + let table + + switch (mode) { + case 'insert': + case 'select': + if (Kind in schema) { + table = schema + break + } + + table = + mode === 'insert' + ? 
createInsertSchema(schema) + : createSelectSchema(schema) + + break + + default: + if (!(Kind in schema)) throw new Error('Expect a schema') + table = schema + } + + for (const key of Object.keys(table.properties)) + newSchema[key] = table.properties[key] + + return newSchema as any +} + +/** + * Spread a Drizzle Table into a plain object + * + * If `mode` is 'insert', the schema will be refined for insert + * If `mode` is 'select', the schema will be refined for select + * If `mode` is undefined, the schema will be spread as is, models will need to be refined manually + */ +export const spreads = < + T extends Record, + Mode extends 'select' | 'insert' | undefined, +>( + models: T, + mode?: Mode, +): { + [K in keyof T]: Spread +} => { + const newSchema: Record = {} + const keys = Object.keys(models) + + for (const key of keys) newSchema[key] = spread(models[key], mode) + + return newSchema as any +} +``` + +Usage: +```typescript +// ✅ Using spread +const user = spread(table.user, 'insert') +const createUser = t.Object({ + id: user.id, + username: user.username, + password: user.password +}) + +// ⚠️ Using t.Pick +const _createUser = createInsertSchema(table.user) +const createUser = t.Pick(_createUser, ['id', 'username', 'password']) +``` + +## Table Singleton Pattern +```typescript +// src/database/model.ts +import { table } from './schema' +import { spreads } from './utils' + +export const db = { + insert: spreads({ user: table.user }, 'insert'), + select: spreads({ user: table.user }, 'select') +} as const +``` + +Usage: +```typescript +// src/index.ts +import { db } from './database/model' +const { user } = db.insert + +new Elysia() + .post('/sign-up', ({ body }) => {}, { + body: t.Object({ + id: user.username, + username: user.username, + password: user.password + }) + }) +``` + +## Refinement +```typescript +// src/database/model.ts +import { createInsertSchema, createSelectSchema } from 'drizzle-typebox' + +export const db = { + insert: spreads({ + user: 
createInsertSchema(table.user, { + email: t.String({ format: 'email' }) + }) + }, 'insert'), + select: spreads({ + user: createSelectSchema(table.user, { + email: t.String({ format: 'email' }) + }) + }, 'select') +} as const +``` + +`spread` skips refined schemas. diff --git a/.agents/skills/elysiajs/integrations/expo.md b/.agents/skills/elysiajs/integrations/expo.md new file mode 100644 index 0000000..fad1471 --- /dev/null +++ b/.agents/skills/elysiajs/integrations/expo.md @@ -0,0 +1,95 @@ +# Expo Integration +Run Elysia on Expo (React Native) + +## What It Is +Create API routes in Expo app (SDK 50+, App Router v3). + +## Setup +1. Create `app/[...slugs]+api.ts` +2. Define Elysia server +3. Export `Elysia.fetch` as HTTP methods + +```typescript +// app/[...slugs]+api.ts +import { Elysia, t } from 'elysia' + +const app = new Elysia() + .get('/', 'hello Expo') + .post('/', ({ body }) => body, { + body: t.Object({ name: t.String() }) + }) + +export const GET = app.fetch +export const POST = app.fetch +``` + +## Prefix for Non-Root +If placed in `app/api/[...slugs]+api.ts`, set prefix: +```typescript +const app = new Elysia({ prefix: '/api' }) + .get('/', 'Hello Expo') + +export const GET = app.fetch +export const POST = app.fetch +``` + +Ensures routing works in any location. + +## Eden (End-to-End Type Safety) +1. Export type: +```typescript +// app/[...slugs]+api.ts +const app = new Elysia() + .get('/', 'Hello Nextjs') + .post('/user', ({ body }) => body, { + body: treaty.schema('User', { name: 'string' }) + }) + +export type app = typeof app + +export const GET = app.fetch +export const POST = app.fetch +``` + +2. Create client: +```typescript +// lib/eden.ts +import { treaty } from '@elysiajs/eden' +import type { app } from '../app/[...slugs]+api' + +export const api = treaty('localhost:3000/api') +``` + +3. 
Use in components: +```tsx +// app/page.tsx +import { api } from '../lib/eden' + +export default async function Page() { + const message = await api.get() + return

+    <p>Hello, {message}</p>

+} +``` + +## Deployment +- Deploy as normal Elysia app OR +- Use experimental Expo server runtime + +With Expo runtime: +```bash +expo export +# Creates dist/server/_expo/functions/[...slugs]+api.js +``` + +Edge function, not normal server (no port allocation). + +### Adapters +- Express +- Netlify +- Vercel + +## pnpm +Manual install: +```bash +pnpm add @sinclair/typebox openapi-types +``` diff --git a/.agents/skills/elysiajs/integrations/nextjs.md b/.agents/skills/elysiajs/integrations/nextjs.md new file mode 100644 index 0000000..ddbc849 --- /dev/null +++ b/.agents/skills/elysiajs/integrations/nextjs.md @@ -0,0 +1,103 @@ + +# Next.js Integration + +## What It Is +Run Elysia on Next.js App Router. + +## Setup +1. Create `app/api/[[...slugs]]/route.ts` +2. Define Elysia + export handlers: +```typescript +// app/api/[[...slugs]]/route.ts +import { Elysia, t } from 'elysia' + +const app = new Elysia({ prefix: '/api' }) + .get('/', 'Hello Nextjs') + .post('/', ({ body }) => body, { + body: t.Object({ name: t.String() }) + }) + +export const GET = app.fetch +export const POST = app.fetch +``` + +WinterCG compliance - works as normal Next.js API route. + +## Prefix for Non-Root +If placed in `app/user/[[...slugs]]/route.ts`, set prefix: +```typescript +const app = new Elysia({ prefix: '/user' }) + .get('/', 'Hello Nextjs') + +export const GET = app.fetch +export const POST = app.fetch +``` + +## Eden (End-to-End Type Safety) +Isomorphic fetch pattern: +- Server: Direct calls (no network) +- Client: Network calls + +1. Export type: +```typescript +// app/api/[[...slugs]]/route.ts +export const app = new Elysia({ prefix: '/api' }) + .get('/', 'Hello Nextjs') + .post('/user', ({ body }) => body, { + body: treaty.schema('User', { name: 'string' }) + }) + +export type app = typeof app + +export const GET = app.fetch +export const POST = app.fetch +``` + +2. 
Create client: +```typescript +// lib/eden.ts +import { treaty } from '@elysiajs/eden' +import type { app } from '../app/api/[[...slugs]]/route' + +export const api = + typeof process !== 'undefined' + ? treaty(app).api + : treaty('localhost:3000').api +``` + +Use `typeof process` not `typeof window` (window undefined at build time → hydration error). + +3. Use in components: +```tsx +// app/page.tsx +import { api } from '../lib/eden' + +export default async function Page() { + const message = await api.get() + return

+    <p>Hello, {message}</p>

+} +``` + +Works with server/client components + ISR. + +## React Query +```tsx +import { useQuery } from '@tanstack/react-query' + +function App() { + const { data: response } = useQuery({ + queryKey: ['get'], + queryFn: () => getTreaty().get() + }) + + return response?.data +} +``` + +Works with all React Query features. + +## pnpm +Manual install: +```bash +pnpm add @sinclair/typebox openapi-types +``` diff --git a/.agents/skills/elysiajs/integrations/nodejs.md b/.agents/skills/elysiajs/integrations/nodejs.md new file mode 100644 index 0000000..ce2edfa --- /dev/null +++ b/.agents/skills/elysiajs/integrations/nodejs.md @@ -0,0 +1,64 @@ +# Node.js Integration +Run Elysia on Node.js + +## What It Is +Runtime adapter to run Elysia on Node.js. + +## Installation +```bash +bun add elysia @elysiajs/node +``` + +## Setup +Apply node adapter: +```typescript +import { Elysia } from 'elysia' +import { node } from '@elysiajs/node' + +const app = new Elysia({ adapter: node() }) + .get('/', () => 'Hello Elysia') + .listen(3000) +``` + +## Additional Setup (Recommended) +Install `tsx` for hot-reload: +```bash +bun add -d tsx @types/node typescript +``` + +Scripts in `package.json`: +```json +{ + "scripts": { + "dev": "tsx watch src/index.ts", + "build": "tsc src/index.ts --outDir dist", + "start": "NODE_ENV=production node dist/index.js" + } +} +``` + +- **dev**: Hot-reload dev mode +- **build**: Production build +- **start**: Production server + +Create `tsconfig.json`: +```bash +tsc --init +``` + +Update strict mode: +```json +{ + "compilerOptions": { + "strict": true + } +} +``` + +Provides hot-reload + JSX support similar to `bun dev`. 
+ +## pnpm +Manual install: +```bash +pnpm add @sinclair/typebox openapi-types +``` diff --git a/.agents/skills/elysiajs/integrations/nuxt.md b/.agents/skills/elysiajs/integrations/nuxt.md new file mode 100644 index 0000000..0b4d13d --- /dev/null +++ b/.agents/skills/elysiajs/integrations/nuxt.md @@ -0,0 +1,67 @@ +# Nuxt Integration + +## What It Is +Community plugin `nuxt-elysia` for Nuxt API routes with Eden Treaty. + +## Installation +```bash +bun add elysia @elysiajs/eden +bun add -d nuxt-elysia +``` + +## Setup +1. Add to Nuxt config: +```typescript +export default defineNuxtConfig({ + modules: ['nuxt-elysia'] +}) +``` + +2. Create `api.ts` at project root: +```typescript +// api.ts +export default () => new Elysia() + .get('/hello', () => ({ message: 'Hello world!' })) +``` + +3. Use Eden Treaty: +```vue + + +``` + +Auto-setup on Nuxt API route. + +## Prefix +Default: `/_api`. Customize: +```typescript +export default defineNuxtConfig({ + nuxtElysia: { + path: '/api' + } +}) +``` + +Mounts on `/api` instead of `/_api`. + +See [nuxt-elysia](https://github.com/tkesgar/nuxt-elysia) for more config. + +## pnpm +Manual install: +```bash +pnpm add @sinclair/typebox openapi-types +``` diff --git a/.agents/skills/elysiajs/integrations/prisma.md b/.agents/skills/elysiajs/integrations/prisma.md new file mode 100644 index 0000000..f0684f1 --- /dev/null +++ b/.agents/skills/elysiajs/integrations/prisma.md @@ -0,0 +1,93 @@ + +# Prisma Integration +Elysia + Prisma integration guide + +## What It Is +Type-safe ORM. Generate Elysia validation models from Prisma schema via `prismabox`. 
+ +## Flow +``` +Prisma → prismabox → Elysia validation → OpenAPI + Eden Treaty +``` + +## Installation +```bash +bun add @prisma/client prismabox && \ +bun add -d prisma +``` + +## Prisma Schema +Add `prismabox` generator: +```prisma +// prisma/schema.prisma +generator client { + provider = "prisma-client" + output = "../generated/prisma" +} + +datasource db { + provider = "sqlite" + url = env("DATABASE_URL") +} + +generator prismabox { + provider = "prismabox" + typeboxImportDependencyName = "elysia" + typeboxImportVariableName = "t" + inputModel = true + output = "../generated/prismabox" +} + +model User { + id String @id @default(cuid()) + email String @unique + name String? + posts Post[] +} + +model Post { + id String @id @default(cuid()) + title String + content String? + published Boolean @default(false) + author User @relation(fields: [authorId], references: [id]) + authorId String +} +``` + +Generates: +- `User` → `generated/prismabox/User.ts` +- `Post` → `generated/prismabox/Post.ts` + +## Using Generated Models +```typescript +// src/index.ts +import { Elysia, t } from 'elysia' +import { PrismaClient } from '../generated/prisma' +import { UserPlain, UserPlainInputCreate } from '../generated/prismabox/User' + +const prisma = new PrismaClient() + +new Elysia() + .put('/', async ({ body }) => + prisma.user.create({ data: body }), { + body: UserPlainInputCreate, + response: UserPlain + } + ) + .get('/id/:id', async ({ params: { id }, status }) => { + const user = await prisma.user.findUnique({ where: { id } }) + + if (!user) return status(404, 'User not found') + + return user + }, { + response: { + 200: UserPlain, + 404: t.String() + } + }) + .listen(3000) +``` + +Reuses DB schema in Elysia validation models. 
diff --git a/.agents/skills/elysiajs/integrations/react-email.md b/.agents/skills/elysiajs/integrations/react-email.md new file mode 100644 index 0000000..1cb636f --- /dev/null +++ b/.agents/skills/elysiajs/integrations/react-email.md @@ -0,0 +1,134 @@ +# React Email Integration + +## What It Is +Use React components to create emails. Direct JSX import via Bun. + +## Installation +```bash +bun add -d react-email +bun add @react-email/components react react-dom +``` + +Script in `package.json`: +```json +{ + "scripts": { + "email": "email dev --dir src/emails" + } +} +``` + +Email templates → `src/emails` directory. + +### TypeScript +Add to `tsconfig.json`: +```json +{ + "compilerOptions": { + "jsx": "react" + } +} +``` + +## Email Template +```tsx +// src/emails/otp.tsx +import * as React from 'react' +import { Tailwind, Section, Text } from '@react-email/components' + +export default function OTPEmail({ otp }: { otp: number }) { + return ( + +
+    <Tailwind>
+      <Section>
+        <Text>Verify your Email Address</Text>
+        <Text>Use the following code to verify your email address</Text>
+        <Text>{otp}</Text>
+        <Text>This code is valid for 10 minutes</Text>
+        <Text>Thank you for joining us</Text>
+      </Section>
+    </Tailwind>
+ ) +} + +OTPEmail.PreviewProps = { otp: 123456 } +``` + +`@react-email/components` → email-client compatible (Gmail, Outlook). Tailwind support. + +`PreviewProps` → playground only. + +## Preview +```bash +bun email +``` + +Opens browser with preview. + +## Send Email +Render with `react-dom/server`, submit via provider: + +### Nodemailer +```typescript +import { renderToStaticMarkup } from 'react-dom/server' +import OTPEmail from './emails/otp' +import nodemailer from 'nodemailer' + +const transporter = nodemailer.createTransport({ + host: 'smtp.gehenna.sh', + port: 465, + auth: { user: 'makoto', pass: '12345678' } +}) + +.get('/otp', async ({ body }) => { + const otp = ~~(Math.random() * 900_000) + 100_000 + const html = renderToStaticMarkup() + + await transporter.sendMail({ + from: '[email protected]', + to: body, + subject: 'Verify your email address', + html + }) + + return { success: true } +}, { + body: t.String({ format: 'email' }) +}) +``` + +### Resend +```typescript +import OTPEmail from './emails/otp' +import Resend from 'resend' + +const resend = new Resend('re_123456789') + +.get('/otp', ({ body }) => { + const otp = ~~(Math.random() * 900_000) + 100_000 + + await resend.emails.send({ + from: '[email protected]', + to: body, + subject: 'Verify your email address', + html: // Direct JSX + }) + + return { success: true } +}) +``` + +Direct JSX import thanks to Bun. + +Other providers: AWS SES, SendGrid. + +See [React Email Integrations](https://react.email/docs/integrations/overview). diff --git a/.agents/skills/elysiajs/integrations/sveltekit.md b/.agents/skills/elysiajs/integrations/sveltekit.md new file mode 100644 index 0000000..4ad306a --- /dev/null +++ b/.agents/skills/elysiajs/integrations/sveltekit.md @@ -0,0 +1,53 @@ + +# SvelteKit Integration + +## What It Is +Run Elysia on SvelteKit server routes. + +## Setup +1. Create `src/routes/[...slugs]/+server.ts` +2. Define Elysia server +3. 
Export fallback handler: +```typescript +// src/routes/[...slugs]/+server.ts +import { Elysia, t } from 'elysia' + +const app = new Elysia() + .get('/', 'hello SvelteKit') + .post('/', ({ body }) => body, { + body: t.Object({ name: t.String() }) + }) + +interface WithRequest { + request: Request +} + +export const fallback = ({ request }: WithRequest) => app.handle(request) +``` + +Treat as normal SvelteKit server route. + +## Prefix for Non-Root +If placed in `src/routes/api/[...slugs]/+server.ts`, set prefix: +```typescript +// src/routes/api/[...slugs]/+server.ts +import { Elysia, t } from 'elysia' + +const app = new Elysia({ prefix: '/api' }) + .get('/', () => 'hi') + .post('/', ({ body }) => body, { + body: t.Object({ name: t.String() }) + }) + +type RequestHandler = (v: { request: Request }) => Response | Promise + +export const fallback: RequestHandler = ({ request }) => app.handle(request) +``` + +Ensures routing works in any location. + +## pnpm +Manual install: +```bash +pnpm add @sinclair/typebox openapi-types +``` diff --git a/.agents/skills/elysiajs/integrations/tanstack-start.md b/.agents/skills/elysiajs/integrations/tanstack-start.md new file mode 100644 index 0000000..2a1e642 --- /dev/null +++ b/.agents/skills/elysiajs/integrations/tanstack-start.md @@ -0,0 +1,87 @@ +# Tanstack Start Integration + +## What It Is +Elysia runs inside Tanstack Start server routes. + +## Setup +1. Create `src/routes/api.$.ts` +2. Define Elysia server +3. Export handlers in `server.handlers`: +```typescript +// src/routes/api.$.ts +import { Elysia } from 'elysia' +import { createFileRoute } from '@tanstack/react-router' +import { createIsomorphicFn } from '@tanstack/react-start' + +const app = new Elysia({ + prefix: '/api' +}).get('/', 'Hello Elysia!') + +const handle = ({ request }: { request: Request }) => app.fetch(request) + +export const Route = createFileRoute('/api/$')({ + server: { + handlers: { + GET: handle, + POST: handle + } + } +}) +``` + +Runs on `/api`. 
Add methods to `server.handlers` as needed. + +## Eden (End-to-End Type Safety) +Isomorphic pattern with `createIsomorphicFn`: +```typescript +// src/routes/api.$.ts +export const getTreaty = createIsomorphicFn() + .server(() => treaty(app).api) + .client(() => treaty('localhost:3000').api) +``` + +- Server: Direct call (no HTTP overhead) +- Client: HTTP call + +## Loader Data +Fetch before render: +```tsx +// src/routes/index.tsx +import { createFileRoute } from '@tanstack/react-router' +import { getTreaty } from './api.$' + +export const Route = createFileRoute('/a')({ + component: App, + loader: () => getTreaty().get().then((res) => res.data) +}) + +function App() { + const data = Route.useLoaderData() + return data +} +``` + +Executed server-side during SSR. No HTTP overhead. Type-safe. + +## React Query +```tsx +import { useQuery } from '@tanstack/react-query' +import { getTreaty } from './api.$' + +function App() { + const { data: response } = useQuery({ + queryKey: ['get'], + queryFn: () => getTreaty().get() + }) + + return response?.data +} +``` + +Works with all React Query features. + +## pnpm +Manual install: +```bash +pnpm add @sinclair/typebox openapi-types +``` diff --git a/.agents/skills/elysiajs/integrations/vercel.md b/.agents/skills/elysiajs/integrations/vercel.md new file mode 100644 index 0000000..555ec8e --- /dev/null +++ b/.agents/skills/elysiajs/integrations/vercel.md @@ -0,0 +1,55 @@ +# Vercel Integration +Deploy Elysia on Vercel + +## What It Is +Zero-config deployment on Vercel (Bun or Node runtime). + +## Setup +1. Create/import Elysia server in `src/index.ts` +2. Export as default: +```typescript +import { Elysia, t } from 'elysia' + +export default new Elysia() + .get('/', () => 'Hello Vercel Function') + .post('/', ({ body }) => body, { + body: t.Object({ name: t.String() }) + }) +``` + +3. Develop locally: +```bash +vc dev +``` + +4. 
Deploy: +```bash +vc deploy +``` + +## Node.js Runtime +Set in `package.json`: +```json +{ + "name": "elysia-app", + "type": "module" +} +``` + +## Bun Runtime +Set in `vercel.json`: +```json +{ + "$schema": "https://openapi.vercel.sh/vercel.json", + "bunVersion": "1.x" +} +``` + +## pnpm +Manual install: +```bash +pnpm add @sinclair/typebox openapi-types +``` + +## Troubleshooting +Vercel has zero config for Elysia. For additional config, see [Vercel docs](https://vercel.com/docs/frameworks/backend/elysia). diff --git a/.agents/skills/elysiajs/patterns/mvc.md b/.agents/skills/elysiajs/patterns/mvc.md new file mode 100644 index 0000000..dc227c6 --- /dev/null +++ b/.agents/skills/elysiajs/patterns/mvc.md @@ -0,0 +1,380 @@ +# MVC pattern +This file contains a guideline for using Elysia with MVC or Model View Controller patterns + +- Controller: + - Prefers Elysia as a controller for HTTP dependant + - For non HTTP dependent, prefers service instead unless explicitly asked + - Use `onError` to handle local custom errors + - Register Model to Elysia instance via `Elysia.models({ ...models })` and prefix model by namespace `Elysia.prefix('model', 'Namespace.') + - Prefers Reference Model by name provided by Elysia instead of using an actual `Model.name` +- Service: + - Prefers class (or abstract class if possible) + - Prefers interface/type derive from `Model` + - Return `status` (`import { status } from 'elysia'`) for error + - Prefers `return Error` instead of `throw Error` +- Models: + - Always export validation model and type of validation model + - Custom Error should be in contains in Model + +## Controller +Due to type soundness of Elysia, it's not recommended to use a traditional controller class that is tightly coupled with Elysia's `Context` because: + +1. **Elysia type is complex** and heavily depends on plugin and multiple level of chaining. +2. **Hard to type**, Elysia type could change at anytime, especially with decorators, and store +3. 
**Loss of type integrity**, and inconsistency between types and runtime code. + +We recommended one of the following approach to implement a controller in Elysia. +1. Use Elysia instance as a controller itself +2. Create a controller that is not tied with HTTP request or Elysia. + +--- + +### 1. Elysia instance as a controller +> 1 Elysia instance = 1 controller + +Treat an Elysia instance as a controller, and define your routes directly on the Elysia instance. + +```typescript +// Do +import { Elysia } from 'elysia' +import { Service } from './service' + +new Elysia() + .get('/', ({ stuff }) => { + Service.doStuff(stuff) + }) +``` + +This approach allows Elysia to infer the `Context` type automatically, ensuring type integrity and consistency between types and runtime code. + +```typescript +// Don't +import { Elysia, t, type Context } from 'elysia' + +abstract class Controller { + static root(context: Context) { + return Service.doStuff(context.stuff) + } +} + +new Elysia() + .get('/', Controller.root) +``` + +This approach makes it hard to type `Context` properly, and may lead to loss of type integrity. + +### 2. Controller without HTTP request +If you want to create a controller class, we recommend creating a class that is not tied to HTTP request or Elysia at all. + +This approach allows you to decouple the controller from Elysia, making it easier to test, reuse, and even swap a framework while still follows the MVC pattern. + +```typescript +import { Elysia } from 'elysia' + +abstract class Controller { + static doStuff(stuff: string) { + return Service.doStuff(stuff) + } +} + +new Elysia() + .get('/', ({ stuff }) => Controller.doStuff(stuff)) +``` + +Tying the controller to Elysia Context may lead to: +1. Loss of type integrity +2. Make it harder to test and reuse +3. Lead to vendor lock-in + +We recommended to keep the controller decoupled from Elysia as much as possible. 
+ +### Don't: Pass entire `Context` to a controller +**Context is a highly dynamic type** that can be inferred from Elysia instance. + +Do not pass an entire `Context` to a controller, instead use object destructuring to extract what you need and pass it to the controller. + +```typescript +import type { Context } from 'elysia' + +abstract class Controller { + constructor() {} + + // Don't do this + static root(context: Context) { + return Service.doStuff(context.stuff) + } +} +``` + +This approach makes it hard to type `Context` properly, and may lead to loss of type integrity. + +### Testing +If you're using Elysia as a controller, you can test your controller using `handle` to directly call a function (and it's lifecycle) + +```typescript +import { Elysia } from 'elysia' +import { Service } from './service' + +import { describe, it, expect } from 'bun:test' + +const app = new Elysia() + .get('/', ({ stuff }) => { + Service.doStuff(stuff) + + return 'ok' + }) + +describe('Controller', () => { + it('should work', async () => { + const response = await app + .handle(new Request('http://localhost/')) + .then((x) => x.text()) + + expect(response).toBe('ok') + }) +}) +``` + +You may find more information about testing in [Unit Test](/patterns/unit-test.html). + +## Service +Service is a set of utility/helper functions decoupled as a business logic to use in a module/controller, in our case, an Elysia instance. + +Any technical logic that can be decoupled from controller may live inside a **Service**. + +There are 2 types of service in Elysia: +1. Non-request dependent service +2. Request dependent service + +### 1. Abstract away Non-request dependent service + +We recommend abstracting a service class/function away from Elysia. + +If the service or function isn't tied to an HTTP request or doesn't access a `Context`, it's recommended to implement it as a static class or function. 
+ +```typescript +import { Elysia, t } from 'elysia' + +abstract class Service { + static fibo(number: number): number { + if(number < 2) + return number + + return Service.fibo(number - 1) + Service.fibo(number - 2) + } +} + +new Elysia() + .get('/fibo', ({ body }) => { + return Service.fibo(body) + }, { + body: t.Numeric() + }) +``` + +If your service doesn't need to store a property, you may use `abstract class` and `static` instead to avoid allocating class instance. + +### 2. Request dependent service as Elysia instance + +**If the service is a request-dependent service** or needs to process HTTP requests, we recommend abstracting it as an Elysia instance to ensure type integrity and inference: + +```typescript +import { Elysia } from 'elysia' + +// Do +const AuthService = new Elysia({ name: 'Auth.Service' }) + .macro({ + isSignIn: { + resolve({ cookie, status }) { + if (!cookie.session.value) return status(401) + + return { + session: cookie.session.value, + } + } + } + }) + +const UserController = new Elysia() + .use(AuthService) + .get('/profile', ({ Auth: { user } }) => user, { + isSignIn: true + }) +``` + +### Do: Decorate only request dependent property + +It's recommended to `decorate` only request-dependent properties, such as `requestIP`, `requestTime`, or `session`. + +Overusing decorators may tie your code to Elysia, making it harder to test and reuse. + +```typescript +import { Elysia } from 'elysia' + +new Elysia() + .decorate('requestIP', ({ request }) => request.headers.get('x-forwarded-for') || request.ip) + .decorate('requestTime', () => Date.now()) + .decorate('session', ({ cookie }) => cookie.session.value) + .get('/', ({ requestIP, requestTime, session }) => { + return { requestIP, requestTime, session } + }) +``` + +### Don't: Pass entire `Context` to a service +**Context is a highly dynamic type** that can be inferred from Elysia instance. 
+ +Do not pass an entire `Context` to a service, instead use object destructuring to extract what you need and pass it to the service. +```typescript +import type { Context } from 'elysia' + +class AuthService { + constructor() {} + + // Don't do this + isSignIn({ status, cookie: { session } }: Context) { + if (session.value) + return status(401) + } +} +``` + +As the Elysia type is complex and heavily depends on plugins and multiple levels of chaining, it can be challenging to type manually as it's highly dynamic. + +## Model +Model or [DTO (Data Transfer Object)](https://en.wikipedia.org/wiki/Data_transfer_object) is handled by [Elysia.t (Validation)](/essential/validation.html#elysia-type). + +Elysia has a validation system built-in which can infer types from your code and validate them at runtime. + +### Do: Use Elysia's validation system + +Elysia's strength is prioritizing a single source of truth for both type and runtime validation. + +Instead of declaring an interface, reuse the validation model: +```typescript twoslash +// Do +import { Elysia, t } from 'elysia' + +const customBody = t.Object({ + username: t.String(), + password: t.String() +}) + +// Optional if you want to get the type of the model +// Usually we don't need the type, as it's already inferred by Elysia +type CustomBody = typeof customBody.static + +export { customBody } +``` + +We can get the type of a model by using `typeof` with the `.static` property of the model. + +Then you can use the `CustomBody` type to infer the type of the request body. 
+ +```typescript twoslash +// Do +new Elysia() + .post('/login', ({ body }) => { + return body + }, { + body: customBody + }) +``` + +### Don't: Declare a class instance as a model + +Do not declare a class instance as a model: +```typescript +// Don't +class CustomBody { + username: string + password: string + + constructor(username: string, password: string) { + this.username = username + this.password = password + } +} + +// Don't +interface ICustomBody { + username: string + password: string +} +``` + +### Don't: Declare type separate from the model +Do not declare a type separate from the model, instead use `typeof` with `.static` property to get the type of the model. + +```typescript +// Don't +import { Elysia, t } from 'elysia' + +const customBody = t.Object({ + username: t.String(), + password: t.String() +}) + +type CustomBody = { + username: string + password: string +} + +// Do +const customBody = t.Object({ + username: t.String(), + password: t.String() +}) + +type CustomBody = typeof customBody.static +``` + +### Group +You can group multiple models into a single object to make it more organized. + +```typescript +import { Elysia, t } from 'elysia' + +export const AuthModel = { + sign: t.Object({ + username: t.String(), + password: t.String() + }) +} + +const models = AuthModel.models +``` + +### Model Injection +Though this is optional, if you are strictly following MVC pattern, you may want to inject like a service into a controller. 
We recommend using Elysia's reference model + +Using Elysia's model reference +```typescript twoslash +import { Elysia, t } from 'elysia' + +const customBody = t.Object({ + username: t.String(), + password: t.String() +}) + +const AuthModel = new Elysia() + .model({ + sign: customBody + }) + +const models = AuthModel.models + +const UserController = new Elysia({ prefix: '/auth' }) + .use(AuthModel) + .prefix('model', 'auth.') + .post('/sign-in', async ({ body, cookie: { session } }) => { + return true + }, { + body: 'auth.sign' + }) +``` + +This approach provides several benefits: +1. Allows us to name a model and provide auto-completion. +2. Modifies the schema for later usage, or performs a [remap](/essential/handler.html#remap). +3. Shows up as "models" in OpenAPI-compliant clients. +4. Improves TypeScript inference speed as the model type will be cached during registration. diff --git a/.agents/skills/elysiajs/plugins/bearer.md b/.agents/skills/elysiajs/plugins/bearer.md new file mode 100644 index 0000000..df529e5 --- /dev/null +++ b/.agents/skills/elysiajs/plugins/bearer.md @@ -0,0 +1,30 @@ +# Bearer +Plugin for Elysia for retrieving the Bearer token. 
+ +## Installation +```bash +bun add @elysiajs/bearer +``` + +## Basic Usage +```typescript twoslash +import { Elysia } from 'elysia' +import { bearer } from '@elysiajs/bearer' + +const app = new Elysia() + .use(bearer()) + .get('/sign', ({ bearer }) => bearer, { + beforeHandle({ bearer, set, status }) { + if (!bearer) { + set.headers[ + 'WWW-Authenticate' + ] = `Bearer realm='sign', error="invalid_request"` + + return status(400, 'Unauthorized') + } + } + }) + .listen(3000) +``` + +This plugin is for retrieving a Bearer token specified in RFC6750 diff --git a/.agents/skills/elysiajs/plugins/cors.md b/.agents/skills/elysiajs/plugins/cors.md new file mode 100644 index 0000000..2d8db2a --- /dev/null +++ b/.agents/skills/elysiajs/plugins/cors.md @@ -0,0 +1,141 @@ +# CORS + +Plugin for Elysia that adds support for customizing Cross-Origin Resource Sharing behavior. + +## Installation +```bash +bun add @elysiajs/cors +``` + +## Basic Usage +```typescript twoslash +import { Elysia } from 'elysia' +import { cors } from '@elysiajs/cors' + +new Elysia().use(cors()).listen(3000) +``` + +This will set Elysia to accept requests from any origin. + +## Config + +Below is a config which is accepted by the plugin + +### origin + +@default `true` + +Indicates whether the response can be shared with the requesting code from the given origins. + +Value can be one of the following: + +- **string** - Name of origin which will directly assign to Access-Control-Allow-Origin header. +- **boolean** - If set to true, Access-Control-Allow-Origin will be set to `*` (any origins) +- **RegExp** - Pattern to match request's URL, allowed if matched. +- **Function** - Custom logic to allow resource sharing, allow if `true` is returned. + - Expected to have the type of: + ```typescript + cors(context: Context) => boolean | void + ``` +- **Array** - iterate through all cases above in order, allowed if any of the values are `true`. 
+ + --- + +### methods + +@default `*` + +Allowed methods for cross-origin requests by assigning the `Access-Control-Allow-Methods` header. + +Value can be one of the following: +- **undefined | null | ''** - Ignore all methods. +- **\*** - Allows all methods. +- **string** - Expects either a single method or a comma-delimited string + - (eg: `'GET, PUT, POST'`) +- **string[]** - Allow multiple HTTP methods. + - eg: `['GET', 'PUT', 'POST']` + +--- + +### allowedHeaders + +@default `*` + +Allowed headers for an incoming request by assigning the `Access-Control-Allow-Headers` header. + +Value can be one of the following: +- **string** - Expects either a single header or a comma-delimited string + - eg: `'Content-Type, Authorization'`. +- **string[]** - Allow multiple HTTP headers. + - eg: `['Content-Type', 'Authorization']` + +--- + +### exposeHeaders + +@default `*` + +Response CORS with specified headers by assigning the `Access-Control-Expose-Headers` header. + +Value can be one of the following: +- **string** - Expects either a single header or a comma-delimited string. + - eg: `'Content-Type, X-Powered-By'`. +- **string[]** - Allow multiple HTTP headers. + - eg: `['Content-Type', 'X-Powered-By']` + +--- + +### credentials + +@default `true` + +The Access-Control-Allow-Credentials response header tells browsers whether to expose the response to the frontend JavaScript code when the request's credentials mode Request.credentials is `include`. + +Credentials are cookies, authorization headers, or TLS client certificates, set by assigning the `Access-Control-Allow-Credentials` header. + +--- + +### maxAge + +@default `5` + +Indicates how long the results of a preflight request (that is, the information contained in the `Access-Control-Allow-Methods` and `Access-Control-Allow-Headers` headers) can be cached. + +Assigns the `Access-Control-Max-Age` header. 
+ +--- + +### preflight + +The preflight request is a request sent to check if the CORS protocol is understood and if a server is aware of using specific methods and headers. + +Response with **OPTIONS** request with 3 HTTP request headers: +- **Access-Control-Request-Method** +- **Access-Control-Request-Headers** +- **Origin** + +This config indicates if the server should respond to preflight requests. + +--- + +## Pattern + +Below you can find the common patterns to use the plugin. + +## Allow CORS by top-level domain + +```typescript twoslash +import { Elysia } from 'elysia' +import { cors } from '@elysiajs/cors' + +const app = new Elysia() + .use( + cors({ + origin: /.*\.saltyaom\.com$/ + }) + ) + .get('/', () => 'Hi') + .listen(3000) +``` + +This will allow requests from top-level domains with `saltyaom.com` diff --git a/.agents/skills/elysiajs/plugins/cron.md b/.agents/skills/elysiajs/plugins/cron.md new file mode 100644 index 0000000..3905ad5 --- /dev/null +++ b/.agents/skills/elysiajs/plugins/cron.md @@ -0,0 +1,265 @@ +# Cron Plugin + +This plugin adds support for running cronjob to Elysia server. + +## Installation + +```bash +bun add @elysiajs/cron +``` + +## Basic Usage +```typescript twoslash +import { Elysia } from 'elysia' +import { cron } from '@elysiajs/cron' + +new Elysia() + .use( + cron({ + name: 'heartbeat', + pattern: '*/10 * * * * *', + run() { + console.log('Heartbeat') + } + }) + ) + .listen(3000) +``` + +The above code will log `heartbeat` every 10 seconds. + +## Config +Below is a config which is accepted by the plugin + +### cron + +Create a cronjob for the Elysia server. + +``` +cron(config: CronConfig, callback: (Instance['store']) => void): this +``` + +`CronConfig` accepts the parameters specified below: + +--- + +### CronConfig.name + +Job name to register to `store`. + +This will register the cron instance to `store` with a specified name, which can be used to reference in later processes eg. stop the job. 
+ +--- + +### CronConfig.pattern + +Time to run the job as specified by cron syntax. + +``` +┌────────────── second (optional) +│ ┌──────────── minute +│ │ ┌────────── hour +│ │ │ ┌──────── day of the month +│ │ │ │ ┌────── month +│ │ │ │ │ ┌──── day of week +│ │ │ │ │ │ +* * * * * * +``` + +--- + +### CronConfig.timezone +Time zone in Europe/Stockholm format + +--- + +### CronConfig.startAt +Schedule start time for the job + +--- + +### CronConfig.stopAt +Schedule stop time for the job + +--- + +### CronConfig.maxRuns +Maximum number of executions + +--- + +### CronConfig.catch +Continue execution even if an unhandled error is thrown by a triggered function. + +### CronConfig.interval +The minimum interval between executions, in seconds. + +--- + +## CronConfig.Pattern +Below you can find the common patterns to use the plugin. + +--- + +## Pattern + +Below you can find the common patterns to use the plugin. + +## Stop cronjob + +You can stop cronjob manually by accessing the cronjob name registered to `store`. 
+ +```typescript +import { Elysia } from 'elysia' +import { cron } from '@elysiajs/cron' + +const app = new Elysia() + .use( + cron({ + name: 'heartbeat', + pattern: '*/1 * * * * *', + run() { + console.log('Heartbeat') + } + }) + ) + .get( + '/stop', + ({ + store: { + cron: { heartbeat } + } + }) => { + heartbeat.stop() + + return 'Stop heartbeat' + } + ) + .listen(3000) +``` + +--- + +## Predefined patterns + +You can use predefined patterns from `@elysiajs/cron/schedule` + +```typescript +import { Elysia } from 'elysia' +import { cron, Patterns } from '@elysiajs/cron' + +const app = new Elysia() + .use( + cron({ + name: 'heartbeat', + pattern: Patterns.everySecond(), + run() { + console.log('Heartbeat') + } + }) + ) + .get( + '/stop', + ({ + store: { + cron: { heartbeat } + } + }) => { + heartbeat.stop() + + return 'Stop heartbeat' + } + ) + .listen(3000) +``` + +### Functions + +| Function | Description | +| ---------------------------------------- | ----------------------------------------------------- | +| `.everySeconds(2)` | Run the task every 2 seconds | +| `.everyMinutes(5)` | Run the task every 5 minutes | +| `.everyHours(3)` | Run the task every 3 hours | +| `.everyHoursAt(3, 15)` | Run the task every 3 hours at 15 minutes | +| `.everyDayAt('04:19')` | Run the task every day at 04:19 | +| `.everyWeekOn(Patterns.MONDAY, '19:30')` | Run the task every Monday at 19:30 | +| `.everyWeekdayAt('17:00')` | Run the task every day from Monday to Friday at 17:00 | +| `.everyWeekendAt('11:00')` | Run the task on Saturday and Sunday at 11:00 | + +### Function aliases to constants + +| Function | Constant | +| ----------------- | ---------------------------------- | +| `.everySecond()` | EVERY_SECOND | +| `.everyMinute()` | EVERY_MINUTE | +| `.hourly()` | EVERY_HOUR | +| `.daily()` | EVERY_DAY_AT_MIDNIGHT | +| `.everyWeekday()` | EVERY_WEEKDAY | +| `.everyWeekend()` | EVERY_WEEKEND | +| `.weekly()` | EVERY_WEEK | +| `.monthly()` | EVERY_1ST_DAY_OF_MONTH_AT_MIDNIGHT | 
+| `.everyQuarter()` | EVERY_QUARTER | +| `.yearly()` | EVERY_YEAR | + +### Constants + +| Constant | Pattern | +| ---------------------------------------- | -------------------- | +| `.EVERY_SECOND` | `* * * * * *` | +| `.EVERY_5_SECONDS` | `*/5 * * * * *` | +| `.EVERY_10_SECONDS` | `*/10 * * * * *` | +| `.EVERY_30_SECONDS` | `*/30 * * * * *` | +| `.EVERY_MINUTE` | `*/1 * * * *` | +| `.EVERY_5_MINUTES` | `0 */5 * * * *` | +| `.EVERY_10_MINUTES` | `0 */10 * * * *` | +| `.EVERY_30_MINUTES` | `0 */30 * * * *` | +| `.EVERY_HOUR` | `0 0-23/1 * * *` | +| `.EVERY_2_HOURS` | `0 0-23/2 * * *` | +| `.EVERY_3_HOURS` | `0 0-23/3 * * *` | +| `.EVERY_4_HOURS` | `0 0-23/4 * * *` | +| `.EVERY_5_HOURS` | `0 0-23/5 * * *` | +| `.EVERY_6_HOURS` | `0 0-23/6 * * *` | +| `.EVERY_7_HOURS` | `0 0-23/7 * * *` | +| `.EVERY_8_HOURS` | `0 0-23/8 * * *` | +| `.EVERY_9_HOURS` | `0 0-23/9 * * *` | +| `.EVERY_10_HOURS` | `0 0-23/10 * * *` | +| `.EVERY_11_HOURS` | `0 0-23/11 * * *` | +| `.EVERY_12_HOURS` | `0 0-23/12 * * *` | +| `.EVERY_DAY_AT_1AM` | `0 01 * * *` | +| `.EVERY_DAY_AT_2AM` | `0 02 * * *` | +| `.EVERY_DAY_AT_3AM` | `0 03 * * *` | +| `.EVERY_DAY_AT_4AM` | `0 04 * * *` | +| `.EVERY_DAY_AT_5AM` | `0 05 * * *` | +| `.EVERY_DAY_AT_6AM` | `0 06 * * *` | +| `.EVERY_DAY_AT_7AM` | `0 07 * * *` | +| `.EVERY_DAY_AT_8AM` | `0 08 * * *` | +| `.EVERY_DAY_AT_9AM` | `0 09 * * *` | +| `.EVERY_DAY_AT_10AM` | `0 10 * * *` | +| `.EVERY_DAY_AT_11AM` | `0 11 * * *` | +| `.EVERY_DAY_AT_NOON` | `0 12 * * *` | +| `.EVERY_DAY_AT_1PM` | `0 13 * * *` | +| `.EVERY_DAY_AT_2PM` | `0 14 * * *` | +| `.EVERY_DAY_AT_3PM` | `0 15 * * *` | +| `.EVERY_DAY_AT_4PM` | `0 16 * * *` | +| `.EVERY_DAY_AT_5PM` | `0 17 * * *` | +| `.EVERY_DAY_AT_6PM` | `0 18 * * *` | +| `.EVERY_DAY_AT_7PM` | `0 19 * * *` | +| `.EVERY_DAY_AT_8PM` | `0 20 * * *` | +| `.EVERY_DAY_AT_9PM` | `0 21 * * *` | +| `.EVERY_DAY_AT_10PM` | `0 22 * * *` | +| `.EVERY_DAY_AT_11PM` | `0 23 * * *` | +| `.EVERY_DAY_AT_MIDNIGHT` | `0 0 * * *` | +| `.EVERY_WEEK` | 
`0 0 * * 0` | +| `.EVERY_WEEKDAY` | `0 0 * * 1-5` | +| `.EVERY_WEEKEND` | `0 0 * * 6,0` | +| `.EVERY_1ST_DAY_OF_MONTH_AT_MIDNIGHT` | `0 0 1 * *` | +| `.EVERY_1ST_DAY_OF_MONTH_AT_NOON` | `0 12 1 * *` | +| `.EVERY_2ND_HOUR` | `0 */2 * * *` | +| `.EVERY_2ND_HOUR_FROM_1AM_THROUGH_11PM` | `0 1-23/2 * * *` | +| `.EVERY_2ND_MONTH` | `0 0 1 */2 *` | +| `.EVERY_QUARTER` | `0 0 1 */3 *` | +| `.EVERY_6_MONTHS` | `0 0 1 */6 *` | +| `.EVERY_YEAR` | `0 0 1 1 *` | +| `.EVERY_30_MINUTES_BETWEEN_9AM_AND_5PM` | `0 */30 9-17 * * *` | +| `.EVERY_30_MINUTES_BETWEEN_9AM_AND_6PM` | `0 */30 9-18 * * *` | +| `.EVERY_30_MINUTES_BETWEEN_10AM_AND_7PM` | `0 */30 10-19 * * *` | diff --git a/.agents/skills/elysiajs/plugins/graphql-apollo.md b/.agents/skills/elysiajs/plugins/graphql-apollo.md new file mode 100644 index 0000000..655f258 --- /dev/null +++ b/.agents/skills/elysiajs/plugins/graphql-apollo.md @@ -0,0 +1,90 @@ +# GraphQL Apollo + +Plugin for Elysia to use GraphQL Apollo. + +## Installation +```bash +bun add graphql @elysiajs/apollo @apollo/server +``` + +## Basic Usage + +```typescript +import { Elysia } from 'elysia' +import { apollo, gql } from '@elysiajs/apollo' + +const app = new Elysia() + .use( + apollo({ + typeDefs: gql` + type Book { + title: String + author: String + } + + type Query { + books: [Book] + } + `, + resolvers: { + Query: { + books: () => { + return [ + { + title: 'Elysia', + author: 'saltyAom' + } + ] + } + } + } + }) + ) + .listen(3000) +``` + +Accessing `/graphql` should show Apollo GraphQL playground work with. + +## Context + +Because Elysia is based on Web Standard Request and Response which is different from Node's `HttpRequest` and `HttpResponse` that Express uses, results in `req, res` being undefined in context. + +Because of this, Elysia replaces both with `context` like route parameters. 
+ +```typescript +const app = new Elysia() + .use( + apollo({ + typeDefs, + resolvers, + context: async ({ request }) => { + const authorization = request.headers.get('Authorization') + + return { + authorization + } + } + }) + ) + .listen(3000) +``` + +## Config + +This plugin extends Apollo's [ServerRegistration](https://www.apollographql.com/docs/apollo-server/api/apollo-server/#options) (which is `ApolloServer`'s' constructor parameter). + +Below are the extended parameters for configuring Apollo Server with Elysia. + +### path + +@default `"/graphql"` + +Path to expose Apollo Server. + +--- + +### enablePlayground + +@default `process.env.ENV !== 'production'` + +Determine whether should Apollo should provide Apollo Playground. diff --git a/.agents/skills/elysiajs/plugins/graphql-yoga.md b/.agents/skills/elysiajs/plugins/graphql-yoga.md new file mode 100644 index 0000000..3203d02 --- /dev/null +++ b/.agents/skills/elysiajs/plugins/graphql-yoga.md @@ -0,0 +1,87 @@ +# GraphQL Yoga + +This plugin integrates GraphQL yoga with Elysia + +## Installation +```bash +bun add @elysiajs/graphql-yoga +``` + +## Basic Usage +```typescript +import { Elysia } from 'elysia' +import { yoga } from '@elysiajs/graphql-yoga' + +const app = new Elysia() + .use( + yoga({ + typeDefs: /* GraphQL */ ` + type Query { + hi: String + } + `, + resolvers: { + Query: { + hi: () => 'Hello from Elysia' + } + } + }) + ) + .listen(3000) +``` + +Accessing `/graphql` in the browser (GET request) would show you a GraphiQL instance for the GraphQL-enabled Elysia server. + +optional: you can install a custom version of optional peer dependencies as well: + +```bash +bun add graphql graphql-yoga +``` + +## Resolver + +Elysia uses Mobius to infer type from **typeDefs** field automatically, allowing you to get full type-safety and auto-complete when typing **resolver** types. 
+ +## Context + +You can add custom context to the resolver function by adding **context** + +```ts +import { Elysia } from 'elysia' +import { yoga } from '@elysiajs/graphql-yoga' + +const app = new Elysia() + .use( + yoga({ + typeDefs: /* GraphQL */ ` + type Query { + hi: String + } + `, + context: { + name: 'Mobius' + }, + // If context is a function on this doesn't present + // for some reason it won't infer context type + useContext(_) {}, + resolvers: { + Query: { + hi: async (parent, args, context) => context.name + } + } + }) + ) + .listen(3000) +``` + +## Config + +This plugin extends [GraphQL Yoga's createYoga options, please refer to the GraphQL Yoga documentation](https://the-guild.dev/graphql/yoga-server/docs) with inlining `schema` config to root. + +Below is a config which is accepted by the plugin + +### path + +@default `/graphql` + +Endpoint to expose GraphQL handler diff --git a/.agents/skills/elysiajs/plugins/html.md b/.agents/skills/elysiajs/plugins/html.md new file mode 100644 index 0000000..777a536 --- /dev/null +++ b/.agents/skills/elysiajs/plugins/html.md @@ -0,0 +1,188 @@ +# HTML + +Allows you to use JSX and HTML with proper headers and support. + +## Installation + +```bash +bun add @elysiajs/html +``` + +## Basic Usage +```tsx twoslash +import React from 'react' +import { Elysia } from 'elysia' +import { html, Html } from '@elysiajs/html' + +new Elysia() + .use(html()) + .get( + '/html', + () => ` + + + Hello World + + +

Hello World

+ + ` + ) + .get('/jsx', () => ( + + + Hello World + + +

Hello World

+ + + )) + .listen(3000) +``` + +This plugin will automatically add `Content-Type: text/html; charset=utf8` header to the response, add ``, and convert it into a Response object. + +## JSX +Elysia can use JSX + +1. Replace your file that needs to use JSX to end with affix **"x"**: +- .js -> .jsx +- .ts -> .tsx + +2. Register the TypeScript type by append the following to **tsconfig.json**: +```jsonc +// tsconfig.json +{ + "compilerOptions": { + "jsx": "react", + "jsxFactory": "Html.createElement", + "jsxFragmentFactory": "Html.Fragment" + } +} +``` + +3. Starts using JSX in your file +```tsx twoslash +import React from 'react' +import { Elysia } from 'elysia' +import { html, Html } from '@elysiajs/html' + +new Elysia() + .use(html()) + .get('/', () => ( + + + Hello World + + +

Hello World

+ + + )) + .listen(3000) +``` + +If the error `Cannot find name 'Html'. Did you mean 'html'?` occurs, this import must be added to the JSX template: + +```tsx +import { Html } from '@elysiajs/html' +``` + +It is important that it is written in uppercase. + +## XSS + +Elysia HTML is based use of the Kita HTML plugin to detect possible XSS attacks in compile time. + +You can use a dedicated `safe` attribute to sanitize user value to prevent XSS vulnerability. + +```tsx +import { Elysia, t } from 'elysia' +import { html, Html } from '@elysiajs/html' + +new Elysia() + .use(html()) + .post( + '/', + ({ body }) => ( + + + Hello World + + +

{body}

+ + + ), + { + body: t.String() + } + ) + .listen(3000) +``` + +However, when are building a large-scale app, it's best to have a type reminder to detect possible XSS vulnerabilities in your codebase. + +To add a type-safe reminder, please install: + +```sh +bun add @kitajs/ts-html-plugin +``` + +Then appends the following **tsconfig.json** + +```jsonc +// tsconfig.json +{ + "compilerOptions": { + "jsx": "react", + "jsxFactory": "Html.createElement", + "jsxFragmentFactory": "Html.Fragment", + "plugins": [{ "name": "@kitajs/ts-html-plugin" }] + } +} +``` + +## Config +Below is a config which is accepted by the plugin + +### contentType + +- Type: `string` +- Default: `'text/html; charset=utf8'` + +The content-type of the response. + +### autoDetect + +- Type: `boolean` +- Default: `true` + +Whether to automatically detect HTML content and set the content-type. + +### autoDoctype + +- Type: `boolean | 'full'` +- Default: `true` + +Whether to automatically add `` to a response starting with ``, if not found. + +Use `full` to also automatically add doctypes on responses returned without this plugin + +```ts +// without the plugin +app.get('/', () => '') + +// With the plugin +app.get('/', ({ html }) => html('')) +``` + +### isHtml + +- Type: `(value: string) => boolean` +- Default: `isHtml` (exported function) + +The function is used to detect if a string is a html or not. Default implementation if length is greater than 7, starts with `<` and ends with `>`. + +Keep in mind there's no real way to validate HTML, so the default implementation is a best guess. diff --git a/.agents/skills/elysiajs/plugins/jwt.md b/.agents/skills/elysiajs/plugins/jwt.md new file mode 100644 index 0000000..b5767bf --- /dev/null +++ b/.agents/skills/elysiajs/plugins/jwt.md @@ -0,0 +1,197 @@ +# JWT Plugin +This plugin adds support for using JWT in Elysia handlers. 
+ +## Installation +```bash +bun add @elysiajs/jwt +``` + +## Basic Usage +```typescript [cookie] +import { Elysia } from 'elysia' +import { jwt } from '@elysiajs/jwt' + +const app = new Elysia() + .use( + jwt({ + name: 'jwt', + secret: 'Fischl von Luftschloss Narfidort' + }) + ) + .get('/sign/:name', async ({ jwt, params: { name }, cookie: { auth } }) => { + const value = await jwt.sign({ name }) + + auth.set({ + value, + httpOnly: true, + maxAge: 7 * 86400, + path: '/profile', + }) + + return `Sign in as ${value}` + }) + .get('/profile', async ({ jwt, status, cookie: { auth } }) => { + const profile = await jwt.verify(auth.value) + + if (!profile) + return status(401, 'Unauthorized') + + return `Hello ${profile.name}` + }) + .listen(3000) +``` + +## Config +This plugin extends config from [jose](https://github.com/panva/jose). + +Below is a config that is accepted by the plugin. + +### name +Name to register `jwt` function as. + +For example, `jwt` function will be registered with a custom name. +```typescript +new Elysia() + .use( + jwt({ + name: 'myJWTNamespace', + secret: process.env.JWT_SECRETS! + }) + ) + .get('/sign/:name', ({ myJWTNamespace, params }) => { + return myJWTNamespace.sign(params) + }) +``` + +Because some might need to use multiple `jwt` with different configs in a single server, explicitly registering the JWT function with a different name is needed. + +### secret +The private key to sign JWT payload with. + +### schema +Type strict validation for JWT payload. + +### alg +@default `HS256` + +Signing Algorithm to sign JWT payload with. + +Possible properties for jose are: +HS256 +HS384 +HS512 +PS256 +PS384 +PS512 +RS256 +RS384 +RS512 +ES256 +ES256K +ES384 +ES512 +EdDSA + +### iss +The issuer claim identifies the principal that issued the JWT as per [RFC7519](https://www.rfc-editor.org/rfc/rfc7519#section-4.1.1) + +TLDR; is usually (the domain) name of the signer. 
+ +### sub +The subject claim identifies the principal that is the subject of the JWT. + +The claims in a JWT are normally statements about the subject as per [RFC7519](https://www.rfc-editor.org/rfc/rfc7519#section-4.1.2) + +### aud +The audience claim identifies the recipients that the JWT is intended for. + +Each principal intended to process the JWT MUST identify itself with a value in the audience claim as per [RFC7519](https://www.rfc-editor.org/rfc/rfc7519#section-4.1.3) + +### jti +JWT ID claim provides a unique identifier for the JWT as per [RFC7519](https://www.rfc-editor.org/rfc/rfc7519#section-4.1.7) + +### nbf +The "not before" claim identifies the time before which the JWT must not be accepted for processing as per [RFC7519](https://www.rfc-editor.org/rfc/rfc7519#section-4.1.5) + +### exp +The expiration time claim identifies the expiration time on or after which the JWT MUST NOT be accepted for processing as per [RFC7519](https://www.rfc-editor.org/rfc/rfc7519#section-4.1.4) + +### iat +The "issued at" claim identifies the time at which the JWT was issued. + +This claim can be used to determine the age of the JWT as per [RFC7519](https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6) + +### b64 +This JWS Extension Header Parameter modifies the JWS Payload representation and the JWS Signing input computation as per [RFC7797](https://www.rfc-editor.org/rfc/rfc7797). + +### kid +A hint indicating which key was used to secure the JWS. 
+ +This parameter allows originators to explicitly signal a change of key to recipients as per [RFC7515](https://www.rfc-editor.org/rfc/rfc7515#section-4.1.4) + +### x5t +(X.509 certificate SHA-1 thumbprint) header parameter is a base64url-encoded SHA-1 digest of the DER encoding of the X.509 certificate [RFC5280](https://www.rfc-editor.org/rfc/rfc5280) corresponding to the key used to digitally sign the JWS as per [RFC7515](https://www.rfc-editor.org/rfc/rfc7515#section-4.1.7) + +### x5c +(X.509 certificate chain) header parameter contains the X.509 public key certificate or certificate chain [RFC5280](https://www.rfc-editor.org/rfc/rfc5280) corresponding to the key used to digitally sign the JWS as per [RFC7515](https://www.rfc-editor.org/rfc/rfc7515#section-4.1.6) + +### x5u +(X.509 URL) header parameter is a URI [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) that refers to a resource for the X.509 public key certificate or certificate chain [RFC5280] corresponding to the key used to digitally sign the JWS as per [RFC7515](https://www.rfc-editor.org/rfc/rfc7515#section-4.1.5) + +### jwk +The "jku" (JWK Set URL) Header Parameter is a URI [RFC3986] that refers to a resource for a set of JSON-encoded public keys, one of which corresponds to the key used to digitally sign the JWS. + +The keys MUST be encoded as a JWK Set [JWK] as per [RFC7515](https://www.rfc-editor.org/rfc/rfc7515#section-4.1.2) + +### typ +The `typ` (type) Header Parameter is used by JWS applications to declare the media type [IANA.MediaTypes] of this complete JWS. + +This is intended for use by the application when more than one kind of object could be present in an application data structure that can contain a JWS as per [RFC7515](https://www.rfc-editor.org/rfc/rfc7515#section-4.1.9) + +### ctr +Content-Type parameter is used by JWS applications to declare the media type [IANA.MediaTypes] of the secured content (the payload). 
+ +This is intended for use by the application when more than one kind of object could be present in the JWS Payload as per [RFC7515](https://www.rfc-editor.org/rfc/rfc7515#section-4.1.10) + +## Handler +Below are the values added to the handler. + +### jwt.sign +A dynamic object of collection related to use with JWT registered by the JWT plugin. + +Type: +```typescript +sign: (payload: JWTPayloadSpec): Promise<string> +``` + +`JWTPayloadSpec` accepts the same value as [JWT config](#config) + +### jwt.verify +Verify payload with the provided JWT config + +Type: +```typescript +verify(payload: string) => Promise<JWTPayloadSpec | false> +``` + +`JWTPayloadSpec` accepts the same value as [JWT config](#config) + +## Pattern +Below you can find the common patterns to use the plugin. + +## Set JWT expiration date +By default, the config is passed to `setCookie` and inherits its value. + +```typescript +const app = new Elysia() + .use( + jwt({ + name: 'jwt', + secret: 'kunikuzushi', + exp: '7d' + }) + ) + .get('/sign/:name', async ({ jwt, params }) => jwt.sign(params)) +``` + +This will sign JWT with an expiration date of the next 7 days. diff --git a/.agents/skills/elysiajs/plugins/openapi.md b/.agents/skills/elysiajs/plugins/openapi.md new file mode 100644 index 0000000..c69150d --- /dev/null +++ b/.agents/skills/elysiajs/plugins/openapi.md @@ -0,0 +1,246 @@ +# OpenAPI Plugin + +## Installation +```bash +bun add @elysiajs/openapi +``` + +## Basic Usage +```typescript +import { openapi } from '@elysiajs/openapi' + +new Elysia() + .use(openapi()) + .get('/', () => 'hello') +``` + +Docs at `/openapi`, spec at `/openapi/json`. 
+ +## Detail Object +Extends OpenAPI Operation Object: +```typescript +.get('/', () => 'hello', { + detail: { + title: 'Hello', + description: 'An example route', + summary: 'Short summary', + deprecated: false, + hide: true, // Hide from docs + tags: ['App'] + } +}) +``` + +### Documentation Config +```typescript +openapi({ + documentation: { + info: { + title: 'API', + version: '1.0.0' + }, + tags: [ + { name: 'App', description: 'General' } + ], + components: { + securitySchemes: { + bearerAuth: { type: 'http', scheme: 'bearer' } + } + } + } +}) +``` + +### Standard Schema Mapping +```typescript +mapJsonSchema: { + zod: z.toJSONSchema, // Zod 4 + valibot: toJsonSchema, + effect: JSONSchema.make +} +``` + +Zod 3: `zodToJsonSchema` from `zod-to-json-schema` + +## OpenAPI Type Gen +Generate docs from types: +```typescript +import { fromTypes } from '@elysiajs/openapi' + +export const app = new Elysia() + .use(openapi({ + references: fromTypes() + })) +``` + +### Production +Recommended to generate `.d.ts` file for production when using OpenAPI Type Gen +```typescript +references: fromTypes( + process.env.NODE_ENV === 'production' + ? 
'dist/index.d.ts' + : 'src/index.ts' +) +``` + +### Options +```typescript +fromTypes('src/index.ts', { + projectRoot: path.join('..', import.meta.dir), + tsconfigPath: 'tsconfig.dts.json' +}) +``` + +### Caveat: Explicit Types +Use `Prettify` helper to inline when type is not showing: +```typescript +type Prettify<T> = { [K in keyof T]: T[K] } & {} + +function getUser(): Prettify<User> { } +``` + +## Schema Description +```typescript +body: t.Object({ + username: t.String(), + password: t.String({ + minLength: 8, + description: 'Password (8+ chars)' + }) +}, { + description: 'Expected username and password' +}), +detail: { + summary: 'Sign in user', + tags: ['auth'] +} +``` + +## Response Headers +```typescript +import { withHeader } from '@elysiajs/openapi' + +response: withHeader( + t.Literal('Hi'), + { 'x-powered-by': t.Literal('Elysia') } +) +``` + +Annotation only - doesn't enforce. Set headers manually. + +## Tags +Define + assign: +```typescript +.use(openapi({ + documentation: { + tags: [ + { name: 'App', description: 'General' }, + { name: 'Auth', description: 'Auth' } + ] + } +})) +.get('/', () => 'hello', { + detail: { tags: ['App'] } +}) +``` + +### Instance Tags +```typescript +new Elysia({ tags: ['user'] }) + .get('/user', 'user') +``` + +## Reference Models +Auto-generates schemas: +```typescript +.model({ + User: t.Object({ + id: t.Number(), + username: t.String() + }) +}) +.get('/user', () => ({ id: 1, username: 'x' }), { + response: { 200: 'User' }, + detail: { tags: ['User'] } +}) +``` + +## Guard +Apply to instance/group: +```typescript +.guard({ + detail: { + description: 'Requires auth' + } +}) +.get('/user', 'user') +``` + +## Security +```typescript +.use(openapi({ + documentation: { + components: { + securitySchemes: { + bearerAuth: { + type: 'http', + scheme: 'bearer', + bearerFormat: 'JWT' + } + } + } + } +})) + +new Elysia({ + prefix: '/address', + detail: { + security: [{ bearerAuth: [] }] + } +}) +``` + +Secures all routes under prefix. 
+ +## Config +Below is a config which is accepted by the `openapi({})` + +### enabled +@default true +Enable/Disable the plugin + +### documentation +OpenAPI documentation information +@see https://spec.openapis.org/oas/v3.0.3.html + +### exclude +Configuration to exclude paths or methods from documentation + +### exclude.methods +List of methods to exclude from documentation + +### exclude.paths +List of paths to exclude from documentation + +### exclude.staticFile +@default true + +Exclude static file routes from documentation + +### exclude.tags +List of tags to exclude from documentation + +### mapJsonSchema +A custom mapping function from Standard schema to OpenAPI schema + +### path +@default '/openapi' +The endpoint to expose OpenAPI documentation frontend + +### provider +@default 'scalar' + +OpenAPI documentation frontend between: +- Scalar +- SwaggerUI +- null: disable frontend diff --git a/.agents/skills/elysiajs/plugins/opentelemetry.md b/.agents/skills/elysiajs/plugins/opentelemetry.md new file mode 100644 index 0000000..0ca95c3 --- /dev/null +++ b/.agents/skills/elysiajs/plugins/opentelemetry.md @@ -0,0 +1,167 @@ +# OpenTelemetry Plugin - SKILLS.md + +## Installation +```bash +bun add @elysiajs/opentelemetry +``` + +## Basic Usage +```typescript +import { opentelemetry } from '@elysiajs/opentelemetry' +import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-node' +import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-proto' + +new Elysia() + .use(opentelemetry({ + spanProcessors: [ + new BatchSpanProcessor(new OTLPTraceExporter()) + ] + })) +``` + +Auto-collects spans from OpenTelemetry-compatible libraries. Parent/child spans applied automatically. 
+ +## Config +Extends OpenTelemetry SDK params: + +- `autoDetectResources` (true) - Auto-detect from env +- `contextManager` (AsyncHooksContextManager) - Custom context +- `textMapPropagator` (CompositePropagator) - W3C Trace + Baggage +- `metricReader` - For MeterProvider +- `views` - Histogram bucket config +- `instrumentations` (getNodeAutoInstrumentations()) - Metapackage or individual +- `resource` - Custom resource +- `resourceDetectors` ([envDetector, processDetector, hostDetector]) - Auto-detect needs `autoDetectResources: true` +- `sampler` - Custom sampler (default: sample all) +- `serviceName` - Namespace identifier +- `spanProcessors` - Array for tracer provider +- `traceExporter` - Auto-setup OTLP/http/protobuf with BatchSpanProcessor if not set +- `spanLimits` - Tracing params + +### Resource Detectors via Env +```bash +export OTEL_NODE_RESOURCE_DETECTORS="env,host" +# Options: env, host, os, process, serviceinstance, all, none +``` + +## Export to Backends +Example - Axiom: +```typescript +.use(opentelemetry({ + spanProcessors: [ + new BatchSpanProcessor( + new OTLPTraceExporter({ + url: 'https://api.axiom.co/v1/traces', + headers: { + Authorization: `Bearer ${Bun.env.AXIOM_TOKEN}`, + 'X-Axiom-Dataset': Bun.env.AXIOM_DATASET + } + }) + ) + ] +})) +``` + +## OpenTelemetry SDK +Use SDK normally - runs under Elysia's request span, auto-appears in trace. + +## Record Utility +Equivalent to `startActiveSpan` - auto-closes + captures exceptions: +```typescript +import { record } from '@elysiajs/opentelemetry' + +.get('', () => { + return record('database.query', () => { + return db.query('SELECT * FROM users') + }) +}) +``` + +Label for code shown in trace. 
+ +## Function Naming +Elysia reads function names as span names: +```typescript +// ⚠️ Anonymous span +.derive(async ({ cookie: { session } }) => { + return { user: await getProfile(session) } +}) + +// ✅ Named span: "getProfile" +.derive(async function getProfile({ cookie: { session } }) { + return { user: await getProfile(session) } +}) +``` + +## getCurrentSpan +Get current span outside handler (via AsyncLocalStorage): +```typescript +import { getCurrentSpan } from '@elysiajs/opentelemetry' + +function utility() { + const span = getCurrentSpan() + span.setAttributes({ 'custom.attribute': 'value' }) +} +``` + +## setAttributes +Sugar for `getCurrentSpan().setAttributes`: +```typescript +import { setAttributes } from '@elysiajs/opentelemetry' + +function utility() { + setAttributes({ 'custom.attribute': 'value' }) +} +``` + +## Instrumentations (Advanced) +SDK must run before importing instrumented module. + +### Setup +1. Separate file: +```typescript +// src/instrumentation.ts +import { opentelemetry } from '@elysiajs/opentelemetry' +import { PgInstrumentation } from '@opentelemetry/instrumentation-pg' + +export const instrumentation = opentelemetry({ + instrumentations: [new PgInstrumentation()] +}) +``` + +2. Apply: +```typescript +// src/index.ts +import { instrumentation } from './instrumentation' +new Elysia().use(instrumentation).listen(3000) +``` + +3. Preload: +```toml +# bunfig.toml +preload = ["./src/instrumentation.ts"] +``` + +### Production Deployment (Advanced) +OpenTelemetry monkey-patches `node_modules`. Exclude instrumented libs from bundling: +```bash +bun build --compile --external pg --outfile server src/index.ts +``` + +Package.json: +```json +{ + "dependencies": { "pg": "^8.15.6" }, + "devDependencies": { + "@elysiajs/opentelemetry": "^1.2.0", + "@opentelemetry/instrumentation-pg": "^0.52.0" + } +} +``` + +Production install: +```bash +bun install --production +``` + +Keeps `node_modules` with instrumented libs at runtime. 
diff --git a/.agents/skills/elysiajs/plugins/server-timing.md b/.agents/skills/elysiajs/plugins/server-timing.md new file mode 100644 index 0000000..0021424 --- /dev/null +++ b/.agents/skills/elysiajs/plugins/server-timing.md @@ -0,0 +1,71 @@ +# Server Timing Plugin +This plugin adds support for auditing performance bottlenecks with Server Timing API + +## Installation +```bash +bun add @elysiajs/server-timing +``` + +## Basic Usage +```typescript twoslash +import { Elysia } from 'elysia' +import { serverTiming } from '@elysiajs/server-timing' + +new Elysia() + .use(serverTiming()) + .get('/', () => 'hello') + .listen(3000) +``` + +Server Timing will then append the 'Server-Timing' header with the logged duration, function name, and detail for each life-cycle function. + +To inspect, open browser developer tools > Network > [Request made through Elysia server] > Timing. + +Now you can effortlessly audit the performance bottleneck of your server. + +## Config +Below is a config which is accepted by the plugin + +### enabled +@default `NODE_ENV !== 'production'` + +Determine whether or not Server Timing should be enabled + +### allow +@default `undefined` + +A condition to determine whether Server Timing should be logged + +### trace +@default `undefined` + +Allow Server Timing to log specified life-cycle events: + +Trace accepts objects of the following: +- request: capture duration from request +- parse: capture duration from parse +- transform: capture duration from transform +- beforeHandle: capture duration from beforeHandle +- handle: capture duration from the handle +- afterHandle: capture duration from afterHandle +- total: capture total duration from start to finish + +## Pattern +Below you can find the common patterns to use the plugin. 
+ +## Allow Condition +You may disable Server Timing on specific routes via `allow` property + +```ts twoslash +import { Elysia } from 'elysia' +import { serverTiming } from '@elysiajs/server-timing' + +new Elysia() + .use( + serverTiming({ + allow: ({ request }) => { + return new URL(request.url).pathname !== '/no-trace' + } + }) + ) +``` diff --git a/.agents/skills/elysiajs/plugins/static.md b/.agents/skills/elysiajs/plugins/static.md new file mode 100644 index 0000000..82fa1da --- /dev/null +++ b/.agents/skills/elysiajs/plugins/static.md @@ -0,0 +1,84 @@ +# Static Plugin +This plugin can serve static files/folders for Elysia Server + +## Installation +```bash +bun add @elysiajs/static +``` + +## Basic Usage +```typescript twoslash +import { Elysia } from 'elysia' +import { staticPlugin } from '@elysiajs/static' + +new Elysia() + .use(staticPlugin()) + .listen(3000) +``` + +By default, the static plugin default folder is `public`, and registered with `/public` prefix. + +Suppose your project structure is: +``` +| - src + | - index.ts +| - public + | - takodachi.png + | - nested + | - takodachi.png +``` + +The available path will become: +- /public/takodachi.png +- /public/nested/takodachi.png + +## Config +Below is a config which is accepted by the plugin + +### assets +@default `"public"` + +Path to the folder to expose as static + +### prefix +@default `"/public"` + +Path prefix to register public files + +### ignorePatterns +@default `[]` + +List of files to ignore from serving as static files + +### staticLimit +@default `1024` + +By default, the static plugin will register paths to the Router with a static name, if the limits are exceeded, paths will be lazily added to the Router to reduce memory usage. +Tradeoff memory with performance. + +### alwaysStatic +@default `false` + +If set to true, static files path will be registered to Router skipping the `staticLimits`. 
+ +### headers +@default `{}` + +Set response headers of files + +### indexHTML +@default `false` + +If set to true, the `index.html` file from the static directory will be served for any request that is matching neither a route nor any existing static file. + +## Pattern +Below you can find the common patterns to use the plugin. + +## Single file +Suppose you want to return just a single file, you can use `file` instead of using the static plugin +```typescript +import { Elysia, file } from 'elysia' + +new Elysia() + .get('/file', file('public/takodachi.png')) +``` diff --git a/.agents/skills/elysiajs/references/bun-fullstack-dev-server.md b/.agents/skills/elysiajs/references/bun-fullstack-dev-server.md new file mode 100644 index 0000000..70d721b --- /dev/null +++ b/.agents/skills/elysiajs/references/bun-fullstack-dev-server.md @@ -0,0 +1,129 @@ +# Fullstack Dev Server + +## What It Is +Bun 1.3 Fullstack Dev Server with HMR. React without bundler (no Vite/Webpack). + +Example: [elysia-fullstack-example](https://github.com/saltyaom/elysia-fullstack-example) + +## Setup +1. Install + use Elysia Static: +```typescript +import { Elysia } from 'elysia' +import { staticPlugin } from '@elysiajs/static' + +new Elysia() + .use(await staticPlugin()) // await required for HMR hooks + .listen(3000) +``` + +2. Create `public/index.html` + `public/index.tsx`: +```html + + + + + + Elysia React App + + + +
+ + + +``` + +```tsx +// public/index.tsx +import { useState } from 'react' +import { createRoot } from 'react-dom/client' + +function App() { + const [count, setCount] = useState(0) + const increase = () => setCount((c) => c + 1) + + return ( +
+ <div>
+ <button onClick={increase}>
+ {count}
+ </button>
+ </div>
+ ) +} + +const root = createRoot(document.getElementById('root')!) +root.render() +``` + +3. Enable JSX in `tsconfig.json`: +```json +{ + "compilerOptions": { + "jsx": "react-jsx" + } +} +``` + +4. Navigate to `http://localhost:3000/public`. + +Frontend + backend in single project. No bundler. Works with HMR, Tailwind, Tanstack Query, Eden Treaty, path alias. + +## Custom Prefix +```typescript +.use(await staticPlugin({ prefix: '/' })) +``` + +Serves at `/` instead of `/public`. + +## Tailwind CSS +1. Install: +```bash +bun add tailwindcss@4 +bun add -d bun-plugin-tailwind +``` + +2. Create `bunfig.toml`: +```toml +[serve.static] +plugins = ["bun-plugin-tailwind"] +``` + +3. Create `public/global.css`: +```css +@tailwind base; +``` + +4. Add to HTML or TS: +```html + +``` +Or: +```tsx +import './global.css' +``` + +## Path Alias +1. Add to `tsconfig.json`: +```json +{ + "compilerOptions": { + "baseUrl": ".", + "paths": { + "@public/*": ["public/*"] + } + } +} +``` + +2. Use: +```tsx +import '@public/global.css' +``` + +Works out of box. + +## Production Build +```bash +bun build --compile --target bun --outfile server src/index.ts +``` + +Creates single executable `server`. Include `public` folder when running. diff --git a/.agents/skills/elysiajs/references/cookie.md b/.agents/skills/elysiajs/references/cookie.md new file mode 100644 index 0000000..9e1aa1c --- /dev/null +++ b/.agents/skills/elysiajs/references/cookie.md @@ -0,0 +1,187 @@ +# Cookie + +## What It Is +Reactive mutable signal for cookie interaction. Auto-encodes/decodes objects. + +## Basic Usage +No get/set - direct value access: +```typescript +import { Elysia } from 'elysia' + +new Elysia() + .get('/', ({ cookie: { name } }) => { + // Get + name.value + + // Set + name.value = "New Value" + }) +``` + +Auto-encodes/decodes objects. Just works. + +## Reactivity +Signal-like approach. Single source of truth. Auto-sets headers, syncs values. + +Cookie jar = Proxy object. 
Extract value always `Cookie`, never `undefined`. Access via `.value`. + +Iterate over cookie jar → only existing cookies. + +## Cookie Attributes + +### Direct Property Assignment +```typescript +.get('/', ({ cookie: { name } }) => { + // Get + name.domain + + // Set + name.domain = 'millennium.sh' + name.httpOnly = true +}) +``` + +### set - Reset All Properties +```typescript +.get('/', ({ cookie: { name } }) => { + name.set({ + domain: 'millennium.sh', + httpOnly: true + }) +}) +``` + +Overwrites all properties. + +### add - Update Specific Properties +Like `set` but only overwrites defined properties. + +## Remove Cookie +```typescript +.get('/', ({ cookie, cookie: { name } }) => { + name.remove() + // or + delete cookie.name +}) +``` + +## Cookie Schema +Strict validation + type inference with `t.Cookie`: +```typescript +import { Elysia, t } from 'elysia' + +new Elysia() + .get('/', ({ cookie: { name } }) => { + name.value = { + id: 617, + name: 'Summoning 101' + } + }, { + cookie: t.Cookie({ + name: t.Object({ + id: t.Numeric(), + name: t.String() + }) + }) + }) +``` + +### Nullable Cookie +```typescript +cookie: t.Cookie({ + name: t.Optional( + t.Object({ + id: t.Numeric(), + name: t.String() + }) + ) +}) +``` + +## Cookie Signature +Cryptographic hash for verification. Prevents malicious modification. + +```typescript +new Elysia() + .get('/', ({ cookie: { profile } }) => { + profile.value = { id: 617, name: 'Summoning 101' } + }, { + cookie: t.Cookie({ + profile: t.Object({ + id: t.Numeric(), + name: t.String() + }) + }, { + secrets: 'Fischl von Luftschloss Narfidort', + sign: ['profile'] + }) + }) +``` + +Auto-signs/unsigns. + +### Global Config +```typescript +new Elysia({ + cookie: { + secrets: 'Fischl von Luftschloss Narfidort', + sign: ['profile'] + } +}) +``` + +## Cookie Rotation +Auto-handles secret rotation. Old signature verification + new signature signing. 
+ +```typescript +new Elysia({ + cookie: { + secrets: ['Vengeance will be mine', 'Fischl von Luftschloss Narfidort'] + } +}) +``` + +Array = key rotation (retire old, replace with new). + +## Config + +### secrets +Secret key for signing/unsigning. Array = key rotation. + +### domain +Domain Set-Cookie attribute. Default: none (current domain only). + +### encode +Function to encode value. Default: `encodeURIComponent`. + +### expires +Date for Expires attribute. Default: none (non-persistent, deleted on browser exit). + +If both `expires` and `maxAge` set, `maxAge` takes precedence (spec-compliant clients). + +### httpOnly (false) +HttpOnly attribute. If true, JS can't access via `document.cookie`. + +### maxAge (undefined) +Seconds for Max-Age attribute. Rounded down to integer. + +If both `expires` and `maxAge` set, `maxAge` takes precedence (spec-compliant clients). + +### path +Path attribute. Default: handler path. + +### priority +Priority attribute: `low` | `medium` | `high`. Not fully standardized. + +### sameSite +SameSite attribute: +- `true` = Strict +- `false` = not set +- `'lax'` = Lax +- `'none'` = None (explicit cross-site) +- `'strict'` = Strict + +Not fully standardized. + +### secure +Secure attribute. If true, only HTTPS. Clients won't send over HTTP. 
diff --git a/.agents/skills/elysiajs/references/deployment.md b/.agents/skills/elysiajs/references/deployment.md new file mode 100644 index 0000000..3c4cca8 --- /dev/null +++ b/.agents/skills/elysiajs/references/deployment.md @@ -0,0 +1,413 @@ +# Deployment + +## Production Build + +### Compile to Binary (Recommended) +```bash +bun build \ + --compile \ + --minify-whitespace \ + --minify-syntax \ + --target bun \ + --outfile server \ + src/index.ts +``` + +**Benefits:** +- No runtime needed on deployment server +- Smaller memory footprint (2-3x reduction) +- Faster startup +- Single portable executable + +**Run the binary:** +```bash +./server +``` + +### Compile to JavaScript +```bash +bun build \ + --minify-whitespace \ + --minify-syntax \ + --outfile ./dist/index.js \ + src/index.ts +``` + +**Run:** +```bash +NODE_ENV=production bun ./dist/index.js +``` + +## Docker + +### Basic Dockerfile +```dockerfile +FROM oven/bun:1 AS build + +WORKDIR /app + +# Cache dependencies +COPY package.json bun.lock ./ +RUN bun install + +COPY ./src ./src + +ENV NODE_ENV=production + +RUN bun build \ + --compile \ + --minify-whitespace \ + --minify-syntax \ + --outfile server \ + src/index.ts + +FROM gcr.io/distroless/base + +WORKDIR /app + +COPY --from=build /app/server server + +ENV NODE_ENV=production + +CMD ["./server"] + +EXPOSE 3000 +``` + +### Build and Run +```bash +docker build -t my-elysia-app . 
+docker run -p 3000:3000 my-elysia-app +``` + +### With Environment Variables +```dockerfile +FROM gcr.io/distroless/base + +WORKDIR /app + +COPY --from=build /app/server server + +ENV NODE_ENV=production +ENV PORT=3000 +ENV DATABASE_URL="" +ENV JWT_SECRET="" + +CMD ["./server"] + +EXPOSE 3000 +``` + +## Cluster Mode (Multiple CPU Cores) + +```typescript +// src/index.ts +import cluster from 'node:cluster' +import os from 'node:os' +import process from 'node:process' + +if (cluster.isPrimary) { + for (let i = 0; i < os.availableParallelism(); i++) { + cluster.fork() + } +} else { + await import('./server') + console.log(`Worker ${process.pid} started`) +} +``` + +```typescript +// src/server.ts +import { Elysia } from 'elysia' + +new Elysia() + .get('/', () => 'Hello World!') + .listen(3000) +``` + +## Environment Variables + +### .env File +```env +NODE_ENV=production +PORT=3000 +DATABASE_URL=postgresql://user:password@localhost:5432/db +JWT_SECRET=your-secret-key +CORS_ORIGIN=https://example.com +``` + +### Load in App +```typescript +import { Elysia } from 'elysia' + +const app = new Elysia() + .get('/env', () => ({ + env: process.env.NODE_ENV, + port: process.env.PORT + })) + .listen(parseInt(process.env.PORT || '3000')) +``` + +## Platform-Specific Deployments + +### Railway +```typescript +// Railway assigns random PORT via env variable +new Elysia() + .get('/', () => 'Hello Railway') + .listen(process.env.PORT ?? 
3000) +``` + +### Vercel +```typescript +// src/index.ts +import { Elysia } from 'elysia' + +export default new Elysia() + .get('/', () => 'Hello Vercel') + +export const GET = app.fetch +export const POST = app.fetch +``` + +```json +// vercel.json +{ + "$schema": "https://openapi.vercel.sh/vercel.json", + "bunVersion": "1.x" +} +``` + +### Cloudflare Workers +```typescript +import { Elysia } from 'elysia' +import { CloudflareAdapter } from 'elysia/adapter/cloudflare-worker' + +export default new Elysia({ + adapter: CloudflareAdapter +}) + .get('/', () => 'Hello Cloudflare!') + .compile() +``` + +```toml +# wrangler.toml +name = "elysia-app" +main = "src/index.ts" +compatibility_date = "2025-06-01" +``` + +### Node.js Adapter +```typescript +import { Elysia } from 'elysia' +import { node } from '@elysiajs/node' + +const app = new Elysia({ adapter: node() }) + .get('/', () => 'Hello Node.js') + .listen(3000) +``` + +## Performance Optimization + +### Enable AoT Compilation +```typescript +new Elysia({ + aot: true // Ahead-of-time compilation +}) +``` + +### Use Native Static Response +```typescript +new Elysia({ + nativeStaticResponse: true +}) + .get('/version', 1) // Optimized for Bun.serve.static +``` + +### Precompile Routes +```typescript +new Elysia({ + precompile: true // Compile all routes ahead of time +}) +``` + +## Health Checks + +```typescript +new Elysia() + .get('/health', () => ({ + status: 'ok', + timestamp: Date.now() + })) + .get('/ready', ({ db }) => { + // Check database connection + const isDbReady = checkDbConnection() + + if (!isDbReady) { + return status(503, { status: 'not ready' }) + } + + return { status: 'ready' } + }) +``` + +## Graceful Shutdown + +```typescript +import { Elysia } from 'elysia' + +const app = new Elysia() + .get('/', () => 'Hello') + .listen(3000) + +process.on('SIGTERM', () => { + console.log('SIGTERM received, shutting down gracefully') + app.stop() + process.exit(0) +}) + +process.on('SIGINT', () => { + 
console.log('SIGINT received, shutting down gracefully') + app.stop() + process.exit(0) +}) +``` + +## Monitoring + +### OpenTelemetry +```typescript +import { opentelemetry } from '@elysiajs/opentelemetry' + +new Elysia() + .use(opentelemetry({ + serviceName: 'my-service', + endpoint: 'http://localhost:4318' + })) +``` + +### Custom Logging +```typescript +.onRequest(({ request }) => { + console.log(`[${new Date().toISOString()}] ${request.method} ${request.url}`) +}) +.onAfterResponse(({ request, set }) => { + console.log(`[${new Date().toISOString()}] ${request.method} ${request.url} - ${set.status}`) +}) +``` + +## SSL/TLS (HTTPS) + +```typescript +import { Elysia, file } from 'elysia' + +new Elysia({ + serve: { + tls: { + cert: file('cert.pem'), + key: file('key.pem') + } + } +}) + .get('/', () => 'Hello HTTPS') + .listen(3000) +``` + +## Best Practices + +1. **Always compile to binary for production** + - Reduces memory usage + - Smaller deployment size + - No runtime needed + +2. **Use environment variables** + - Never hardcode secrets + - Use different configs per environment + +3. **Enable health checks** + - Essential for load balancers + - K8s/Docker orchestration + +4. **Implement graceful shutdown** + - Handle SIGTERM/SIGINT + - Close connections properly + +5. **Use cluster mode** + - Utilize all CPU cores + - Better performance under load + +6. 
**Monitor your app** + - Use OpenTelemetry + - Log requests/responses + - Track errors + +## Example Production Setup + +```typescript +// src/server.ts +import { Elysia } from 'elysia' +import { cors } from '@elysiajs/cors' +import { opentelemetry } from '@elysiajs/opentelemetry' + +export const app = new Elysia({ + aot: true, + nativeStaticResponse: true +}) + .use(cors({ + origin: process.env.CORS_ORIGIN || 'http://localhost:3000' + })) + .use(opentelemetry({ + serviceName: 'my-service' + })) + .get('/health', () => ({ status: 'ok' })) + .get('/', () => 'Hello Production') + .listen(parseInt(process.env.PORT || '3000')) + +// Graceful shutdown +process.on('SIGTERM', () => { + app.stop() + process.exit(0) +}) +``` + +```typescript +// src/index.ts (cluster) +import cluster from 'node:cluster' +import os from 'node:os' + +if (cluster.isPrimary) { + for (let i = 0; i < os.availableParallelism(); i++) { + cluster.fork() + } +} else { + await import('./server') +} +``` + +```dockerfile +# Dockerfile +FROM oven/bun:1 AS build + +WORKDIR /app + +COPY package.json bun.lock ./ +RUN bun install + +COPY ./src ./src + +ENV NODE_ENV=production + +RUN bun build --compile --outfile server src/index.ts + +FROM gcr.io/distroless/base + +WORKDIR /app + +COPY --from=build /app/server server + +ENV NODE_ENV=production + +CMD ["./server"] + +EXPOSE 3000 +``` diff --git a/.agents/skills/elysiajs/references/eden.md b/.agents/skills/elysiajs/references/eden.md new file mode 100644 index 0000000..7d9165d --- /dev/null +++ b/.agents/skills/elysiajs/references/eden.md @@ -0,0 +1,158 @@ +# Eden Treaty +e2e type safe RPC client for share type from backend to frontend. + +## What It Is +Type-safe object representation for Elysia server. Auto-completion + error handling. 
+ +## Installation +```bash +bun add @elysiajs/eden +bun add -d elysia +``` + +Export Elysia server type: +```typescript +const app = new Elysia() + .get('/', () => 'Hi Elysia') + .get('/id/:id', ({ params: { id } }) => id) + .post('/mirror', ({ body }) => body, { + body: t.Object({ + id: t.Number(), + name: t.String() + }) + }) + .listen(3000) + +export type App = typeof app +``` + +Consume on client side: +```typescript +import { treaty } from '@elysiajs/eden' +import type { App } from './server' + +const client = treaty('localhost:3000') + +// response: Hi Elysia +const { data: index } = await client.get() + +// response: 1895 +const { data: id } = await client.id({ id: 1895 }).get() + +// response: { id: 1895, name: 'Skadi' } +const { data: nendoroid } = await client.mirror.post({ + id: 1895, + name: 'Skadi' +}) +``` + +## Common Errors & Fixes +- **Strict mode**: Enable in tsconfig +- **Version mismatch**: `npm why elysia` - must match server/client +- **TypeScript**: Min 5.0 +- **Method chaining**: Required on server +- **Bun types**: `bun add -d @types/bun` if using Bun APIs +- **Path alias**: Must resolve same on frontend/backend + +### Monorepo Path Alias +Must resolve to same file on frontend/backend + +```json +// tsconfig.json at root +{ + "compilerOptions": { + "baseUrl": ".", + "paths": { + "@frontend/*": ["./apps/frontend/src/*"], + "@backend/*": ["./apps/backend/src/*"] + } + } +} +``` + +## Syntax Mapping +| Path | Method | Treaty | +|----------------|--------|-------------------------------| +| / | GET | `.get()` | +| /hi | GET | `.hi.get()` | +| /deep/nested | POST | `.deep.nested.post()` | +| /item/:name | GET | `.item({ name: 'x' }).get()` | + +## Parameters + +### With body (POST/PUT/PATCH/DELETE): +```typescript +.user.post( + { name: 'Elysia' }, // body + { headers: {}, query: {}, fetch: {} } // optional +) +``` + +### No body (GET/HEAD): +```typescript +.hello.get({ headers: {}, query: {}, fetch: {} }) +``` + +### Empty body with 
query/headers: +```typescript +.user.post(null, { query: { name: 'Ely' } }) +``` + +### Fetch options: +```typescript +.hello.get({ fetch: { signal: controller.signal } }) +``` + +### File upload: +```typescript +// Accepts: File | File[] | FileList | Blob +.image.post({ + title: 'Title', + image: fileInput.files! +}) +``` + +## Response +```typescript +const { data, error, response, status, headers } = await api.user.post({ name: 'x' }) + +if (error) { + switch (error.status) { + case 400: throw error.value + default: throw error.value + } +} +// data unwrapped after error handling +return data +``` + +status >= 300 → `data = null`, `error` has value + +## Stream/SSE +Interpreted as `AsyncGenerator`: +```typescript +const { data, error } = await treaty(app).ok.get() +if (error) throw error + +for await (const chunk of data) console.log(chunk) +``` + +## Utility Types +```typescript +import { Treaty } from '@elysiajs/eden' + +type UserData = Treaty.Data +type UserError = Treaty.Error +``` + +## WebSocket +```typescript +const chat = api.chat.subscribe() + +chat.subscribe((message) => console.log('got', message)) +chat.on('open', () => chat.send('hello')) + +// Native access: chat.raw +``` + +`.subscribe()` accepts same params as `get`/`head` diff --git a/.agents/skills/elysiajs/references/lifecycle.md b/.agents/skills/elysiajs/references/lifecycle.md new file mode 100644 index 0000000..645584e --- /dev/null +++ b/.agents/skills/elysiajs/references/lifecycle.md @@ -0,0 +1,198 @@ +# Lifecycle + +Instead of a sequential process, Elysia's request handling is divided into multiple stages called lifecycle events. + +It's designed to separate the process into distinct phases based on their responsibility without interfering with each others. + +### List of events in order + +1. **request** - early, global +2. **parse** - body parsing +3. **transform** / **derive** - mutate context pre validation +4. **beforeHandle** / **resolve** - auth/guard logic +5. 
**handler** - your business code
6. **afterHandle** - tweak response, set headers
7. **mapResponse** - turn anything into a proper `Response`
8. **onError** - centralized error handling
9. **onAfterResponse** - post response/cleanup tasks

## Request (`onRequest`)

Runs first for every incoming request.

- Ideal for **caching, rate limiting, CORS, adding global headers**.
- If the hook returns a value, the whole lifecycle stops and that value becomes the response.

```ts
new Elysia().onRequest(({ ip, set }) => {
  if (blocked(ip)) return (set.status = 429)
})
```

---

## Parse (`onParse`)

_Body parsing stage._

- Handles `text/plain`, `application/json`, `multipart/form-data`, `application/x-www-form-urlencoded` by default.
- Use to add **custom parsers** or support extra `Content-Type`s.

```ts
new Elysia().onParse(({ request, contentType }) => {
  if (contentType === 'application/custom') return request.text()
})
```

---

## Transform (`onTransform`)

_Runs **just before validation**; can mutate the request context._

- Perfect for **type coercion**, trimming strings, or adding temporary fields that validation will use.

```ts
new Elysia().onTransform(({ params }) => {
  params.id = Number(params.id)
})
```

---

## Derive

_Runs along with `onTransform` **but before validation**; adds per-request values to the context._

- Useful for extracting info from headers, cookies, query, etc., that you want to reuse in handlers.

```ts
new Elysia().derive(({ headers }) => ({
  bearer: headers.authorization?.replace(/^Bearer /, '')
}))
```

---

## Before Handle (`onBeforeHandle`)

_Executed after validation, right before the route handler._

- Great for **auth checks, permission gating, custom pre-validation logic**.
- Returning a value skips the handler. 
+ +```ts +new Elysia().get('/', () => 'hi', { + beforeHandle({ cookie, status }) { + if (!cookie.session) return status(401) + } +}) +``` + +--- + +## Resolve + +_Like `derive` but runs **after validation** along "Before Handle" (so you can rely on validated data)._ + +- Usually placed inside a `guard` because it isn't available as a local hook. + +```ts +new Elysia().guard( + { headers: t.Object({ authorization: t.String() }) }, + (app) => + app + .resolve(({ headers }) => ({ + bearer: headers.authorization.split(' ')[1] + })) + .get('/', ({ bearer }) => bearer) +) +``` + +--- + +## After Handle (`onAfterHandle`) + +_Runs after the handler finishes._ + +- Can **modify response headers**, wrap the result in a `Response`, or transform the payload. +- Returning a value **replaces** the handler’s result, but the next `afterHandle` hooks still run. + +```ts +new Elysia().get('/', () => '
<h1>Hello World</h1>
', { + afterHandle({ response, set }) { + if (isHtml(response)) { + set.headers['content-type'] = 'text/html; charset=utf-8' + return new Response(response) + } + } +}) +``` + +--- + +## Map Response (`mapResponse`) + +_Runs right after all `afterHandle` hooks; maps **any** value to a Web standard `Response`._ + +- Ideal for **compression, custom content type mapping, streaming**. + +```ts +new Elysia().mapResponse(({ responseValue, set }) => { + const body = + typeof responseValue === 'object' + ? JSON.stringify(responseValue) + : String(responseValue ?? '') + + set.headers['content-encoding'] = 'gzip' + return new Response(Bun.gzipSync(new TextEncoder().encode(body)), { + headers: { + 'Content-Type': + typeof responseValue === 'object' + ? 'application/json' + : 'text/plain' + } + }) +}) +``` + +--- + +## On Error (`onError`) + +_Caught whenever an error bubbles up from any lifecycle stage._ + +- Use to **customize error messages**, **handle 404**, **log**, or **retry**. +- Must be registered **before** the routes it should protect. + +```ts +new Elysia().onError(({ code, status }) => { + if (code === 'NOT_FOUND') return status(404, '❓ Not found') + return new Response('Oops', { status: 500 }) +}) +``` + +--- + +## After Response (`onAfterResponse`) + +_Runs **after** the response has been sent to the client._ + +- Perfect for **logging, metrics, cleanup**. 
+ +```ts +new Elysia().onAfterResponse(() => + console.log('✅ response sent at', Date.now()) +) +``` + +--- + +## Hook Types + +| Type | Scope | How to add | +| -------------------- | --------------------------------- | --------------------------------------------------------- | +| **Local Hook** | Single route | Inside route options (`afterHandle`, `beforeHandle`, …) | +| **Interceptor Hook** | Whole instance (and later routes) | `.onXxx(cb)` or `.use(plugin)` | + +> **Remember:** Hooks only affect routes **defined after** they are registered, except `onRequest` which is global because it runs before route matching. diff --git a/.agents/skills/elysiajs/references/macro.md b/.agents/skills/elysiajs/references/macro.md new file mode 100644 index 0000000..f89ee75 --- /dev/null +++ b/.agents/skills/elysiajs/references/macro.md @@ -0,0 +1,83 @@ +# Macro + +Composable Elysia function for controlling lifecycle/schema/context with full type safety. Available in hook after definition control by key-value label. 
+ +## Basic Pattern +```typescript +.macro({ + hi: (word: string) => ({ + beforeHandle() { console.log(word) } + }) +}) +.get('/', () => 'hi', { hi: 'Elysia' }) +``` + +## Property Shorthand +Object → function accepting boolean: +```typescript +.macro({ + // These equivalent: + isAuth: { resolve: () => ({ user: 'saltyaom' }) }, + isAuth(enabled: boolean) { if(enabled) return { resolve() {...} } } +}) +``` + +## Error Handling +Return `status`, don't throw: +```typescript +.macro({ + auth: { + resolve({ headers }) { + if(!headers.authorization) return status(401, 'Unauthorized') + return { user: 'SaltyAom' } + } + } +}) +``` + +## Resolve - Add Context Props +```typescript +.macro({ + user: (enabled: true) => ({ + resolve: () => ({ user: 'Pardofelis' }) + }) +}) +.get('/', ({ user }) => user, { user: true }) +``` + +### Named Macro for Type Inference +TypeScript limitation workaround: +```typescript +.macro('user', { resolve: () => ({ user: 'lilith' }) }) +.macro('user2', { user: true, resolve: ({ user }) => {} }) +``` + +## Schema +Auto-validates, infers types, stacks with other schemas: +```typescript +.macro({ + withFriends: { + body: t.Object({ friends: t.Tuple([...]) }) + } +}) +``` + +Use named single macro for lifecycle type inference within same macro. + +## Extension +Stack macros: +```typescript +.macro({ + sartre: { body: t.Object({...}) }, + fouco: { body: t.Object({...}) }, + lilith: { fouco: true, sartre: true, body: t.Object({...}) } +}) +``` + +## Deduplication +Auto-dedupes by property value. Custom seed: +```typescript +.macro({ sartre: (role: string) => ({ seed: role, ... 
}) }) +``` + +Max stack: 16 (prevents infinite loops) diff --git a/.agents/skills/elysiajs/references/plugin.md b/.agents/skills/elysiajs/references/plugin.md new file mode 100644 index 0000000..cd10e64 --- /dev/null +++ b/.agents/skills/elysiajs/references/plugin.md @@ -0,0 +1,207 @@ +# Plugins + +## Plugin = Decoupled Elysia Instance + +```ts +const plugin = new Elysia() + .decorate('plugin', 'hi') + .get('/plugin', ({ plugin }) => plugin) + +const app = new Elysia() + .use(plugin) // inherit properties + .get('/', ({ plugin }) => plugin) +``` + +**Inherits**: state, decorate +**Does NOT inherit**: lifecycle (isolated by default) + +## Dependency + +Each instance runs independently like microservice. **Must explicitly declare dependencies**. + +```ts +const auth = new Elysia() + .decorate('Auth', Auth) + +// ❌ Missing dependency +const main = new Elysia() + .get('/', ({ Auth }) => Auth.getProfile()) + +// ✅ Declare dependency +const main = new Elysia() + .use(auth) // required for Auth + .get('/', ({ Auth }) => Auth.getProfile()) +``` + +## Deduplication + +**Every plugin re-executes by default**. Use `name` + optional `seed` to deduplicate: + +```ts +const ip = new Elysia({ name: 'ip' }) // unique identifier + .derive({ as: 'global' }, ({ server, request }) => ({ + ip: server?.requestIP(request) + })) + +const router1 = new Elysia().use(ip) +const router2 = new Elysia().use(ip) +const server = new Elysia().use(router1).use(router2) +// `ip` only executes once due to deduplication +``` + +## Global vs Explicit Dependency + +**Global plugin** (rare, apply everywhere): +- Doesn't add types - cors, compress, helmet +- Global lifecycle no instance controls - tracing, logging +- Examples: OpenAPI docs, OpenTelemetry, logging + +**Explicit dependency** (default, recommended): +- Adds types - macro, state, model +- Business logic instances interact with - Auth, DB +- Examples: state management, ORM, auth, features + +## Scope + +**Lifecycle isolated by default**. 
Must specify scope to export. + +```ts +// ❌ NOT inherited by app +const profile = new Elysia() + .onBeforeHandle(({ cookie }) => throwIfNotSignIn(cookie)) + .get('/profile', () => 'Hi') + +const app = new Elysia() + .use(profile) + .patch('/rename', ({ body }) => updateProfile(body)) // No sign-in check + +// ✅ Exported to app +const profile = new Elysia() + .onBeforeHandle({ as: 'global' }, ({ cookie }) => throwIfNotSignIn(cookie)) + .get('/profile', () => 'Hi') +``` + +## Scope Levels + +1. **local** (default) - current + descendants only +2. **scoped** - parent + current + descendants +3. **global** - all instances (all parents, current, descendants) + +Example with `.onBeforeHandle({ as: 'local' }, ...)`: + +| type | child | current | parent | main | +|------|-------|---------|--------|------| +| local | ✅ | ✅ | ❌ | ❌ | +| scoped | ✅ | ✅ | ✅ | ❌ | +| global | ✅ | ✅ | ✅ | ✅ | + +## Config + +```ts +// Instance factory with config +const version = (v = 1) => new Elysia() + .get('/version', v) + +const app = new Elysia() + .use(version(1)) +``` + +## Functional Callback (not recommended) + +```ts +// Harder to handle scope/encapsulation +const plugin = (app: Elysia) => app + .state('counter', 0) + .get('/plugin', () => 'Hi') + +// Prefer new instance (better type inference, no perf diff) +``` + +## Guard (Apply to Multiple Routes) + +```ts +.guard( + { body: t.Object({ username: t.String(), password: t.String() }) }, + (app) => + app.post('/sign-up', ({ body }) => signUp(body)) + .post('/sign-in', ({ body }) => signIn(body)) +) +``` + +**Grouped guard** (merge group + guard): + +```ts +.group( + '/v1', + { body: t.Literal('Rikuhachima Aru') }, // guard here + (app) => app.post('/student', ({ body }) => body) +) +``` + +## Scope Casting + +**3 methods to apply hook to parent**: + +1. **Inline as** (single hook): +```ts +.derive({ as: 'scoped' }, () => ({ hi: 'ok' })) +``` + +2. 
**Guard as** (multiple hooks, no derive/resolve): +```ts +.guard({ + as: 'scoped', + response: t.String(), + beforeHandle() { console.log('ok') } +}) +``` + +3. **Instance as** (all hooks + schema): +```ts +const plugin = new Elysia() + .derive(() => ({ hi: 'ok' })) + .get('/child', ({ hi }) => hi) + .as('scoped') // lift scope up +``` + +`.as()` lifts scope: local → scoped → global + +## Lazy Load + +**Deferred module** (async plugin, non-blocking startup): + +```ts +// plugin.ts +export const loadStatic = async (app: Elysia) => { + const files = await loadAllFiles() + files.forEach((asset) => app.get(asset, file(asset))) + return app +} + +// main.ts +const app = new Elysia().use(loadStatic) +``` + +**Lazy-load module** (dynamic import): + +```ts +const app = new Elysia() + .use(import('./plugin')) // loaded after startup +``` + +**Testing** (wait for modules): + +```ts +await app.modules // ensure all deferred/lazy modules loaded +``` + +## Notes +[Inference] Based on docs patterns: +- Use inline values for static resources (performance optimization) +- Group routes by prefix for organization +- Extend context minimally (separation of concerns) +- Use `status()` over `set.status` for type safety +- Prefer `resolve()` over `derive()` when type integrity matters +- Plugins isolated by default (must declare scope explicitly) +- Use `name` for deduplication when plugin used multiple times +- Prefer explicit dependency over global (better modularity/tracking) diff --git a/.agents/skills/elysiajs/references/route.md b/.agents/skills/elysiajs/references/route.md new file mode 100644 index 0000000..c767283 --- /dev/null +++ b/.agents/skills/elysiajs/references/route.md @@ -0,0 +1,331 @@ +# ElysiaJS: Routing, Handlers & Context + +## Routing + +### Path Types + +```ts +new Elysia() + .get('/static', 'static path') // exact match + .get('/id/:id', 'dynamic path') // captures segment + .get('/id/*', 'wildcard path') // captures rest +``` + +**Path Priority**: static > 
dynamic > wildcard + +### Dynamic Paths + +```ts +new Elysia() + .get('/id/:id', ({ params: { id } }) => id) + .get('/id/:id/:name', ({ params: { id, name } }) => id + ' ' + name) +``` + +**Optional params**: `.get('/id/:id?', ...)` + +### HTTP Verbs + +- `.get()` - retrieve data +- `.post()` - submit/create +- `.put()` - replace +- `.patch()` - partial update +- `.delete()` - remove +- `.all()` - any method +- `.route(method, path, handler)` - custom verb + +### Grouping Routes + +```ts +new Elysia() + .group('/user', { body: t.Literal('auth') }, (app) => + app.post('/sign-in', ...) + .post('/sign-up', ...) +) + +// Or use prefix in constructor +new Elysia({ prefix: '/user' }) + .post('/sign-in', ...) +``` + +## Handlers + +### Handler = function accepting HTTP request, returning response + +```ts +// Inline value (compiled ahead, optimized) +.get('/', 'Hello Elysia') +.get('/video', file('video.mp4')) + +// Function handler +.get('/', () => 'hello') +.get('/', ({ params, query, body }) => {...}) +``` + +### Context Properties + +- `body` - HTTP message/form/file +- `query` - query string as object +- `params` - path parameters +- `headers` - HTTP headers +- `cookie` - mutable signal for cookies +- `store` - global mutable state +- `request` - Web Standard Request +- `server` - Bun server instance +- `path` - request pathname + +### Context Utilities + +```ts +import { redirect, form } from 'elysia' + +new Elysia().get('/', ({ status, set, form }) => { + // Status code (type-safe) + status(418, "I'm a teapot") + + // Set response props + set.headers['x-custom'] = 'value' + set.status = 418 // legacy, no type inference + + // Redirect + return redirect('https://...', 302) + + // Cookies (mutable signal, no get/set) + cookie.name.value // get + cookie.name.value = 'new' // set + + // FormData response + return form({ name: 'Party', images: [file('a.jpg')] }) + + // Single file + return file('document.pdf') +}) +``` + +### Streaming + +```ts +new Elysia() + 
.get('/stream', function* () {
    yield 1
    yield 2
    yield 3
  })
  // Server-Sent Events
  .get('/sse', function* () {
    yield sse('hello')
    yield sse({ event: 'msg', data: {...} })
  })
```

**Note**: Headers only settable before first yield

**Conditional stream**: returning without yield converts to normal response

## Context Extension

[Inference] Extend when property is:

- Global mutable (use `state`)
- Request/response related (use `decorate`)
- Derived from existing props (use `derive`/`resolve`)

### state() - Global Mutable

```ts
new Elysia()
  .state('version', 1)
  .get('/', ({ store: { version } }) => version)
  // Multiple
  .state({ counter: 0, visits: 0 })

  // Remap (create new from existing)
  .state(({ version, ...store }) => ({
    ...store,
    apiVersion: version
  }))
```

**Gotcha**: Use reference not value

```ts
new Elysia()
  // ✅ Correct
  .get('/', ({ store }) => store.counter++)

  // ❌ Wrong - loses reference
  .get('/', ({ store: { counter } }) => counter++)
```

### decorate() - Additional Context Props

```ts
new Elysia()
  .decorate('logger', new Logger())
  .get('/', ({ logger }) => logger.log('hi'))

  // Multiple
  .decorate({ logger: new Logger(), db: connection })
```

**When**: constant/readonly values, classes with internal state, singletons

### derive() - Create from Existing (Transform Lifecycle)

```ts
new Elysia()
  .derive(({ headers }) => ({
    bearer: headers.authorization?.startsWith('Bearer ')
      ? 
headers.authorization.slice(7) + : null + })) + .get('/', ({ bearer }) => bearer) +``` + +**Timing**: runs at transform (before validation) +**Type safety**: request props typed as `unknown` + +### resolve() - Type-Safe Derive (beforeHandle Lifecycle) + +```ts +new Elysia() + .guard({ + headers: t.Object({ + bearer: t.String({ pattern: '^Bearer .+$' }) + }) + }) + .resolve(({ headers }) => ({ + bearer: headers.bearer.slice(7) // typed correctly + })) +``` + +**Timing**: runs at beforeHandle (after validation) +**Type safety**: request props fully typed + +### Error from derive/resolve + +```ts +new Elysia() + .derive(({ headers, status }) => { + if (!headers.authorization) return status(400) + return { bearer: ... } + }) +``` + +Returns early if error returned + +## Patterns + +### Affix (Bulk Remap) + +```ts +const plugin = new Elysia({ name: 'setup' }).decorate({ + argon: 'a', + boron: 'b' +}) + +new Elysia() + .use(plugin) + .prefix('decorator', 'setup') // setupArgon, setupBoron + .prefix('all', 'setup') // remap everything +``` + +### Assignment Patterns + +1. **key-value**: `.state('key', value)` +2. **object**: `.state({ k1: v1, k2: v2 })` +3. **remap**: `.state(({old}) => ({new}))` + +## Testing + +```ts +const app = new Elysia().get('/', 'hi') + +// Programmatic test +app.handle(new Request('http://localhost/')) +``` + +## To Throw or Return + +Most of an error handling in Elysia can be done by throwing an error and will be handle in `onError`. + +But for `status` it can be a little bit confusing, since it can be used both as a return value or throw an error. + +It could either be **return** or **throw** based on your specific needs. + +- If an `status` is **throw**, it will be caught by `onError` middleware. +- If an `status` is **return**, it will be **NOT** caught by `onError` middleware. 
+ 

See the following code:

```typescript
import { Elysia, file } from 'elysia'

new Elysia()
  .onError(({ code, error, path }) => {
    if (code === 418) return 'caught'
  })
  .get('/throw', ({ status }) => {
    // This will be caught by onError
    throw status(418)
  })
  .get('/return', ({ status }) => {
    // This will NOT be caught by onError
    return status(418)
  })
```

## To Throw or Return

Elysia provides a `status` function for returning an HTTP status code; prefer it over `set.status`.

`status` can be imported from Elysia, but preferably extract it from the route handler Context for type safety.

```ts
import { Elysia, status } from 'elysia'

function doThing() {
  if (Math.random() > 0.33) return status(418, "I'm a teapot")
}

new Elysia().get('/', ({ status }) => {
  if (Math.random() > 0.33) return status(418)

  return 'ok'
})
```

Error handling in Elysia can be done by throwing an error, which will be handled in `onError`.

Status could either be **return** or **throw** based on your specific needs.

- If a `status` is **thrown**, it will be caught by the `onError` middleware.
- If a `status` is **returned**, it will **NOT** be caught by the `onError` middleware. 
+ +See the following code: + +```typescript +import { Elysia, file } from 'elysia' + +new Elysia() + .onError(({ code, error, path }) => { + if (code === 418) return 'caught' + }) + .get('/throw', ({ status }) => { + // This will be caught by onError + throw status(418) + }) + .get('/return', ({ status }) => { + // This will NOT be caught by onError + return status(418) + }) +``` + +## Notes + +[Inference] Based on docs patterns: + +- Use inline values for static resources (performance optimization) +- Group routes by prefix for organization +- Extend context minimally (separation of concerns) +- Use `status()` over `set.status` for type safety +- Prefer `resolve()` over `derive()` when type integrity matters diff --git a/.agents/skills/elysiajs/references/testing.md b/.agents/skills/elysiajs/references/testing.md new file mode 100644 index 0000000..ffcdff3 --- /dev/null +++ b/.agents/skills/elysiajs/references/testing.md @@ -0,0 +1,385 @@ +# Unit Testing + +## Basic Test Setup + +### Installation +```bash +bun add -d @elysiajs/eden +``` + +### Basic Test +```typescript +// test/app.test.ts +import { describe, expect, it } from 'bun:test' +import { Elysia } from 'elysia' + +describe('Elysia App', () => { + it('should return hello world', async () => { + const app = new Elysia() + .get('/', () => 'Hello World') + + const res = await app.handle( + new Request('http://localhost/') + ) + + expect(res.status).toBe(200) + expect(await res.text()).toBe('Hello World') + }) +}) +``` + +## Testing Routes + +### GET Request +```typescript +it('should get user by id', async () => { + const app = new Elysia() + .get('/user/:id', ({ params: { id } }) => ({ + id, + name: 'John Doe' + })) + + const res = await app.handle( + new Request('http://localhost/user/123') + ) + + const data = await res.json() + + expect(res.status).toBe(200) + expect(data).toEqual({ + id: '123', + name: 'John Doe' + }) +}) +``` + +### POST Request +```typescript +it('should create user', async () => { + 
const app = new Elysia() + .post('/user', ({ body }) => body) + + const res = await app.handle( + new Request('http://localhost/user', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + name: 'Jane Doe', + email: 'jane@example.com' + }) + }) + ) + + const data = await res.json() + + expect(res.status).toBe(200) + expect(data.name).toBe('Jane Doe') +}) +``` + +## Testing Module/Plugin + +### Module Structure +``` +src/ +├── modules/ +│ └── auth/ +│ ├── index.ts # Elysia instance +│ ├── service.ts +│ └── model.ts +└── index.ts +``` + +### Auth Module +```typescript +// src/modules/auth/index.ts +import { Elysia, t } from 'elysia' + +export const authModule = new Elysia({ prefix: '/auth' }) + .post('/login', ({ body, cookie: { session } }) => { + if (body.username === 'admin' && body.password === 'password') { + session.value = 'valid-session' + return { success: true } + } + return { success: false } + }, { + body: t.Object({ + username: t.String(), + password: t.String() + }) + }) + .get('/profile', ({ cookie: { session }, status }) => { + if (!session.value) { + return status(401, { error: 'Unauthorized' }) + } + return { username: 'admin' } + }) +``` + +### Auth Module Test +```typescript +// test/auth.test.ts +import { describe, expect, it } from 'bun:test' +import { authModule } from '../src/modules/auth' + +describe('Auth Module', () => { + it('should login successfully', async () => { + const res = await authModule.handle( + new Request('http://localhost/auth/login', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + username: 'admin', + password: 'password' + }) + }) + ) + + const data = await res.json() + expect(res.status).toBe(200) + expect(data.success).toBe(true) + }) + + it('should reject invalid credentials', async () => { + const res = await authModule.handle( + new Request('http://localhost/auth/login', { + method: 'POST', + headers: { + 
'Content-Type': 'application/json' + }, + body: JSON.stringify({ + username: 'wrong', + password: 'wrong' + }) + }) + ) + + const data = await res.json() + expect(data.success).toBe(false) + }) + + it('should return 401 for unauthenticated profile request', async () => { + const res = await authModule.handle( + new Request('http://localhost/auth/profile') + ) + + expect(res.status).toBe(401) + }) +}) +``` + +## Eden Treaty Testing + +### Setup +```typescript +import { treaty } from '@elysiajs/eden' +import { app } from '../src/modules/auth' + +const api = treaty(app) +``` + +### Eden Tests +```typescript +describe('Auth Module with Eden', () => { + it('should login with Eden', async () => { + const { data, error } = await api.auth.login.post({ + username: 'admin', + password: 'password' + }) + + expect(error).toBeNull() + expect(data?.success).toBe(true) + }) + + it('should get profile with Eden', async () => { + // First login + await api.auth.login.post({ + username: 'admin', + password: 'password' + }) + + // Then get profile + const { data, error } = await api.auth.profile.get() + + expect(error).toBeNull() + expect(data?.username).toBe('admin') + }) +}) +``` + +## Mocking Dependencies + +### With Decorators +```typescript +// app.ts +export const app = new Elysia() + .decorate('db', realDatabase) + .get('/users', ({ db }) => db.getUsers()) + +// test +import { app } from '../src/app' + +describe('App with mocked DB', () => { + it('should use mock database', async () => { + const mockDb = { + getUsers: () => [{ id: 1, name: 'Test User' }] + } + + const testApp = app.decorate('db', mockDb) + + const res = await testApp.handle( + new Request('http://localhost/users') + ) + + const data = await res.json() + expect(data).toEqual([{ id: 1, name: 'Test User' }]) + }) +}) +``` + +## Testing with Headers + +```typescript +it('should require authorization', async () => { + const app = new Elysia() + .get('/protected', ({ headers, status }) => { + if 
(!headers.authorization) { + return status(401) + } + return { data: 'secret' } + }) + + const res = await app.handle( + new Request('http://localhost/protected', { + headers: { + 'Authorization': 'Bearer token123' + } + }) + ) + + expect(res.status).toBe(200) +}) +``` + +## Testing Validation + +```typescript +import { Elysia, t } from 'elysia' + +it('should validate request body', async () => { + const app = new Elysia() + .post('/user', ({ body }) => body, { + body: t.Object({ + name: t.String(), + age: t.Number({ minimum: 0 }) + }) + }) + + // Valid request + const validRes = await app.handle( + new Request('http://localhost/user', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + name: 'John', + age: 25 + }) + }) + ) + expect(validRes.status).toBe(200) + + // Invalid request (negative age) + const invalidRes = await app.handle( + new Request('http://localhost/user', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + name: 'John', + age: -5 + }) + }) + ) + expect(invalidRes.status).toBe(400) +}) +``` + +## Testing WebSocket + +```typescript +it('should handle websocket connection', (done) => { + const app = new Elysia() + .ws('/chat', { + message(ws, message) { + ws.send('Echo: ' + message) + } + }) + + const ws = new WebSocket('ws://localhost:3000/chat') + + ws.onopen = () => { + ws.send('Hello') + } + + ws.onmessage = (event) => { + expect(event.data).toBe('Echo: Hello') + ws.close() + done() + } +}) +``` + +## Complete Example + +```typescript +// src/modules/auth/index.ts +import { Elysia, t } from 'elysia' + +export const authModule = new Elysia({ prefix: '/auth' }) + .post('/login', ({ body, cookie: { session } }) => { + if (body.username === 'admin' && body.password === 'password') { + session.value = 'valid-session' + return { success: true } + } + return { success: false } + }, { + body: t.Object({ + username: t.String(), + password: t.String() + }) + }) + 
.get('/profile', ({ cookie: { session }, status }) => { + if (!session.value) { + return status(401) + } + return { username: 'admin' } + }) + +// test/auth.test.ts +import { describe, expect, it } from 'bun:test' +import { treaty } from '@elysiajs/eden' +import { authModule } from '../src/modules/auth' + +const api = treaty(authModule) + +describe('Auth Module', () => { + it('should login successfully', async () => { + const { data, error } = await api.auth.login.post({ + username: 'admin', + password: 'password' + }) + + expect(error).toBeNull() + expect(data?.success).toBe(true) + }) + + it('should return 401 for unauthorized access', async () => { + const { error } = await api.auth.profile.get() + + expect(error?.status).toBe(401) + }) +}) +``` diff --git a/.agents/skills/elysiajs/references/validation.md b/.agents/skills/elysiajs/references/validation.md new file mode 100644 index 0000000..ba723e0 --- /dev/null +++ b/.agents/skills/elysiajs/references/validation.md @@ -0,0 +1,491 @@ +# Validation Schema - SKILLS.md + +## What It Is +Runtime validation + type inference + OpenAPI schema from single source. TypeBox-based with Standard Schema support. 
+ +## Basic Usage +```typescript +import { Elysia, t } from 'elysia' + +new Elysia() + .get('/id/:id', ({ params: { id } }) => id, { + params: t.Object({ id: t.Number({ minimum: 1 }) }), + response: { + 200: t.Number(), + 404: t.Literal('Not Found') + } + }) +``` + +## Schema Types +Third parameter of HTTP method: +- **body** - HTTP message +- **query** - URL query params +- **params** - Path params +- **headers** - Request headers +- **cookie** - Request cookies +- **response** - Response (per status) + +## Standard Schema Support +Use Zod, Valibot, ArkType, Effect, Yup, Joi: +```typescript +import { z } from 'zod' +import * as v from 'valibot' + +.get('/', ({ params, query }) => params.id, { + params: z.object({ id: z.coerce.number() }), + query: v.object({ name: v.literal('Lilith') }) +}) +``` + +Mix validators in same handler. + +## Body +```typescript +body: t.Object({ name: t.String() }) +``` + +GET/HEAD: body-parser disabled by default (RFC2616). + +### File Upload +```typescript +body: t.Object({ + file: t.File({ format: 'image/*' }), + multipleFiles: t.Files() +}) +// Auto-assumes multipart/form-data +``` + +### File (Standard Schema) +```typescript +import { fileType } from 'elysia' + +body: z.object({ + file: z.file().refine((file) => fileType(file, 'image/jpeg')) +}) +``` + +Use `fileType` for security (validates magic number, not just MIME). + +## Query +```typescript +query: t.Object({ name: t.String() }) +// /?name=Elysia +``` + +Auto-coerces to specified type. + +### Arrays +```typescript +query: t.Object({ name: t.Array(t.String()) }) +``` + +Formats supported: +- **nuqs**: `?name=a,b,c` (comma delimiter) +- **HTML form**: `?name=a&name=b&name=c` (multiple keys) + +## Params +```typescript +params: t.Object({ id: t.Number() }) +// /id/1 +``` + +Auto-inferred as string if schema not provided. + +## Headers +```typescript +headers: t.Object({ authorization: t.String() }) +``` + +`additionalProperties: true` by default. Always lowercase keys. 
+ +## Cookie +```typescript +cookie: t.Cookie({ + name: t.String() +}, { + secure: true, + httpOnly: true +}) +``` + +Or use `t.Object`. `additionalProperties: true` by default. + +## Response +```typescript +response: t.Object({ name: t.String() }) +``` + +### Per Status +```typescript +response: { + 200: t.Object({ name: t.String() }), + 400: t.Object({ error: t.String() }) +} +``` + +## Error Handling + +### Inline Error Property +```typescript +body: t.Object({ + x: t.Number({ error: 'x must be number' }) +}) +``` + +Or function: +```typescript +x: t.Number({ + error({ errors, type, validation, value }) { + return 'Expected x to be number' + } +}) +``` + +### onError Hook +```typescript +.onError(({ code, error }) => { + if (code === 'VALIDATION') + return error.message // or error.all[0].message +}) +``` + +`error.all` - list all error causes. `error.all.find(x => x.path === '/name')` - find specific field. + +## Reference Models +Name + reuse models: +```typescript +.model({ + sign: t.Object({ + username: t.String(), + password: t.String() + }) +}) +.post('/sign-in', ({ body }) => body, { + body: 'sign', + response: 'sign' +}) +``` + +Extract to plugin: +```typescript +// auth.model.ts +export const authModel = new Elysia().model({ sign: t.Object({...}) }) + +// main.ts +new Elysia().use(authModel).post('/', ..., { body: 'sign' }) +``` + +### Naming Convention +Prevent duplicates with namespaces: +```typescript +.model({ + 'auth.admin': t.Object({...}), + 'auth.user': t.Object({...}) +}) +``` + +Or use `prefix` / `suffix` to rename models in current instance +```typescript +.model({ sign: t.Object({...}) }) +.prefix('model', 'auth') +.post('/', () => '', { + body: 'auth.User' +}) +``` + +Models with `prefix` will be capitalized. + +## TypeScript Types +```typescript +const MyType = t.Object({ hello: t.Literal('Elysia') }) +type MyType = typeof MyType.static +``` + +Single schema → runtime validation + coercion + TypeScript type + OpenAPI. 
+ +## Guard +Apply schema to multiple handlers. Affects all handlers after definition. + +### Basic Usage +```typescript +import { Elysia, t } from 'elysia' + +new Elysia() + .get('/none', ({ query }) => 'hi') + .guard({ + query: t.Object({ + name: t.String() + }) + }) + .get('/query', ({ query }) => query) + .listen(3000) +``` + +Ensures `query.name` string required for all handlers after guard. + +### Behavior +| Path | Response | +|---------------|----------| +| /none | hi | +| /none?name=a | hi | +| /query | error | +| /query?name=a | a | + +### Precedence +- Multiple global schemas: latest wins +- Global vs local: local wins + +### Schema Types + +1. override (default) +Latest schema overrides collided schema. +```typescript +.guard({ query: t.Object({ name: t.String() }) }) +.guard({ query: t.Object({ id: t.Number() }) }) +// Only id required, name overridden +``` + +2. standalone +Both schemas run independently. Both validated. +```typescript +.guard({ query: t.Object({ name: t.String() }) }, { type: 'standalone' }) +.guard({ query: t.Object({ id: t.Number() }) }, { type: 'standalone' }) +// Both name AND id required +``` + +# Typebox Validation (Elysia.t) + +Elysia.t = TypeBox with server-side pre-configuration + HTTP-specific types + +**TypeBox API mirrors TypeScript syntax** but provides runtime validation + +## Basic Types + +| TypeBox | TypeScript | Example Value | +|---------|------------|---------------| +| `t.String()` | `string` | `"hello"` | +| `t.Number()` | `number` | `42` | +| `t.Boolean()` | `boolean` | `true` | +| `t.Array(t.Number())` | `number[]` | `[1, 2, 3]` | +| `t.Object({ x: t.Number() })` | `{ x: number }` | `{ x: 10 }` | +| `t.Null()` | `null` | `null` | +| `t.Literal(42)` | `42` | `42` | + +## Attributes (JSON Schema 7) + +```ts +// Email format +t.String({ format: 'email' }) + +// Number constraints +t.Number({ minimum: 10, maximum: 100 }) + +// Array constraints +t.Array(t.Number(), { + minItems: 1, // min items + maxItems: 5 // 
max items +}) + +// Object - allow extra properties +t.Object( + { x: t.Number() }, + { additionalProperties: true } // default: false +) +``` + +## Common Patterns + +### Union (Multiple Types) +```ts +t.Union([t.String(), t.Number()]) +// type: string | number +// values: "Hello" or 123 +``` + +### Optional (Field Optional) +```ts +t.Object({ + x: t.Number(), + y: t.Optional(t.Number()) // can be undefined +}) +// type: { x: number, y?: number } +// value: { x: 123 } or { x: 123, y: 456 } +``` + +### Partial (All Fields Optional) +```ts +t.Partial(t.Object({ + x: t.Number(), + y: t.Number() +})) +// type: { x?: number, y?: number } +// value: {} or { y: 123 } or { x: 1, y: 2 } +``` + +## Elysia-Specific Types + +### UnionEnum (One of Values) +```ts +t.UnionEnum(['rapi', 'anis', 1, true, false]) +``` + +### File (Single File Upload) +```ts +t.File({ + type: 'image', // or ['image', 'video'] + minSize: '1k', // 1024 bytes + maxSize: '5m' // 5242880 bytes +}) +``` + +**File unit suffixes**: +- `m` = MegaByte (1048576 bytes) +- `k` = KiloByte (1024 bytes) + +### Files (Multiple Files) +```ts +t.Files() // extends File + array +``` + +### Cookie (Cookie Jar) +```ts +t.Cookie({ + name: t.String() +}, { + secrets: 'secret-key' // or ['key1', 'key2'] for rotation +}) +``` + +### Nullable (Allow null) +```ts +t.Nullable(t.String()) +// type: string | null +``` + +### MaybeEmpty (Allow null + undefined) +```ts +t.MaybeEmpty(t.String()) +// type: string | null | undefined +``` + +### Form (FormData Validation) +```ts +t.Form({ + someValue: t.File() +}) +// Syntax sugar for t.Object with FormData support +``` + +### UInt8Array (Buffer → Uint8Array) +```ts +t.UInt8Array() +// For binary file uploads with arrayBuffer parser +``` + +### ArrayBuffer (Buffer → ArrayBuffer) +```ts +t.ArrayBuffer() +// For binary file uploads with arrayBuffer parser +``` + +### ObjectString (String → Object) +```ts +t.ObjectString() +// Accepts: '{"x":1}' → parses to { x: 1 } +// Use in: query 
string, headers, FormData +``` + +### BooleanString (String → Boolean) +```ts +t.BooleanString() +// Accepts: 'true'/'false' → parses to boolean +// Use in: query string, headers, FormData +``` + +### Numeric (String/Number → Number) +```ts +t.Numeric() +// Accepts: '123' or 123 → transforms to 123 +// Use in: path params, query string +``` + +## Elysia Behavior Differences from TypeBox + +### 1. Optional Behavior + +In Elysia, `t.Optional` makes **entire route parameter** optional (not object field): + +```ts +.get('/optional', ({ query }) => query, { + query: t.Optional( // makes query itself optional + t.Object({ name: t.String() }) + ) +}) +``` + +**Different from TypeBox**: TypeBox uses Optional for object fields only + +### 2. Number → Numeric Auto-Conversion + +**Route schema only** (not nested objects): + +```ts +.get('/:id', ({ id }) => id, { + params: t.Object({ + id: t.Number() // ✅ Auto-converts to t.Numeric() + }), + body: t.Object({ + id: t.Number() // ❌ NOT converted (stays t.Number()) + }) +}) + +// Outside route schema +t.Number() // ❌ NOT converted +``` + +**Why**: HTTP headers/query/params always strings. Auto-conversion parses numeric strings. + +### 3. Boolean → BooleanString Auto-Conversion + +Same as Number → Numeric: + +```ts +.get('/:active', ({ active }) => active, { + params: t.Object({ + active: t.Boolean() // ✅ Auto-converts to t.BooleanString() + }), + body: t.Object({ + active: t.Boolean() // ❌ NOT converted + }) +}) +``` + +## Usage Pattern + +```ts +import { Elysia, t } from 'elysia' + +new Elysia() + .post('/', ({ body }) => `Hello ${body}`, { + body: t.String() // validates body is string + }) + .listen(3000) +``` + +**Validation flow**: +1. Request arrives +2. Schema validates against HTTP body/params/query/headers +3. If valid → handler executes +4. 
If invalid → Error Life Cycle + +## Notes + +[Inference] Based on docs: +- TypeBox mirrors TypeScript but adds runtime validation +- Elysia.t extends TypeBox with HTTP-specific types +- Auto-conversion (Number→Numeric, Boolean→BooleanString) only for route schemas +- Use `t.Optional` for optional route params (different from TypeBox behavior) +- File validation supports unit suffixes ('1k', '5m') +- ObjectString/BooleanString for parsing strings in query/headers +- Cookie supports key rotation with array of secrets diff --git a/.agents/skills/elysiajs/references/websocket.md b/.agents/skills/elysiajs/references/websocket.md new file mode 100644 index 0000000..b2c86a8 --- /dev/null +++ b/.agents/skills/elysiajs/references/websocket.md @@ -0,0 +1,250 @@ +# WebSocket + +## Basic WebSocket + +```typescript +import { Elysia } from 'elysia' + +new Elysia() + .ws('/chat', { + message(ws, message) { + ws.send(message) // Echo back + } + }) + .listen(3000) +``` + +## With Validation + +```typescript +import { Elysia, t } from 'elysia' + +.ws('/chat', { + body: t.Object({ + message: t.String(), + username: t.String() + }), + response: t.Object({ + message: t.String(), + timestamp: t.Number() + }), + message(ws, body) { + ws.send({ + message: body.message, + timestamp: Date.now() + }) + } +}) +``` + +## Lifecycle Events + +```typescript +.ws('/chat', { + open(ws) { + console.log('Client connected') + }, + message(ws, message) { + console.log('Received:', message) + ws.send('Echo: ' + message) + }, + close(ws) { + console.log('Client disconnected') + }, + error(ws, error) { + console.error('Error:', error) + } +}) +``` + +## Broadcasting + +```typescript +const connections = new Set() + +.ws('/chat', { + open(ws) { + connections.add(ws) + }, + message(ws, message) { + // Broadcast to all connected clients + for (const client of connections) { + client.send(message) + } + }, + close(ws) { + connections.delete(ws) + } +}) +``` + +## With Authentication + +```typescript 
+.ws('/chat', { + beforeHandle({ headers, status }) { + const token = headers.authorization?.replace('Bearer ', '') + if (!verifyToken(token)) { + return status(401) + } + }, + message(ws, message) { + ws.send(message) + } +}) +``` + +## Room-Based Chat + +```typescript +const rooms = new Map>() + +.ws('/chat/:room', { + open(ws) { + const room = ws.data.params.room + if (!rooms.has(room)) { + rooms.set(room, new Set()) + } + rooms.get(room)!.add(ws) + }, + message(ws, message) { + const room = ws.data.params.room + const clients = rooms.get(room) + + if (clients) { + for (const client of clients) { + client.send(message) + } + } + }, + close(ws) { + const room = ws.data.params.room + const clients = rooms.get(room) + + if (clients) { + clients.delete(ws) + if (clients.size === 0) { + rooms.delete(room) + } + } + } +}) +``` + +## With State/Context + +```typescript +.ws('/chat', { + open(ws) { + ws.data.userId = generateUserId() + ws.data.joinedAt = Date.now() + }, + message(ws, message) { + const response = { + userId: ws.data.userId, + message, + timestamp: Date.now() + } + ws.send(response) + } +}) +``` + +## Client Usage (Browser) + +```typescript +const ws = new WebSocket('ws://localhost:3000/chat') + +ws.onopen = () => { + console.log('Connected') + ws.send('Hello Server!') +} + +ws.onmessage = (event) => { + console.log('Received:', event.data) +} + +ws.onerror = (error) => { + console.error('Error:', error) +} + +ws.onclose = () => { + console.log('Disconnected') +} +``` + +## Eden Treaty WebSocket + +```typescript +// Server +export const app = new Elysia() + .ws('/chat', { + message(ws, message) { + ws.send(message) + } + }) + +export type App = typeof app + +// Client +import { treaty } from '@elysiajs/eden' +import type { App } from './server' + +const api = treaty('localhost:3000') +const chat = api.chat.subscribe() + +chat.subscribe((message) => { + console.log('Received:', message) +}) + +chat.send('Hello!') +``` + +## Headers in WebSocket + 
+```typescript +.ws('/chat', { + header: t.Object({ + authorization: t.String() + }), + beforeHandle({ headers, status }) { + const token = headers.authorization?.replace('Bearer ', '') + if (!token) return status(401) + }, + message(ws, message) { + ws.send(message) + } +}) +``` + +## Query Parameters + +```typescript +.ws('/chat', { + query: t.Object({ + username: t.String() + }), + message(ws, message) { + const username = ws.data.query.username + ws.send(`${username}: ${message}`) + } +}) + +// Client +const ws = new WebSocket('ws://localhost:3000/chat?username=john') +``` + +## Compression + +```typescript +new Elysia({ + websocket: { + perMessageDeflate: true + } +}) + .ws('/chat', { + message(ws, message) { + ws.send(message) + } + }) +``` diff --git a/.agents/skills/find-skills/SKILL.md b/.agents/skills/find-skills/SKILL.md new file mode 100644 index 0000000..f92bce2 --- /dev/null +++ b/.agents/skills/find-skills/SKILL.md @@ -0,0 +1,143 @@ +--- +name: find-skills +description: Helps users discover and install agent skills when they ask questions like "how do I do X", "find a skill for X", "is there a skill that can...", or express interest in extending capabilities. This skill should be used when the user is looking for functionality that might exist as an installable skill. +--- + +# Find Skills + +This skill helps you discover and install skills from the open agent skills ecosystem. + +## When to Use This Skill + +Use this skill when the user: + +- Asks "how do I do X" where X might be a common task with an existing skill +- Says "find a skill for X" or "is there a skill for X" +- Asks "can you do X" where X is a specialized capability +- Expresses interest in extending agent capabilities +- Wants to search for tools, templates, or workflows +- Mentions they wish they had help with a specific domain (design, testing, deployment, etc.) + +## What is the Skills CLI? + +The Skills CLI (`npx skills`) is the package manager for the open agent skills ecosystem. 
Skills are modular packages that extend agent capabilities with specialized knowledge, workflows, and tools. + +**Key commands:** + +- `npx skills find [query]` - Search for skills interactively or by keyword +- `npx skills add ` - Install a skill from GitHub or other sources +- `npx skills check` - Check for skill updates +- `npx skills update` - Update all installed skills + +**Browse skills at:** https://skills.sh/ + +## How to Help Users Find Skills + +### Step 1: Understand What They Need + +When a user asks for help with something, identify: + +1. The domain (e.g., React, testing, design, deployment) +2. The specific task (e.g., writing tests, creating animations, reviewing PRs) +3. Whether this is a common enough task that a skill likely exists + +### Step 2: Check the Leaderboard First + +Before running a CLI search, check the [skills.sh leaderboard](https://skills.sh/) to see if a well-known skill already exists for the domain. The leaderboard ranks skills by total installs, surfacing the most popular and battle-tested options. + +For example, top skills for web development include: + +- `vercel-labs/agent-skills` — React, Next.js, web design (100K+ installs each) +- `anthropics/skills` — Frontend design, document processing (100K+ installs) + +### Step 3: Search for Skills + +If the leaderboard doesn't cover the user's need, run the find command: + +```bash +npx skills find [query] +``` + +For example: + +- User asks "how do I make my React app faster?" → `npx skills find react performance` +- User asks "can you help me with PR reviews?" → `npx skills find pr review` +- User asks "I need to create a changelog" → `npx skills find changelog` + +### Step 4: Verify Quality Before Recommending + +**Do not recommend a skill based solely on search results.** Always verify: + +1. **Install count** — Prefer skills with 1K+ installs. Be cautious with anything under 100. +2. 
**Source reputation** — Official sources (`vercel-labs`, `anthropics`, `microsoft`) are more trustworthy than unknown authors. +3. **GitHub stars** — Check the source repository. A skill from a repo with <100 stars should be treated with skepticism. + +### Step 5: Present Options to the User + +When you find relevant skills, present them to the user with: + +1. The skill name and what it does +2. The install count and source +3. The install command they can run +4. A link to learn more at skills.sh + +Example response: + +``` +I found a skill that might help! The "react-best-practices" skill provides +React and Next.js performance optimization guidelines from Vercel Engineering. +(185K installs) + +To install it: +npx skills add vercel-labs/agent-skills@react-best-practices + +Learn more: https://skills.sh/vercel-labs/agent-skills/react-best-practices +``` + +### Step 6: Offer to Install + +If the user wants to proceed, you can install the skill for them: + +```bash +npx skills add -g -y +``` + +The `-g` flag installs globally (user-level) and `-y` skips confirmation prompts. + +## Common Skill Categories + +When searching, consider these common categories: + +| Category | Example Queries | +| --------------- | ---------------------------------------- | +| Web Development | react, nextjs, typescript, css, tailwind | +| Testing | testing, jest, playwright, e2e | +| DevOps | deploy, docker, kubernetes, ci-cd | +| Documentation | docs, readme, changelog, api-docs | +| Code Quality | review, lint, refactor, best-practices | +| Design | ui, ux, design-system, accessibility | +| Productivity | workflow, automation, git | + +## Tips for Effective Searches + +1. **Use specific keywords**: "react testing" is better than just "testing" +2. **Try alternative terms**: If "deploy" doesn't work, try "deployment" or "ci-cd" +3. 
**Check popular sources**: Many skills come from `vercel-labs/agent-skills` or `ComposioHQ/awesome-claude-skills` + +## When No Skills Are Found + +If no relevant skills exist: + +1. Acknowledge that no existing skill was found +2. Offer to help with the task directly using your general capabilities +3. Suggest the user could create their own skill with `npx skills init` + +Example: + +``` +I searched for skills related to "xyz" but didn't find any matches. +I can still help you with this task directly! Would you like me to proceed? + +If this is something you do often, you could create your own skill: +npx skills init my-xyz-skill +``` diff --git a/.agents/skills/frontend-design/LICENSE.txt b/.agents/skills/frontend-design/LICENSE.txt new file mode 100644 index 0000000..f433b1a --- /dev/null +++ b/.agents/skills/frontend-design/LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/.agents/skills/frontend-design/SKILL.md b/.agents/skills/frontend-design/SKILL.md new file mode 100644 index 0000000..f709fde --- /dev/null +++ b/.agents/skills/frontend-design/SKILL.md @@ -0,0 +1,45 @@ +--- +name: frontend-design +description: Create distinctive, production-grade frontend interfaces with high design quality. Use this skill when the user asks to build web components, pages, artifacts, posters, or applications (examples include websites, landing pages, dashboards, React components, HTML/CSS layouts, or when styling/beautifying any web UI). 
Generates creative, polished code and UI design that avoids generic AI aesthetics. +license: Complete terms in LICENSE.txt +--- + +This skill guides creation of distinctive, production-grade frontend interfaces that avoid generic "AI slop" aesthetics. Implement real working code with exceptional attention to aesthetic details and creative choices. + +The user provides frontend requirements: a component, page, application, or interface to build. They may include context about the purpose, audience, or technical constraints. + +## Design Thinking + +Before coding, understand the context and commit to a BOLD aesthetic direction: + +- **Purpose**: What problem does this interface solve? Who uses it? +- **Tone**: Pick an extreme: brutally minimal, maximalist chaos, retro-futuristic, organic/natural, luxury/refined, playful/toy-like, editorial/magazine, brutalist/raw, art deco/geometric, soft/pastel, industrial/utilitarian, etc. There are so many flavors to choose from. Use these for inspiration but design one that is true to the aesthetic direction. +- **Constraints**: Technical requirements (framework, performance, accessibility). +- **Differentiation**: What makes this UNFORGETTABLE? What's the one thing someone will remember? + +**CRITICAL**: Choose a clear conceptual direction and execute it with precision. Bold maximalism and refined minimalism both work - the key is intentionality, not intensity. + +Then implement working code (HTML/CSS/JS, React, Vue, etc.) that is: + +- Production-grade and functional +- Visually striking and memorable +- Cohesive with a clear aesthetic point-of-view +- Meticulously refined in every detail + +## Frontend Aesthetics Guidelines + +Focus on: + +- **Typography**: Choose fonts that are beautiful, unique, and interesting. Avoid generic fonts like Arial and Inter; opt instead for distinctive choices that elevate the frontend's aesthetics; unexpected, characterful font choices. Pair a distinctive display font with a refined body font. 
+- **Color & Theme**: Commit to a cohesive aesthetic. Use CSS variables for consistency. Dominant colors with sharp accents outperform timid, evenly-distributed palettes. +- **Motion**: Use animations for effects and micro-interactions. Prioritize CSS-only solutions for HTML. Use Motion library for React when available. Focus on high-impact moments: one well-orchestrated page load with staggered reveals (animation-delay) creates more delight than scattered micro-interactions. Use scroll-triggering and hover states that surprise. +- **Spatial Composition**: Unexpected layouts. Asymmetry. Overlap. Diagonal flow. Grid-breaking elements. Generous negative space OR controlled density. +- **Backgrounds & Visual Details**: Create atmosphere and depth rather than defaulting to solid colors. Add contextual effects and textures that match the overall aesthetic. Apply creative forms like gradient meshes, noise textures, geometric patterns, layered transparencies, dramatic shadows, decorative borders, custom cursors, and grain overlays. + +NEVER use generic AI-generated aesthetics like overused font families (Inter, Roboto, Arial, system fonts), cliched color schemes (particularly purple gradients on white backgrounds), predictable layouts and component patterns, and cookie-cutter design that lacks context-specific character. + +Interpret creatively and make unexpected choices that feel genuinely designed for the context. No design should be the same. Vary between light and dark themes, different fonts, different aesthetics. NEVER converge on common choices (Space Grotesk, for example) across generations. + +**IMPORTANT**: Match implementation complexity to the aesthetic vision. Maximalist designs need elaborate code with extensive animations and effects. Minimalist or refined designs need restraint, precision, and careful attention to spacing, typography, and subtle details. Elegance comes from executing the vision well. 
+ +Remember: Claude is capable of extraordinary creative work. Don't hold back, show what can truly be created when thinking outside the box and committing fully to a distinctive vision. diff --git a/.agents/skills/kiranism-shadcn-dashboard/SKILL.md b/.agents/skills/kiranism-shadcn-dashboard/SKILL.md new file mode 100644 index 0000000..c578cc2 --- /dev/null +++ b/.agents/skills/kiranism-shadcn-dashboard/SKILL.md @@ -0,0 +1,615 @@ +--- +name: kiranism-shadcn-dashboard +description: | + Guide for building features, pages, tables, forms, themes, and navigation in this Next.js 16 shadcn dashboard template. Use this skill whenever the user wants to add a new page, create a feature module, build a data table, add a form, configure navigation items, add a theme, set up RBAC access control, or work with the dashboard's patterns and conventions. Also triggers when adding routes under /dashboard, working with Clerk auth/orgs/billing, creating mock APIs, or modifying the sidebar. Even if the user doesn't mention "dashboard" explicitly — if they're adding UI, pages, or features to this project, use this skill. +--- + +# Dashboard Development Guide + +This skill encodes the exact patterns and conventions used in this Next.js 16 + shadcn/ui admin dashboard template. 
+ +## Quick Reference: What Goes Where + +| Task | Location | +| --------------------- | --------------------------------------- | +| New page | `src/app/dashboard//page.tsx` | +| New feature module | `src/features//` | +| Feature components | `src/features//components/` | +| API types | `src/features//api/types.ts` | +| Service layer | `src/features//api/service.ts` | +| Query options | `src/features//api/queries.ts` | +| Mutation options | `src/features//api/mutations.ts` | +| Zod schemas | `src/features//schemas/.ts` | +| Filter/select options | `src/features//constants/` | +| Nav config | `src/config/nav-config.ts` | +| Types | `src/types/index.ts` | +| Mock data | `src/constants/mock-api-.ts` | +| Search params | `src/lib/searchparams.ts` | +| Query client | `src/lib/query-client.ts` | +| Theme CSS | `src/styles/themes/.css` | +| Theme registry | `src/components/themes/theme.config.ts` | +| Custom hook | `src/hooks/` | +| Icons registry | `src/components/icons.tsx` | + +--- + +## Adding a New Feature (End-to-End) + +When a user asks to add a feature (e.g., "add an orders page"), follow these steps in order. Each step below shows the minimal pattern — see reference files for full templates. + +### Step 1: Mock API (`src/constants/mock-api-.ts`) + +See [references/mock-api-guide.md](references/mock-api-guide.md) for the complete template. 
Key structure: + +```tsx +import { faker } from '@faker-js/faker'; +import { matchSorter } from 'match-sorter'; +import { delay } from './mock-api'; + +export type Order = { + id: number; + customer: string; + status: string; + total: number; + created_at: string; + updated_at: string; +}; + +export const fakeOrders = { + records: [] as Order[], + initialize() { + /* generate with faker */ + }, + async getOrders({ page, limit, search, sort }) { + /* filter, sort, paginate, return { items, total_items } */ + }, + async getOrderById(id: number) { + /* find by id */ + }, + async createOrder(data) { + /* push to records */ + }, + async updateOrder(id, data) { + /* merge into record */ + }, + async deleteOrder(id) { + /* filter out */ + } +}; +fakeOrders.initialize(); +``` + +Every method should call `await delay(800)` to simulate network latency. Use `matchSorter` for search. Return `{ items, total_items }` from list methods. + +### Step 2: API Layer (`src/features//api/`) + +Each feature has 4 API files: **types** → **service** → **queries** → **mutations**. + +**Types** (`api/types.ts`) — re-export the entity type from mock API, plus filter/response/payload types: + +```tsx +export type { Order } from '@/constants/mock-api-orders'; +export type OrderFilters = { page?: number; limit?: number; search?: string; sort?: string }; +export type OrdersResponse = { items: Order[]; total_items: number }; +export type OrderMutationPayload = { customer: string; status: string; total: number }; +``` + +**Service** (`api/service.ts`) — data access layer. 
One exported function per operation: + +```tsx +import { fakeOrders } from '@/constants/mock-api-orders'; +import type { OrderFilters, OrdersResponse, OrderMutationPayload } from './types'; + +export async function getOrders(filters: OrderFilters): Promise { + return fakeOrders.getOrders(filters); +} +export async function getOrderById(id: number) { + return fakeOrders.getOrderById(id); +} +export async function createOrder(data: OrderMutationPayload) { + return fakeOrders.createOrder(data); +} +export async function updateOrder(id: number, data: OrderMutationPayload) { + return fakeOrders.updateOrder(id, data); +} +export async function deleteOrder(id: number) { + return fakeOrders.deleteOrder(id); +} +``` + +**Queries** (`api/queries.ts`) — query key factory + query options: + +```tsx +import { queryOptions } from '@tanstack/react-query'; +import { getOrders, getOrderById } from './service'; +import type { Order, OrderFilters } from './types'; + +export type { Order }; + +export const orderKeys = { + all: ['orders'] as const, + list: (filters: OrderFilters) => [...orderKeys.all, 'list', filters] as const, + detail: (id: number) => [...orderKeys.all, 'detail', id] as const +}; + +export const ordersQueryOptions = (filters: OrderFilters) => + queryOptions({ + queryKey: orderKeys.list(filters), + queryFn: () => getOrders(filters) + }); + +export const orderByIdOptions = (id: number) => + queryOptions({ + queryKey: orderKeys.detail(id), + queryFn: () => getOrderById(id) + }); +``` + +**Mutations** (`api/mutations.ts`) — use `mutationOptions` + `getQueryClient()` (not custom hooks with `useQueryClient()`): + +```tsx +import { mutationOptions } from '@tanstack/react-query'; +import { getQueryClient } from '@/lib/query-client'; +import { createOrder, updateOrder, deleteOrder } from './service'; +import { orderKeys } from './queries'; +import type { OrderMutationPayload } from './types'; + +export const createOrderMutation = mutationOptions({ + mutationFn: (data: 
OrderMutationPayload) => createOrder(data), + onSuccess: () => { + getQueryClient().invalidateQueries({ queryKey: orderKeys.all }); + } +}); + +export const updateOrderMutation = mutationOptions({ + mutationFn: ({ id, values }: { id: number; values: OrderMutationPayload }) => + updateOrder(id, values), + onSuccess: () => { + getQueryClient().invalidateQueries({ queryKey: orderKeys.all }); + } +}); + +export const deleteOrderMutation = mutationOptions({ + mutationFn: (id: number) => deleteOrder(id), + onSuccess: () => { + getQueryClient().invalidateQueries({ queryKey: orderKeys.all }); + } +}); +``` + +`mutationOptions` is the right abstraction because it works outside React (event handlers, tests, utilities), composes via spread at the call site, and uses `getQueryClient()` which handles both SSR (fresh per request) and client (singleton) correctly. See [references/query-abstractions.md](references/query-abstractions.md) for the full rationale. + +### Step 3: Zod Schema (`src/features//schemas/.ts`) + +```tsx +import { z } from 'zod'; + +export const orderSchema = z.object({ + customer: z.string().min(2, 'Customer name must be at least 2 characters'), + status: z.string().min(1, 'Please select a status'), + total: z.number({ message: 'Total is required' }) +}); + +export type OrderFormValues = z.infer; +``` + +### Step 4: Feature Components + +Create `src/features//components/` with: + +**Listing page** (server component — `-listing.tsx`): + +```tsx +import { HydrationBoundary, dehydrate } from '@tanstack/react-query'; +import { getQueryClient } from '@/lib/query-client'; +import { searchParamsCache } from '@/lib/searchparams'; +import { ordersQueryOptions } from '../api/queries'; +import { OrderTable, OrderTableSkeleton } from './orders-table'; +import { Suspense } from 'react'; + +export default function OrderListingPage() { + const page = searchParamsCache.get('page'); + const search = searchParamsCache.get('name'); + const pageLimit = 
searchParamsCache.get('perPage'); + const sort = searchParamsCache.get('sort'); + + const filters = { + page, + limit: pageLimit, + ...(search && { search }), + ...(sort && { sort }) + }; + + const queryClient = getQueryClient(); + void queryClient.prefetchQuery(ordersQueryOptions(filters)); + + return ( + + }> + + + + ); +} +``` + +**Table + skeleton** (client component — `orders-table/index.tsx`): + +```tsx +'use client'; +import { useSuspenseQuery } from '@tanstack/react-query'; +import { parseAsInteger, parseAsString, useQueryStates } from 'nuqs'; +import { getSortingStateParser } from '@/lib/parsers'; +import { useDataTable } from '@/hooks/use-data-table'; +import { DataTable } from '@/components/ui/table/data-table'; +import { DataTableToolbar } from '@/components/ui/table/data-table-toolbar'; +import { Skeleton } from '@/components/ui/skeleton'; +import { ordersQueryOptions } from '../../api/queries'; +import { columns } from './columns'; + +const columnIds = columns.map((c) => c.id).filter(Boolean) as string[]; + +export function OrderTable() { + const [params] = useQueryStates({ + page: parseAsInteger.withDefault(1), + perPage: parseAsInteger.withDefault(10), + name: parseAsString, + sort: getSortingStateParser(columnIds).withDefault([]) + }); + + const filters = { + page: params.page, + limit: params.perPage, + ...(params.name && { search: params.name }), + ...(params.sort.length > 0 && { sort: JSON.stringify(params.sort) }) + }; + + const { data } = useSuspenseQuery(ordersQueryOptions(filters)); + + const { table } = useDataTable({ + data: data.items, + columns, + pageCount: Math.ceil(data.total_items / params.perPage), + shallow: true, + debounceMs: 500, + initialState: { columnPinning: { right: ['actions'] } } + }); + + return ( + + + + ); +} + +export function OrderTableSkeleton() { + return ( +
+ + +
+ ); +} +``` + +**Column definitions** (`orders-table/columns.tsx`): + +Each column needs `id`, `accessorKey` (or `accessorFn`), `header` with `DataTableColumnHeader`, and optionally `meta` for filtering + `enableColumnFilter: true`. + +```tsx +export const columns: ColumnDef[] = [ + { + id: 'customer', + accessorKey: 'customer', + header: ({ column }) => , + meta: { label: 'Customer', placeholder: 'Search...', variant: 'text', icon: Icons.text }, + enableColumnFilter: true + }, + { + id: 'status', + accessorKey: 'status', + header: ({ column }) => , + cell: ({ cell }) => ( + + {cell.getValue()} + + ), + enableColumnFilter: true, + meta: { label: 'Status', variant: 'multiSelect', options: STATUS_OPTIONS } + }, + { id: 'actions', cell: ({ row }) => } +]; +``` + +Filter `meta.variant` options: `text`, `number`, `range`, `date`, `dateRange`, `select`, `multiSelect`, `boolean`. For multiSelect, provide `options: { value, label, icon? }[]`. + +**Cell actions** (`orders-table/cell-action.tsx`): + +Pattern: `DropdownMenu` with edit/delete items + `AlertModal` for delete confirmation + `useMutation` for the delete API call. + +```tsx +import { deleteOrderMutation } from '../../api/mutations'; + +export const CellAction: React.FC<{ data: Order }> = ({ data }) => { + const [deleteOpen, setDeleteOpen] = useState(false); + const deleteMutation = useMutation({ + ...deleteOrderMutation, + onSuccess: () => { + toast.success('Deleted'); + setDeleteOpen(false); + } + }); + return ( + <> + setDeleteOpen(false)} + onConfirm={() => deleteMutation.mutate(data.id)} + loading={deleteMutation.isPending} + /> + + + + + + Actions + router.push(`/dashboard/orders/${data.id}`)}> + Edit + + setDeleteOpen(true)}> + Delete + + + + + ); +}; +``` + +For **sheet-based editing** (like Users), replace `router.push` with opening a `` — see the Forms section below. 
+ +### Step 5: Page Route (`src/app/dashboard//page.tsx`) + +```tsx +import PageContainer from '@/components/layout/page-container'; +import OrderListingPage from '@/features/orders/components/order-listing'; +import { searchParamsCache } from '@/lib/searchparams'; +import type { SearchParams } from 'nuqs/server'; + +export const metadata = { title: 'Dashboard: Orders' }; +type PageProps = { searchParams: Promise }; + +export default async function Page(props: PageProps) { + const searchParams = await props.searchParams; + searchParamsCache.parse(searchParams); + + return ( + + + + ); +} +``` + +**PageContainer props**: `scrollable`, `pageTitle`, `pageDescription`, `pageHeaderAction` (React node for the top-right button), `infoContent` (help sidebar), `access` + `accessFallback` (RBAC gating). + +**Detail/Edit page** (`src/app/dashboard//[id]/page.tsx`): + +```tsx +import PageContainer from '@/components/layout/page-container'; +import OrderViewPage from '@/features/orders/components/order-view-page'; + +export const metadata = { title: 'Dashboard: Order Details' }; +type PageProps = { params: Promise<{ id: string }> }; + +export default async function Page(props: PageProps) { + const { id } = await props.params; + return ( + + + + ); +} +``` + +**View page component** (client — handles new vs edit): + +```tsx +'use client'; +import { useSuspenseQuery } from '@tanstack/react-query'; +import { notFound } from 'next/navigation'; +import { orderByIdOptions } from '../api/queries'; +import OrderForm from './order-form'; + +export default function OrderViewPage({ orderId }: { orderId: string }) { + if (orderId === 'new') return ; + const { data } = useSuspenseQuery(orderByIdOptions(Number(orderId))); + if (!data) notFound(); + return ; +} +``` + +### Step 6: Search Params (`src/lib/searchparams.ts`) + +Add any new filter keys. Existing params: `page`, `perPage`, `name`, `gender`, `category`, `role`, `sort`. 
+ +### Step 7: Navigation (`src/config/nav-config.ts`) + +```tsx +{ title: 'Orders', url: '/dashboard/orders', icon: 'product', items: [] } +``` + +### Step 8: Icons (`src/components/icons.tsx`) + +To register a new icon, import from `@tabler/icons-react` and add to the `Icons` object: + +```tsx +import { IconShoppingCart } from '@tabler/icons-react'; +export const Icons = { /* ...existing */ cart: IconShoppingCart }; +``` + +Never import `@tabler/icons-react` anywhere else. Always use `Icons.keyName`. + +**Existing icon keys** (partial): `dashboard`, `product`, `kanban`, `chat`, `forms`, `user`, `teams`, `billing`, `settings`, `add`, `edit`, `trash`, `search`, `check`, `close`, `clock`, `ellipsis`, `text`, `calendar`, `upload`, `spinner`, `chevronDown/Left/Right/Up`, `sun`, `moon`, `palette`, `pro`, `workspace`, `notification`. + +--- + +## Forms + +Forms use **TanStack Form + Zod** with `useAppForm` + `useFormFields()` and `useMutation` for submission. See [references/forms-guide.md](references/forms-guide.md) for all field types, validation strategies, multi-step forms, and advanced patterns. + +### Page Form (Create/Edit on a dedicated route) + +The full pattern is shown in Steps 1-4 above. The key structure: + +1. **Schema** — Zod schema + inferred type in `schemas/.ts` +2. **Form component** — `useAppForm({ defaultValues, validators: { onSubmit: schema }, onSubmit })` + `useFormFields()` for typed fields +3. **Mutations** — `useMutation({ ...createOrderMutation, onSuccess: () => { toast(); router.push() } })`, spread shared mutation options from `api/mutations.ts` and layer on UI callbacks +4. **View page** — client component that checks `id === 'new'` for create vs `useSuspenseQuery(byIdOptions)` for edit + +### Sheet Form (Inline create/edit in a side panel) + +For features where a separate page is overkill (like Users). The sheet manages open state; the form uses a `form` attribute to connect to the sheet footer's submit button. 
+ +```tsx +'use client'; +import { Sheet, SheetContent, SheetFooter, SheetHeader, SheetTitle } from '@/components/ui/sheet'; + +export function OrderFormSheet({ + order, + open, + onOpenChange +}: { + order?: Order; + open: boolean; + onOpenChange: (open: boolean) => void; +}) { + const isEdit = !!order; + const mutation = useMutation({ + ...(isEdit ? updateOrderMutation : createOrderMutation), + onSuccess: () => { + onOpenChange(false); + } + }); + const form = useAppForm({ + defaultValues: { customer: order?.customer ?? '' /* ... */ } as OrderFormValues, + validators: { onSubmit: orderSchema }, + onSubmit: async ({ value }) => { + await mutation.mutateAsync(value); + } + }); + const { FormTextField, FormSelectField } = useFormFields(); + + return ( + + + + {isEdit ? 'Edit' : 'New'} Order + +
+ + + + + + +
+ + + + +
+
+ ); +} +``` + +For cell actions, add `const [editOpen, setEditOpen] = useState(false)` and render `` with a ` setEditOpen(true)}>`. For the page header "Add" button, create a trigger component that manages `open` state and renders the sheet. + +**Available field components** from `useFormFields()`: `FormTextField`, `FormTextareaField`, `FormSelectField`, `FormCheckboxField`, `FormSwitchField`, `FormRadioGroupField`, `FormSliderField`, `FormFileUploadField`. + +--- + +## Data Fetching with React Query + +The pattern is: server prefetch → HydrationBoundary → client useSuspenseQuery. + +1. **Server**: `void queryClient.prefetchQuery(options)` — fire-and-forget during SSR streaming +2. **Client**: `useSuspenseQuery(options)` — picks up dehydrated data, suspends until resolved +3. **HydrationBoundary + dehydrate**: bridges server cache → client cache +4. **Suspense fallback**: skeleton shown while data streams + +**Why `useSuspenseQuery` not `useQuery`:** `useQuery` doesn't integrate with Suspense — it shows loading even when data is prefetched. `useSuspenseQuery` picks up the dehydrated pending query. Once cached (within `staleTime: 60s`), subsequent visits are instant. + +**Mutations** use `mutationOptions` + `getQueryClient()` in `mutations.ts`, composed via spread at the call site: + +```tsx +// In mutations.ts — shared config +export const createOrderMutation = mutationOptions({ + mutationFn: (data) => createOrder(data), + onSuccess: () => { + getQueryClient().invalidateQueries({ queryKey: orderKeys.all }); + } +}); + +// In component — spread + layer UI callbacks +const mutation = useMutation({ + ...createOrderMutation, + onSuccess: () => toast.success('Created') +}); +``` + +See [references/query-abstractions.md](references/query-abstractions.md) for why `mutationOptions`/`queryOptions` are the right abstraction over custom hooks. + +--- + +## Navigation & RBAC + +Configure in `src/config/nav-config.ts`. 
Items are filtered client-side in `src/hooks/use-nav.ts` using Clerk. + +**Access control properties** on nav items: + +- `requireOrg: boolean` — requires active Clerk organization +- `permission: string` — requires specific Clerk permission +- `role: string` — requires specific Clerk role +- `plan: string` — requires subscription plan (server-side) +- `feature: string` — requires feature flag (server-side) + +Items without `access` are visible to everyone. All client-side checks are synchronous — no loading states. + +--- + +## Themes + +See [references/theming-guide.md](references/theming-guide.md) for the complete guide. Quick steps: + +1. Create `src/styles/themes/.css` with OKLCH color tokens + `@theme inline` block +2. Import in `src/styles/theme.css` +3. Register in `THEMES` array in `src/components/themes/theme.config.ts` +4. (Optional) Add Google Fonts in `src/components/themes/font.config.ts` + +--- + +## Code Conventions + +- **`cn()`** for class merging — never concatenate className strings +- **Server components by default** — only add `'use client'` when needed +- **React Query** — `void prefetchQuery()` on server + `useSuspenseQuery` on client +- **API layer** — `types.ts` → `service.ts` → `queries.ts` → `mutations.ts` per feature; `queryOptions`/`mutationOptions` as base abstractions (not custom hooks); `getQueryClient()` in mutations (not `useQueryClient()`); key factories (`entityKeys.all/list/detail`); components never import mock APIs directly +- **nuqs** — `searchParamsCache` on server, `useQueryStates` on client with `shallow: true` +- **Icons** — only from `@/components/icons`, never from `@tabler/icons-react` directly +- **Forms** — `useAppForm` + `useFormFields()` from `@/components/ui/tanstack-form` +- **Page headers** — `PageContainer` props, never import `` manually +- **Sort parser** — use `getSortingStateParser` from `@/lib/parsers` (same parser as `useDataTable`) +- **Formatting** — single quotes, JSX single quotes, no trailing comma, 
2-space indent diff --git a/.agents/skills/kiranism-shadcn-dashboard/references/charts-guide.md b/.agents/skills/kiranism-shadcn-dashboard/references/charts-guide.md new file mode 100644 index 0000000..2c141cd --- /dev/null +++ b/.agents/skills/kiranism-shadcn-dashboard/references/charts-guide.md @@ -0,0 +1,420 @@ +# Charts & Analytics Guide + +## Table of Contents + +1. [Overview Architecture](#overview-architecture) +2. [Parallel Routes Pattern](#parallel-routes-pattern) +3. [Chart Components](#chart-components) +4. [Stats Cards](#stats-cards) +5. [Skeleton Loading](#skeleton-loading) +6. [Adding a New Chart Section](#adding-a-new-chart-section) + +--- + +## Overview Architecture + +The analytics dashboard at `/dashboard/overview` uses **Next.js parallel routes** to load multiple chart sections independently. Each chart slot streams in as its data becomes ready — no waterfall, no blocking. + +**File structure:** + +``` +src/app/dashboard/overview/ +├── layout.tsx # Composes all slots into a grid +├── @area_stats/ +│ ├── page.tsx # Async server component (fetches data) +│ ├── loading.tsx # Skeleton shown while streaming +│ └── error.tsx # Error boundary if fetch fails +├── @bar_stats/ +│ ├── page.tsx +│ ├── loading.tsx +│ └── error.tsx +├── @pie_stats/ +│ ├── page.tsx +│ ├── loading.tsx +│ └── error.tsx +└── @sales/ + ├── page.tsx + ├── loading.tsx + └── error.tsx + +src/features/overview/components/ +├── area-graph.tsx # Client chart component +├── area-graph-skeleton.tsx # Matching skeleton +├── bar-graph.tsx +├── bar-graph-skeleton.tsx +├── pie-graph.tsx +├── pie-graph-skeleton.tsx +├── recent-sales.tsx +└── recent-sales-skeleton.tsx +``` + +--- + +## Parallel Routes Pattern + +### Layout (`layout.tsx`) + +The layout receives each parallel route as a prop and arranges them in a grid: + +```tsx +export default function OverviewLayout({ + sales, + pie_stats, + bar_stats, + area_stats +}: { + sales: React.ReactNode; + pie_stats: React.ReactNode; + bar_stats: 
React.ReactNode; + area_stats: React.ReactNode; +}) { + return ( + + {/* Stats cards row */} +
+      <div className='grid gap-4 md:grid-cols-2 lg:grid-cols-4'>
+        <Card>
+          <CardHeader className='flex flex-row items-center justify-between space-y-0 pb-2'>
+            <CardTitle className='text-sm font-medium'>Total Revenue</CardTitle>
+          </CardHeader>
+          <CardContent>
+            <div className='text-2xl font-bold'>$45,231.89</div>
+            <p className='text-muted-foreground text-xs'>+20.1% from last month</p>
+          </CardContent>
+        </Card>
+        {/* ...more stat cards */}
+      </div>
+
+      {/* Charts grid — each slot loads independently */}
+      <div className='grid grid-cols-1 gap-4 md:grid-cols-2 lg:grid-cols-7'>
+        <div className='col-span-4'>{area_stats}</div>
+        <div className='col-span-4 md:col-span-3'>{sales}</div>
+        <div className='col-span-4'>{bar_stats}</div>
+        <div className='col-span-4 md:col-span-3'>{pie_stats}</div>
+      </div>
+ ); +} +``` + +### Slot Page (`@area_stats/page.tsx`) + +Each slot is an async server component that fetches data then renders the chart: + +```tsx +import { delay } from '@/constants/mock-api'; +import { AreaGraph } from '@/features/overview/components/area-graph'; + +export default async function AreaStatsPage() { + await delay(2000); // Simulates API fetch + return ; +} +``` + +### Slot Loading (`@area_stats/loading.tsx`) + +```tsx +import { AreaGraphSkeleton } from '@/features/overview/components/area-graph-skeleton'; + +export default function Loading() { + return ; +} +``` + +### Slot Error (`@area_stats/error.tsx`) + +```tsx +'use client'; +import { Alert, AlertDescription, AlertTitle } from '@/components/ui/alert'; +import { Icons } from '@/components/icons'; + +export default function AreaStatsError({ error }: { error: Error }) { + return ( + + + Error + Failed to load area stats: {error.message} + + ); +} +``` + +Each slot can fail independently without affecting others. + +--- + +## Chart Components + +All chart components are `'use client'` and use **Recharts** wrapped in shadcn's `ChartContainer`. + +### Chart Config + +Every chart defines a config object mapping data keys to labels and theme colors: + +```tsx +import { + type ChartConfig, + ChartContainer, + ChartTooltip, + ChartTooltipContent +} from '@/components/ui/chart'; + +const chartConfig = { + desktop: { label: 'Desktop', color: 'var(--chart-1)' }, + mobile: { label: 'Mobile', color: 'var(--chart-2)' } +} satisfies ChartConfig; +``` + +Theme colors `--chart-1` through `--chart-5` are defined in each theme's CSS file and automatically adapt to light/dark mode. 
+ +### Area Chart Example + +```tsx +'use client'; +import { Area, AreaChart, CartesianGrid, XAxis } from 'recharts'; +import { + type ChartConfig, + ChartContainer, + ChartTooltip, + ChartTooltipContent +} from '@/components/ui/chart'; +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card'; +import { Badge } from '@/components/ui/badge'; +import { Icons } from '@/components/icons'; + +const chartData = [ + { month: 'January', desktop: 186, mobile: 80 }, + { month: 'February', desktop: 305, mobile: 200 } + // ...more months +]; + +const chartConfig = { + desktop: { label: 'Desktop', color: 'var(--chart-1)' }, + mobile: { label: 'Mobile', color: 'var(--chart-2)' } +} satisfies ChartConfig; + +export function AreaGraph() { + return ( + + + Area Chart - Stacked + + +12.5% + + + + + + + value.slice(0, 3)} + /> + } /> + + + + + + + ); +} +``` + +### Bar Chart Pattern + +Same structure, using `BarChart` + `Bar`: + +```tsx + + + + + } /> + + + + +``` + +### Pie/Donut Chart Pattern + +```tsx + + + } /> + + + + + +``` + +--- + +## Stats Cards + +Stats cards are simple server-rendered `Card` components at the top of the layout — no parallel routes needed since they render instantly: + +```tsx + + + Total Revenue + + + +
$45,231.89
+

+20.1% from last month

+
+
+``` + +For dynamic stats that need data fetching, wrap in their own Suspense boundary or parallel route slot. + +--- + +## Skeleton Loading + +Each chart has a matching skeleton component. Pattern: + +```tsx +import { Card, CardContent, CardHeader } from '@/components/ui/card'; +import { Skeleton } from '@/components/ui/skeleton'; + +export function AreaGraphSkeleton() { + return ( + + + + + + + + + + ); +} +``` + +Match the skeleton dimensions to the actual chart for smooth visual transitions. + +--- + +## Adding a New Chart Section + +To add a new chart (e.g., line chart for user growth): + +### 1. Create the chart component + +`src/features/overview/components/line-graph.tsx`: + +```tsx +'use client'; +import { Line, LineChart, CartesianGrid, XAxis } from 'recharts'; +import { + type ChartConfig, + ChartContainer, + ChartTooltip, + ChartTooltipContent +} from '@/components/ui/chart'; + +const chartConfig = { + users: { label: 'Users', color: 'var(--chart-3)' } +} satisfies ChartConfig; + +const chartData = [ + /* monthly user data */ +]; + +export function LineGraph() { + return ( + + + User Growth + + + + + + + } /> + + + + + + ); +} +``` + +### 2. Create matching skeleton + +`src/features/overview/components/line-graph-skeleton.tsx` + +### 3. Create parallel route slot + +``` +src/app/dashboard/overview/@line_stats/ +├── page.tsx → async, fetches data, returns +├── loading.tsx → returns +├── error.tsx → error alert +└── default.tsx → return null (fallback when route doesn't match) +``` + +`default.tsx` is required for parallel routes — return `null` or a fallback: + +```tsx +export default function Default() { + return null; +} +``` + +### 4. Add slot to layout + +Update `src/app/dashboard/overview/layout.tsx`: + +```tsx +export default function OverviewLayout({ + sales, + pie_stats, + bar_stats, + area_stats, + line_stats // ← add new slot +}: { + /* ...types */ +}) { + return ( +
+ {/* existing charts */} +
{line_stats}
+
+ ); +} +``` + +### Available Recharts Components + +Common chart types to use with `ChartContainer`: + +- `AreaChart` + `Area` — filled area charts (stacked or standalone) +- `BarChart` + `Bar` — vertical/horizontal bars +- `LineChart` + `Line` — line/trend charts +- `PieChart` + `Pie` — pie/donut charts +- `RadarChart` + `Radar` — radar/spider charts +- `RadialBarChart` + `RadialBar` — radial progress bars + +All support `ChartTooltip`, `ChartLegend`, and theme-aware colors via `var(--chart-N)`. diff --git a/.agents/skills/kiranism-shadcn-dashboard/references/forms-guide.md b/.agents/skills/kiranism-shadcn-dashboard/references/forms-guide.md new file mode 100644 index 0000000..b0d444a --- /dev/null +++ b/.agents/skills/kiranism-shadcn-dashboard/references/forms-guide.md @@ -0,0 +1,304 @@ +# Forms Guide + +## Table of Contents + +1. [Architecture](#architecture) +2. [Field Types](#field-types) +3. [Usage Patterns](#usage-patterns) +4. [Validation Strategies](#validation-strategies) +5. [Sheet/Dialog Forms](#sheetdialog-forms) +6. [Multi-Step Forms](#multi-step-forms) +7. [Advanced Patterns](#advanced-patterns) + +--- + +## Architecture + +The form system is built on **TanStack Form + Zod** with a composable field layer. + +**Key files:** + +- `src/components/ui/tanstack-form.tsx` — exports `useAppForm`, `useFormFields()`, composed fields +- `src/components/ui/form-context.tsx` — contexts, `createFormField`, structural components +- `src/components/forms/fields/*.tsx` — 8 field type implementations + +**Key exports:** + +```tsx +import { useAppForm, useFormFields } from '@/components/ui/tanstack-form'; +``` + +- `useAppForm(config)` — creates a form instance with `defaultValues`, `validators`, `onSubmit` +- `useFormFields()` — returns all 8 typed field components with name autocomplete from `T` +- `form.AppForm` — context provider wrapper +- `form.Form` — `
` element that handles submit +- `form.SubmitButton` — auto-disabled when form is invalid or submitting +- `form.AppField` — low-level render prop for custom fields + +--- + +## Field Types + +All fields accept: `name`, `label`, `description`, `required`, `disabled`, `validators`, `listeners`, `className`. + +| Component | Props | Notes | +| --------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------- | +| `FormTextField` | `type` (text/email/number/password/tel/url), `placeholder`, `min`, `max`, `step`, `maxLength` | For numbers use `type='number'` | +| `FormTextareaField` | `placeholder`, `rows`, `maxLength` | Multiline text | +| `FormSelectField` | `options: {value, label}[]`, `placeholder` | Single select dropdown | +| `FormCheckboxField` | `options?: {value, label}[]` | Single checkbox or multi-checkbox group | +| `FormSwitchField` | — | Toggle switch | +| `FormRadioGroupField` | `options: {value, label}[]`, `orientation` | Radio button group | +| `FormSliderField` | `min`, `max`, `step` | Range slider | +| `FormFileUploadField` | `maxSize`, `maxFiles`, `accept` | Drag-and-drop with preview | + +--- + +## Usage Patterns + +### Pattern 1: `useFormFields()` (Recommended) + +Type-safe field components with name autocomplete: + +```tsx +const { FormTextField, FormSelectField } = useFormFields(); + + + + +``` + +### Pattern 2: `form.AppField` render prop + +Full control for custom field rendering: + +```tsx + + {(field) => ( + + + + + + + )} + +``` + +### Pattern 3: Direct import (no type safety) + +For quick prototyping: + +```tsx +import { FormTextField } from '@/components/ui/tanstack-form'; +; +``` + +--- + +## Validation Strategies + +### Field-level (recommended for UX) + +```tsx + +``` + +### Form-level (catch-all on submit) + +```tsx +const form = useAppForm({ + validators: { onSubmit: orderSchema }, // Validates entire form on submit + onSubmit: async ({ 
value }) => { + /* ... */ + } +}); +``` + +### Async validation (server-side checks) + +```tsx + { + const exists = await checkUsername(value); + return exists ? 'Username taken' : undefined; + } + }} + asyncDebounceMs={500} +/> +``` + +### Linked field validation + +For dependent fields (e.g., confirm password): + +```tsx + { + const password = fieldApi.form.getFieldValue('password'); + return value !== password ? 'Passwords must match' : undefined; + } + }} +/> +``` + +--- + +## Sheet/Dialog Forms + +The key pattern for forms inside sheets or dialogs: give the `` an `id`, and use that `id` on the submit button's `form` attribute. This allows the submit button to live outside the form element (e.g., in `SheetFooter`). + +```tsx + + + {/* fields */} + +; + +{ + /* In SheetFooter — button is outside the but still submits it */ +} + + +; +``` + +On success, call `onOpenChange(false)` to close the sheet and `form.reset()` for create forms. + +--- + +## Multi-Step Forms + +Use `withFieldGroup` + `useAppForm` with `StepButton`: + +```tsx +// Define field groups for each step +const Step1 = withFieldGroup({ + fields: ['name', 'email'], + render: ({ form }) => { + const { FormTextField } = useFormFields(); + return ( + <> + + + + + ); + } +}); + +const Step2 = withFieldGroup({ + fields: ['address', 'city'], + render: ({ form }) => { + const { FormTextField } = useFormFields(); + return ( + <> + + + + + + ); + } +}); +``` + +Use the `useStepper` hook from `src/hooks/use-stepper.tsx` to manage step state. 
+ +--- + +## Advanced Patterns + +### Nested objects (dot notation) + +```tsx + + +``` + +### Dynamic array rows + +```tsx + + {(field) => ( + <> + {field.state.value.map((_, i) => ( + + {(subField) => } + + ))} + + + )} + +``` + +### Side effects with listeners + +```tsx + { + // Reset city when country changes + form.setFieldValue('city', ''); + } + }} +/> +``` + +### Custom field with `form.AppField` + +For fields not covered by the built-in 8 types: + +```tsx + + {(field) => ( + + + + field.handleChange(e.target.value)} + /> + + + + )} + +``` + +### Form-level errors + +Display errors that apply to the whole form (e.g., server errors): + +```tsx +import { FormErrors } from '@/components/ui/form-context'; + + + + {/* Renders form-level validation errors */} + {/* fields... */} + +; +``` diff --git a/.agents/skills/kiranism-shadcn-dashboard/references/mock-api-guide.md b/.agents/skills/kiranism-shadcn-dashboard/references/mock-api-guide.md new file mode 100644 index 0000000..ec48133 --- /dev/null +++ b/.agents/skills/kiranism-shadcn-dashboard/references/mock-api-guide.md @@ -0,0 +1,255 @@ +# Mock API Guide + +## Table of Contents + +1. [Structure](#structure) +2. [Full Template](#full-template) +3. [Key Patterns](#key-patterns) +4. [Integrating with React Query](#integrating-with-react-query) + +--- + +## Structure + +Each mock API file lives in `src/constants/mock-api-.ts` and is a self-contained in-memory database. It uses: + +- **faker** for generating sample data +- **match-sorter** for fuzzy search across fields +- **delay** (from `./mock-api`) to simulate network latency + +The `delay` function is exported from `src/constants/mock-api.ts`: + +```tsx +export async function delay(ms: number) { + return new Promise((resolve) => setTimeout(resolve, ms)); +} +``` + +--- + +## Full Template + +```tsx +import { faker } from '@faker-js/faker'; +import { matchSorter } from 'match-sorter'; +import { delay } from './mock-api'; + +// 1. 
Define the entity type +export type Order = { + id: number; + customer: string; + email: string; + status: string; + total: number; + created_at: string; + updated_at: string; +}; + +// 2. Create the fake database object +export const fakeOrders = { + records: [] as Order[], + + // 3. Initialize with faker data + initialize() { + const statuses = ['pending', 'processing', 'completed', 'cancelled']; + for (let i = 1; i <= 20; i++) { + this.records.push({ + id: i, + customer: faker.person.fullName(), + email: faker.internet.email(), + status: faker.helpers.arrayElement(statuses), + total: parseFloat(faker.commerce.price({ min: 10, max: 500 })), + created_at: faker.date.between({ from: '2023-01-01', to: Date.now() }).toISOString(), + updated_at: faker.date.recent().toISOString() + }); + } + }, + + // 4. Get all with optional search (used internally) + async getAll({ search }: { search?: string } = {}) { + let items = [...this.records]; + if (search) { + items = matchSorter(items, search, { + keys: ['customer', 'email'] + }); + } + return items; + }, + + // 5. Paginated list with filtering and sorting + async getOrders(params: { + page?: number; + limit?: number; + search?: string; + statuses?: string; + sort?: string; + }) { + await delay(800); + const { page = 1, limit = 10, search, statuses, sort } = params; + + let items = await this.getAll({ search }); + + // Filter by dot-separated values + if (statuses) { + const statusList = statuses.split('.'); + items = items.filter((item) => statusList.includes(item.status)); + } + + // Sort by column + if (sort) { + const parsedSort = JSON.parse(sort) as { id: string; desc: boolean }[]; + if (parsedSort.length > 0) { + const { id, desc } = parsedSort[0]; + items.sort((a, b) => { + const aVal = a[id as keyof Order]; + const bVal = b[id as keyof Order]; + if (aVal < bVal) return desc ? 1 : -1; + if (aVal > bVal) return desc ?
-1 : 1; + return 0; + }); + } + } + + // Paginate + const total_items = items.length; + items = items.slice((page - 1) * limit, page * limit); + + return { items, total_items }; + }, + + // 6. Get single record by ID + async getOrderById(id: number) { + await delay(800); + return this.records.find((r) => r.id === id) || null; + }, + + // 7. Create + async createOrder(data: Omit) { + await delay(800); + const newRecord: Order = { + ...data, + id: this.records.length + 1, + created_at: new Date().toISOString(), + updated_at: new Date().toISOString() + }; + this.records.push(newRecord); + return newRecord; + }, + + // 8. Update + async updateOrder(id: number, data: Partial) { + await delay(800); + const idx = this.records.findIndex((r) => r.id === id); + if (idx === -1) return null; + this.records[idx] = { + ...this.records[idx], + ...data, + updated_at: new Date().toISOString() + }; + return this.records[idx]; + }, + + // 9. Delete + async deleteOrder(id: number) { + await delay(800); + this.records = this.records.filter((r) => r.id !== id); + return true; + } +}; + +// 10. Auto-initialize on import +fakeOrders.initialize(); +``` + +--- + +## Key Patterns + +### Search with match-sorter + +Always specify which fields to search across: + +```tsx +matchSorter(items, search, { keys: ['customer', 'email', 'status'] }); +``` + +### Dot-separated filter values + +For multi-select filters (roles, statuses), the URL param uses `.` as delimiter: + +```tsx +if (statuses) { + const list = statuses.split('.'); + items = items.filter((item) => list.includes(item.status)); +} +``` + +### Computed column sorting + +When a table has a computed column (e.g., combining first_name + last_name into "name"), handle it in the sort logic: + +```tsx +if (id === 'name') { + const aName = `${a.first_name} ${a.last_name}`; + const bName = `${b.first_name} ${b.last_name}`; + return desc ?
bName.localeCompare(aName) : aName.localeCompare(bName); +} +``` + +### Return shape + +List methods must return `{ items, total_items }` (or `{ products, total }` etc. — match the query option expectations). The total is the count **before** pagination, used for `pageCount` calculation. + +--- + +## Integrating with the API Layer + +The mock API is only imported in `service.ts`. Queries and components import from the service and types files: + +``` +mock-api-orders.ts → api/service.ts → api/queries.ts → components +(data source) (data access) (key factory + (useSuspenseQuery + queryOptions) + useMutation) +``` + +**service.ts** imports from the mock API: + +```tsx +import { fakeOrders } from '@/constants/mock-api-orders'; +import type { OrderFilters, OrdersResponse } from './types'; + +export async function getOrders(filters: OrderFilters): Promise { + return fakeOrders.getOrders(filters); +} +export async function createOrder(data: OrderMutationPayload) { + return fakeOrders.createOrder(data); +} +``` + +**queries.ts** imports from service, uses key factories: + +```tsx +import { getOrders } from './service'; +import type { OrderFilters } from './types'; + +export const orderKeys = { + all: ['orders'] as const, + list: (filters: OrderFilters) => [...orderKeys.all, 'list', filters] as const, + detail: (id: number) => [...orderKeys.all, 'detail', id] as const +}; + +export const ordersQueryOptions = (filters: OrderFilters) => + queryOptions({ queryKey: orderKeys.list(filters), queryFn: () => getOrders(filters) }); +``` + +**Mutations** in components use service functions + key factories: + +```tsx +import { createOrder } from '../api/service'; +import { orderKeys } from '../api/queries'; + +const mutation = useMutation({ + mutationFn: (data) => createOrder(data), + onSuccess: () => queryClient.invalidateQueries({ queryKey: orderKeys.all }) +}); +``` diff --git a/.agents/skills/kiranism-shadcn-dashboard/references/query-abstractions.md 
b/.agents/skills/kiranism-shadcn-dashboard/references/query-abstractions.md new file mode 100644 index 0000000..0554b56 --- /dev/null +++ b/.agents/skills/kiranism-shadcn-dashboard/references/query-abstractions.md @@ -0,0 +1,153 @@ +# TanStack Query Abstractions (v5) + +The core insight: **`queryOptions` and `mutationOptions` are the right abstraction — not custom hooks.** + +--- + +## Query Abstraction + +### The Pattern + +```ts +// queries/invoice.ts +import { queryOptions } from '@tanstack/react-query'; + +export function invoiceOptions(id: number) { + return queryOptions({ + queryKey: ['invoice', id], + queryFn: () => fetchInvoice(id) + }); +} + +export function invoiceListOptions(filters: InvoiceFilters) { + return queryOptions({ + queryKey: ['invoices', filters], + queryFn: () => fetchInvoices(filters), + staleTime: 30_000 + }); +} +``` + +### Usage — always compose at the call site + +```ts +// basic +const { data } = useQuery(invoiceOptions(id)); + +// with suspense — same options, different hook +const { data } = useSuspenseQuery(invoiceOptions(id)); + +// with extra options spread on top — full type inference, no TS pain +const { data } = useQuery({ + ...invoiceOptions(id), + select: (invoice) => invoice.createdAt, // data infers as string | undefined + enabled: !!id +}); + +// prefetch in a route loader (works outside React — this is why hooks are wrong) +await queryClient.prefetchQuery(invoiceOptions(id)); + +// read from cache imperatively — queryKey is typed via DataTag symbol +const invoice = queryClient.getQueryData(invoiceOptions(id).queryKey); + +// invalidate +queryClient.invalidateQueries({ queryKey: invoiceOptions(id).queryKey }); +``` + +### Why NOT a custom hook + +Custom hooks like `useInvoice(id)` have three critical problems: + +1. **Hooks only work in components/hooks** — but queries are now used in route loaders, server prefetching, event handlers, and server components. `queryOptions` is just a plain function — works anywhere. +2. 
**They share logic, not configuration** — what you actually want to share is the `queryKey` + `queryFn` config. Hooks are the wrong primitive for that. +3. **They lock you to one hook** — you can't use `useInvoice()` with `useSuspenseQuery`, `useQueries`, or imperative `queryClient` methods. + +### Why NOT `UseQueryOptions` type directly + +```ts +// BAD — data becomes unknown +function useInvoice(id: number, options?: Partial) { ... } + +// STILL BAD — select breaks with TS error +function useInvoice(id: number, options?: Partial>) { ... } +// select: (invoice) => invoice.createdAt +// Error: Type 'string' is not assignable to type 'Invoice' +``` + +`queryOptions` solves this via a `DataTag` symbol on the queryKey — full inference, zero manual generics. + +### Custom hooks are still fine on top + +If a component always uses the same composition, a hook is fine — but build it _on top of_ `queryOptions`: + +```ts +// OK — hook built on queryOptions +function useInvoice(id: number) { + return useQuery(invoiceOptions(id)); +} + +// OK — hook that adds per-feature defaults +function useInvoiceWithSuspense(id: number) { + return useSuspenseQuery(invoiceOptions(id)); +} +``` + +--- + +## Mutation Abstraction + +### The Pattern + +```ts +// mutations/invoice.ts +import { mutationOptions } from '@tanstack/react-query'; +import { getQueryClient } from '@/lib/query-client'; + +export const createInvoiceMutation = mutationOptions({ + mutationFn: (data: CreateInvoiceInput) => createInvoice(data), + onSuccess: () => { + getQueryClient().invalidateQueries({ queryKey: ['invoices'] }); + } +}); + +export const updateInvoiceMutation = mutationOptions({ + mutationFn: ({ id, ...data }: UpdateInvoiceInput) => updateInvoice(id, data), + onSuccess: (updated) => { + const qc = getQueryClient(); + qc.setQueryData(invoiceOptions(updated.id).queryKey, updated); + qc.invalidateQueries({ queryKey: ['invoices'] }); + } +}); +``` + +> **Note on queryClient**: Import `getQueryClient()` directly 
— do NOT pass `queryClient` as a function argument. The `getQueryClient()` pattern handles both SSR (fresh per request) and client (singleton) correctly. + +### Usage + +```ts +// basic +const { mutate } = useMutation(createInvoiceMutation); + +// composed — add per-usage callbacks on top +const { mutate } = useMutation({ + ...createInvoiceMutation, + onError: (err) => toast.error(err.message), + onSuccess: (data) => { + // this runs AFTER the shared onSuccess above + router.push(`/invoices/${data.id}`); + } +}); +``` + +--- + +## Rules Summary + +| Rule | Reason | +| ------------------------------------------------------------- | ---------------------------------------------------------------------- | +| Use `queryOptions()` not custom hooks as the base abstraction | Works everywhere — loaders, server, imperative calls | +| Keep options factories lean — no extra config params | Best abstractions are not configurable | +| Compose extra options at the call site via spread | Full TS inference without manual generics | +| Import `getQueryClient()` in mutation files | Handles SSR/client correctly without prop drilling | +| Co-locate `queryKey` inside `queryOptions` | Typed key reuse in `invalidateQueries`, `setQueryData`, `getQueryData` | +| Custom hooks are fine — but built ON TOP of `queryOptions` | Hooks for component convenience, `queryOptions` for sharing config | diff --git a/.agents/skills/kiranism-shadcn-dashboard/references/theming-guide.md b/.agents/skills/kiranism-shadcn-dashboard/references/theming-guide.md new file mode 100644 index 0000000..90e76bc --- /dev/null +++ b/.agents/skills/kiranism-shadcn-dashboard/references/theming-guide.md @@ -0,0 +1,180 @@ +# Theme Creation Guide + +## Table of Contents + +1. [Create Theme CSS](#1-create-theme-css) +2. [Import Theme](#2-import-theme) +3. [Register Theme](#3-register-theme) +4. [Add Custom Fonts](#4-add-custom-fonts-optional) +5. [Set as Default](#5-set-as-default-optional) +6. 
[Required Tokens](#required-tokens) +7. [Color Format Reference](#color-format-reference) + +--- + +## 1. Create Theme CSS + +Create `src/styles/themes/.css`: + +```css +/* Light mode */ +[data-theme='your-theme'] { + --background: oklch(1 0 0); + --foreground: oklch(0.145 0 0); + --card: oklch(...); + --card-foreground: oklch(...); + --popover: oklch(...); + --popover-foreground: oklch(...); + --primary: oklch(...); + --primary-foreground: oklch(...); + --secondary: oklch(...); + --secondary-foreground: oklch(...); + --muted: oklch(...); + --muted-foreground: oklch(...); + --accent: oklch(...); + --accent-foreground: oklch(...); + --destructive: oklch(...); + --destructive-foreground: oklch(...); + --border: oklch(...); + --input: oklch(...); + --ring: oklch(...); + --chart-1: oklch(...); + --chart-2: oklch(...); + --chart-3: oklch(...); + --chart-4: oklch(...); + --chart-5: oklch(...); + --sidebar: oklch(...); + --sidebar-foreground: oklch(...); + --sidebar-primary: oklch(...); + --sidebar-primary-foreground: oklch(...); + --sidebar-accent: oklch(...); + --sidebar-accent-foreground: oklch(...); + --sidebar-border: oklch(...); + --sidebar-ring: oklch(...); + --font-sans: 'Font Name', sans-serif; + --font-mono: 'Mono Font', monospace; + --radius: 0.5rem; + --spacing: 0.25rem; +} + +/* Dark mode */ +[data-theme='your-theme'].dark { + --background: oklch(0.145 0 0); + --foreground: oklch(0.985 0 0); + /* ... 
all tokens with dark values */ +} + +/* Tailwind integration (required) */ +[data-theme='your-theme'] { + @theme inline { + --color-background: var(--background); + --color-foreground: var(--foreground); + --color-card: var(--card); + --color-card-foreground: var(--card-foreground); + --color-popover: var(--popover); + --color-popover-foreground: var(--popover-foreground); + --color-primary: var(--primary); + --color-primary-foreground: var(--primary-foreground); + --color-secondary: var(--secondary); + --color-secondary-foreground: var(--secondary-foreground); + --color-muted: var(--muted); + --color-muted-foreground: var(--muted-foreground); + --color-accent: var(--accent); + --color-accent-foreground: var(--accent-foreground); + --color-destructive: var(--destructive); + --color-destructive-foreground: var(--destructive-foreground); + --color-border: var(--border); + --color-input: var(--input); + --color-ring: var(--ring); + --color-chart-1: var(--chart-1); + --color-chart-2: var(--chart-2); + --color-chart-3: var(--chart-3); + --color-chart-4: var(--chart-4); + --color-chart-5: var(--chart-5); + --color-sidebar: var(--sidebar); + --color-sidebar-foreground: var(--sidebar-foreground); + --color-sidebar-primary: var(--sidebar-primary); + --color-sidebar-primary-foreground: var(--sidebar-primary-foreground); + --color-sidebar-accent: var(--sidebar-accent); + --color-sidebar-accent-foreground: var(--sidebar-accent-foreground); + --color-sidebar-border: var(--sidebar-border); + --color-sidebar-ring: var(--sidebar-ring); + --font-sans: var(--font-sans); + --font-mono: var(--font-mono); + --font-serif: var(--font-serif); + --radius-sm: calc(var(--radius) - 4px); + --radius-md: calc(var(--radius) - 2px); + --radius-lg: var(--radius); + --radius-xl: calc(var(--radius) + 4px); + } +} +``` + +## 2. Import Theme + +Add to `src/styles/theme.css`: + +```css +@import './themes/your-theme.css'; +``` + +## 3. 
Register Theme + +Add to `THEMES` array in `src/components/themes/theme.config.ts`: + +```typescript +{ name: 'Your Theme', value: 'your-theme' } +``` + +The `value` must exactly match the `data-theme` attribute in your CSS. + +## 4. Add Custom Fonts (Optional) + +Only if using a Google Font not already loaded. + +In `src/components/themes/font.config.ts`: + +```typescript +import { Your_Font } from 'next/font/google'; + +const fontYourName = Your_Font({ + subsets: ['latin'], + weight: ['400', '500', '700'], + variable: '--font-your-name' +}); + +export const fontVariables = cn( + // ... existing fonts + fontYourName.variable +); +``` + +In your theme CSS, use the font's **display name** (not the CSS variable): + +```css +--font-sans: 'Your Font', sans-serif; +``` + +## 5. Set as Default (Optional) + +In `src/components/themes/theme.config.ts`: + +```typescript +export const DEFAULT_THEME = 'your-theme'; +``` + +## Required Tokens + +Minimum required: `--background`, `--foreground`, `--card` & `--card-foreground`, `--popover` & `--popover-foreground`, `--primary` & `--primary-foreground`, `--secondary` & `--secondary-foreground`, `--muted` & `--muted-foreground`, `--accent` & `--accent-foreground`, `--destructive` & `--destructive-foreground`, `--border`, `--input`, `--ring`, `--radius`. + +Optional: `--chart-*`, `--sidebar-*`, `--font-*`, `--shadow-*`, `--tracking-normal`, `--spacing`. + +## Color Format Reference + +OKLCH: `oklch(lightness chroma hue)` + +- Lightness: 0-1 (0=black, 1=white) +- Chroma: 0+ (0=gray, higher=saturated) +- Hue: 0-360 (0=red, 120=green, 240=blue) + +See `src/styles/themes/claude.css` for a complete example. 
diff --git a/.agents/skills/next-best-practices/SKILL.md b/.agents/skills/next-best-practices/SKILL.md new file mode 100644 index 0000000..3d5e686 --- /dev/null +++ b/.agents/skills/next-best-practices/SKILL.md @@ -0,0 +1,171 @@ +--- +name: next-best-practices +description: Next.js best practices - file conventions, RSC boundaries, data patterns, async APIs, metadata, error handling, route handlers, image/font optimization, bundling +user-invocable: false +--- + +# Next.js Best Practices + +Apply these rules when writing or reviewing Next.js code. + +## File Conventions + +See [file-conventions.md](./file-conventions.md) for: + +- Project structure and special files +- Route segments (dynamic, catch-all, groups) +- Parallel and intercepting routes +- Middleware rename in v16 (middleware → proxy) + +## RSC Boundaries + +Detect invalid React Server Component patterns. + +See [rsc-boundaries.md](./rsc-boundaries.md) for: + +- Async client component detection (invalid) +- Non-serializable props detection +- Server Action exceptions + +## Async Patterns + +Next.js 15+ async API changes. 
+ +See [async-patterns.md](./async-patterns.md) for: + +- Async `params` and `searchParams` +- Async `cookies()` and `headers()` +- Migration codemod + +## Runtime Selection + +See [runtime-selection.md](./runtime-selection.md) for: + +- Default to Node.js runtime +- When Edge runtime is appropriate + +## Directives + +See [directives.md](./directives.md) for: + +- `'use client'`, `'use server'` (React) +- `'use cache'` (Next.js) + +## Functions + +See [functions.md](./functions.md) for: + +- Navigation hooks: `useRouter`, `usePathname`, `useSearchParams`, `useParams` +- Server functions: `cookies`, `headers`, `draftMode`, `after` +- Generate functions: `generateStaticParams`, `generateMetadata` + +## Error Handling + +See [error-handling.md](./error-handling.md) for: + +- `error.tsx`, `global-error.tsx`, `not-found.tsx` +- `redirect`, `permanentRedirect`, `notFound` +- `forbidden`, `unauthorized` (auth errors) +- `unstable_rethrow` for catch blocks + +## Data Patterns + +See [data-patterns.md](./data-patterns.md) for: + +- Server Components vs Server Actions vs Route Handlers +- Avoiding data waterfalls (`Promise.all`, Suspense, preload) +- Client component data fetching + +## Route Handlers + +See [route-handlers.md](./route-handlers.md) for: + +- `route.ts` basics +- GET handler conflicts with `page.tsx` +- Environment behavior (no React DOM) +- When to use vs Server Actions + +## Metadata & OG Images + +See [metadata.md](./metadata.md) for: + +- Static and dynamic metadata +- `generateMetadata` function +- OG image generation with `next/og` +- File-based metadata conventions + +## Image Optimization + +See [image.md](./image.md) for: + +- Always use `next/image` over `` +- Remote images configuration +- Responsive `sizes` attribute +- Blur placeholders +- Priority loading for LCP + +## Font Optimization + +See [font.md](./font.md) for: + +- `next/font` setup +- Google Fonts, local fonts +- Tailwind CSS integration +- Preloading subsets + +## Bundling + +See 
[bundling.md](./bundling.md) for: + +- Server-incompatible packages +- CSS imports (not link tags) +- Polyfills (already included) +- ESM/CommonJS issues +- Bundle analysis + +## Scripts + +See [scripts.md](./scripts.md) for: + +- `next/script` vs native script tags +- Inline scripts need `id` +- Loading strategies +- Google Analytics with `@next/third-parties` + +## Hydration Errors + +See [hydration-error.md](./hydration-error.md) for: + +- Common causes (browser APIs, dates, invalid HTML) +- Debugging with error overlay +- Fixes for each cause + +## Suspense Boundaries + +See [suspense-boundaries.md](./suspense-boundaries.md) for: + +- CSR bailout with `useSearchParams` and `usePathname` +- Which hooks require Suspense boundaries + +## Parallel & Intercepting Routes + +See [parallel-routes.md](./parallel-routes.md) for: + +- Modal patterns with `@slot` and `(.)` interceptors +- `default.tsx` for fallbacks +- Closing modals correctly with `router.back()` + +## Self-Hosting + +See [self-hosting.md](./self-hosting.md) for: + +- `output: 'standalone'` for Docker +- Cache handlers for multi-instance ISR +- What works vs needs extra setup + +## Debug Tricks + +See [debug-tricks.md](./debug-tricks.md) for: + +- MCP endpoint for AI-assisted debugging +- Rebuild specific routes with `--debug-build-paths` diff --git a/.agents/skills/next-best-practices/async-patterns.md b/.agents/skills/next-best-practices/async-patterns.md new file mode 100644 index 0000000..0692c4a --- /dev/null +++ b/.agents/skills/next-best-practices/async-patterns.md @@ -0,0 +1,84 @@ +# Async Patterns + +In Next.js 15+, `params`, `searchParams`, `cookies()`, and `headers()` are asynchronous. + +## Async Params and SearchParams + +Always type them as `Promise<...>` and await them. 
+ +### Pages and Layouts + +```tsx +type Props = { params: Promise<{ slug: string }> }; + +export default async function Page({ params }: Props) { + const { slug } = await params; +} +``` + +### Route Handlers + +```tsx +export async function GET(request: Request, { params }: { params: Promise<{ id: string }> }) { + const { id } = await params; +} +``` + +### SearchParams + +```tsx +type Props = { + params: Promise<{ slug: string }>; + searchParams: Promise<{ query?: string }>; +}; + +export default async function Page({ params, searchParams }: Props) { + const { slug } = await params; + const { query } = await searchParams; +} +``` + +### Synchronous Components + +Use `React.use()` for non-async components: + +```tsx +import { use } from 'react'; + +type Props = { params: Promise<{ slug: string }> }; + +export default function Page({ params }: Props) { + const { slug } = use(params); +} +``` + +### generateMetadata + +```tsx +type Props = { params: Promise<{ slug: string }> }; + +export async function generateMetadata({ params }: Props): Promise { + const { slug } = await params; + return { title: slug }; +} +``` + +## Async Cookies and Headers + +```tsx +import { cookies, headers } from 'next/headers'; + +export default async function Page() { + const cookieStore = await cookies(); + const headersList = await headers(); + + const theme = cookieStore.get('theme'); + const userAgent = headersList.get('user-agent'); +} +``` + +## Migration Codemod + +```bash +npx @next/codemod@latest next-async-request-api . +``` diff --git a/.agents/skills/next-best-practices/bundling.md b/.agents/skills/next-best-practices/bundling.md new file mode 100644 index 0000000..f4dc161 --- /dev/null +++ b/.agents/skills/next-best-practices/bundling.md @@ -0,0 +1,182 @@ +# Bundling + +Fix common bundling issues with third-party packages. + +## Server-Incompatible Packages + +Some packages use browser APIs (`window`, `document`, `localStorage`) and fail in Server Components. 
+ +### Error Signs + +``` +ReferenceError: window is not defined +ReferenceError: document is not defined +ReferenceError: localStorage is not defined +Module not found: Can't resolve 'fs' +``` + +### Solution 1: Mark as Client-Only + +If the package is only needed on client: + +```tsx +// Bad: Fails - package uses window +import SomeChart from 'some-chart-library'; + +export default function Page() { + return ; +} + +// Good: Use dynamic import with ssr: false +import dynamic from 'next/dynamic'; + +const SomeChart = dynamic(() => import('some-chart-library'), { + ssr: false +}); + +export default function Page() { + return ; +} +``` + +### Solution 2: Externalize from Server Bundle + +For packages that should run on server but have bundling issues: + +```js +// next.config.js +module.exports = { + serverExternalPackages: ['problematic-package'] +}; +``` + +Use this for: + +- Packages with native bindings (sharp, bcrypt) +- Packages that don't bundle well (some ORMs) +- Packages with circular dependencies + +### Solution 3: Client Component Wrapper + +Wrap the entire usage in a client component: + +```tsx +// components/ChartWrapper.tsx +'use client'; + +import { Chart } from 'chart-library'; + +export function ChartWrapper(props) { + return ; +} + +// app/page.tsx (server component) +import { ChartWrapper } from '@/components/ChartWrapper'; + +export default function Page() { + return ; +} +``` + +## CSS Imports + +Import CSS files instead of using `` tags. Next.js handles bundling and optimization. + +```tsx +// Bad: Manual link tag +; + +// Good: Import CSS +import './styles.css'; + +// Good: CSS Modules +import styles from './Button.module.css'; +``` + +## Polyfills + +Next.js includes common polyfills automatically. Don't load redundant ones from polyfill.io or similar CDNs. + +Already included: `Array.from`, `Object.assign`, `Promise`, `fetch`, `Map`, `Set`, `Symbol`, `URLSearchParams`, and 50+ others. 
+ +```tsx +// Bad: Redundant polyfills +; + +// Good: Next.js Script component +import Script from 'next/script'; + + +``` + +## Don't Put Script in Head + +`next/script` should not be placed inside `next/head`. It handles its own positioning. + +```tsx +// Bad: Script inside Head +import Head from 'next/head' +import Script from 'next/script' + + + + +// Good: Next.js component +import { GoogleAnalytics } from '@next/third-parties/google' + +export default function Layout({ children }) { + return ( + + {children} + + + ) +} +``` + +## Google Tag Manager + +```tsx +import { GoogleTagManager } from '@next/third-parties/google'; + +export default function Layout({ children }) { + return ( + + + {children} + + ); +} +``` + +## Other Third-Party Scripts + +```tsx +// YouTube embed +import { YouTubeEmbed } from '@next/third-parties/google'; + +; + +// Google Maps +import { GoogleMapsEmbed } from '@next/third-parties/google'; + +; +``` + +## Quick Reference + +| Pattern | Issue | Fix | +| --------------------------------------------- | -------------------------- | ------------------------- | +| ` + + diff --git a/.agents/skills/skill-creator/eval-viewer/generate_review.py b/.agents/skills/skill-creator/eval-viewer/generate_review.py new file mode 100644 index 0000000..7fa5978 --- /dev/null +++ b/.agents/skills/skill-creator/eval-viewer/generate_review.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python3 +"""Generate and serve a review page for eval results. + +Reads the workspace directory, discovers runs (directories with outputs/), +embeds all output data into a self-contained HTML page, and serves it via +a tiny HTTP server. Feedback auto-saves to feedback.json in the workspace. + +Usage: + python generate_review.py [--port PORT] [--skill-name NAME] + python generate_review.py --previous-feedback /path/to/old/feedback.json + +No dependencies beyond the Python stdlib are required. 
+""" + +import argparse +import base64 +import json +import mimetypes +import os +import re +import signal +import subprocess +import sys +import time +import webbrowser +from functools import partial +from http.server import HTTPServer, BaseHTTPRequestHandler +from pathlib import Path + +# Files to exclude from output listings +METADATA_FILES = {"transcript.md", "user_notes.md", "metrics.json"} + +# Extensions we render as inline text +TEXT_EXTENSIONS = { + ".txt", ".md", ".json", ".csv", ".py", ".js", ".ts", ".tsx", ".jsx", + ".yaml", ".yml", ".xml", ".html", ".css", ".sh", ".rb", ".go", ".rs", + ".java", ".c", ".cpp", ".h", ".hpp", ".sql", ".r", ".toml", +} + +# Extensions we render as inline images +IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".svg", ".webp"} + +# MIME type overrides for common types +MIME_OVERRIDES = { + ".svg": "image/svg+xml", + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", +} + + +def get_mime_type(path: Path) -> str: + ext = path.suffix.lower() + if ext in MIME_OVERRIDES: + return MIME_OVERRIDES[ext] + mime, _ = mimetypes.guess_type(str(path)) + return mime or "application/octet-stream" + + +def find_runs(workspace: Path) -> list[dict]: + """Recursively find directories that contain an outputs/ subdirectory.""" + runs: list[dict] = [] + _find_runs_recursive(workspace, workspace, runs) + runs.sort(key=lambda r: (r.get("eval_id", float("inf")), r["id"])) + return runs + + +def _find_runs_recursive(root: Path, current: Path, runs: list[dict]) -> None: + if not current.is_dir(): + return + + outputs_dir = current / "outputs" + if outputs_dir.is_dir(): + run = build_run(root, current) + if run: + runs.append(run) + return + + skip = {"node_modules", ".git", "__pycache__", "skill", "inputs"} + for child in sorted(current.iterdir()): + 
if child.is_dir() and child.name not in skip: + _find_runs_recursive(root, child, runs) + + +def build_run(root: Path, run_dir: Path) -> dict | None: + """Build a run dict with prompt, outputs, and grading data.""" + prompt = "" + eval_id = None + + # Try eval_metadata.json + for candidate in [run_dir / "eval_metadata.json", run_dir.parent / "eval_metadata.json"]: + if candidate.exists(): + try: + metadata = json.loads(candidate.read_text()) + prompt = metadata.get("prompt", "") + eval_id = metadata.get("eval_id") + except (json.JSONDecodeError, OSError): + pass + if prompt: + break + + # Fall back to transcript.md + if not prompt: + for candidate in [run_dir / "transcript.md", run_dir / "outputs" / "transcript.md"]: + if candidate.exists(): + try: + text = candidate.read_text() + match = re.search(r"## Eval Prompt\n\n([\s\S]*?)(?=\n##|$)", text) + if match: + prompt = match.group(1).strip() + except OSError: + pass + if prompt: + break + + if not prompt: + prompt = "(No prompt found)" + + run_id = str(run_dir.relative_to(root)).replace("/", "-").replace("\\", "-") + + # Collect output files + outputs_dir = run_dir / "outputs" + output_files: list[dict] = [] + if outputs_dir.is_dir(): + for f in sorted(outputs_dir.iterdir()): + if f.is_file() and f.name not in METADATA_FILES: + output_files.append(embed_file(f)) + + # Load grading if present + grading = None + for candidate in [run_dir / "grading.json", run_dir.parent / "grading.json"]: + if candidate.exists(): + try: + grading = json.loads(candidate.read_text()) + except (json.JSONDecodeError, OSError): + pass + if grading: + break + + return { + "id": run_id, + "prompt": prompt, + "eval_id": eval_id, + "outputs": output_files, + "grading": grading, + } + + +def embed_file(path: Path) -> dict: + """Read a file and return an embedded representation.""" + ext = path.suffix.lower() + mime = get_mime_type(path) + + if ext in TEXT_EXTENSIONS: + try: + content = path.read_text(errors="replace") + except OSError: + 
content = "(Error reading file)" + return { + "name": path.name, + "type": "text", + "content": content, + } + elif ext in IMAGE_EXTENSIONS: + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "image", + "mime": mime, + "data_uri": f"data:{mime};base64,{b64}", + } + elif ext == ".pdf": + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "pdf", + "data_uri": f"data:{mime};base64,{b64}", + } + elif ext == ".xlsx": + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "xlsx", + "data_b64": b64, + } + else: + # Binary / unknown — base64 download link + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "binary", + "mime": mime, + "data_uri": f"data:{mime};base64,{b64}", + } + + +def load_previous_iteration(workspace: Path) -> dict[str, dict]: + """Load previous iteration's feedback and outputs. + + Returns a map of run_id -> {"feedback": str, "outputs": list[dict]}. 
+ """ + result: dict[str, dict] = {} + + # Load feedback + feedback_map: dict[str, str] = {} + feedback_path = workspace / "feedback.json" + if feedback_path.exists(): + try: + data = json.loads(feedback_path.read_text()) + feedback_map = { + r["run_id"]: r["feedback"] + for r in data.get("reviews", []) + if r.get("feedback", "").strip() + } + except (json.JSONDecodeError, OSError, KeyError): + pass + + # Load runs (to get outputs) + prev_runs = find_runs(workspace) + for run in prev_runs: + result[run["id"]] = { + "feedback": feedback_map.get(run["id"], ""), + "outputs": run.get("outputs", []), + } + + # Also add feedback for run_ids that had feedback but no matching run + for run_id, fb in feedback_map.items(): + if run_id not in result: + result[run_id] = {"feedback": fb, "outputs": []} + + return result + + +def generate_html( + runs: list[dict], + skill_name: str, + previous: dict[str, dict] | None = None, + benchmark: dict | None = None, +) -> str: + """Generate the complete standalone HTML page with embedded data.""" + template_path = Path(__file__).parent / "viewer.html" + template = template_path.read_text() + + # Build previous_feedback and previous_outputs maps for the template + previous_feedback: dict[str, str] = {} + previous_outputs: dict[str, list[dict]] = {} + if previous: + for run_id, data in previous.items(): + if data.get("feedback"): + previous_feedback[run_id] = data["feedback"] + if data.get("outputs"): + previous_outputs[run_id] = data["outputs"] + + embedded = { + "skill_name": skill_name, + "runs": runs, + "previous_feedback": previous_feedback, + "previous_outputs": previous_outputs, + } + if benchmark: + embedded["benchmark"] = benchmark + + data_json = json.dumps(embedded) + + return template.replace("/*__EMBEDDED_DATA__*/", f"const EMBEDDED_DATA = {data_json};") + + +# --------------------------------------------------------------------------- +# HTTP server (stdlib only, zero dependencies) +# 
--------------------------------------------------------------------------- + +def _kill_port(port: int) -> None: + """Kill any process listening on the given port.""" + try: + result = subprocess.run( + ["lsof", "-ti", f":{port}"], + capture_output=True, text=True, timeout=5, + ) + for pid_str in result.stdout.strip().split("\n"): + if pid_str.strip(): + try: + os.kill(int(pid_str.strip()), signal.SIGTERM) + except (ProcessLookupError, ValueError): + pass + if result.stdout.strip(): + time.sleep(0.5) + except subprocess.TimeoutExpired: + pass + except FileNotFoundError: + print("Note: lsof not found, cannot check if port is in use", file=sys.stderr) + +class ReviewHandler(BaseHTTPRequestHandler): + """Serves the review HTML and handles feedback saves. + + Regenerates the HTML on each page load so that refreshing the browser + picks up new eval outputs without restarting the server. + """ + + def __init__( + self, + workspace: Path, + skill_name: str, + feedback_path: Path, + previous: dict[str, dict], + benchmark_path: Path | None, + *args, + **kwargs, + ): + self.workspace = workspace + self.skill_name = skill_name + self.feedback_path = feedback_path + self.previous = previous + self.benchmark_path = benchmark_path + super().__init__(*args, **kwargs) + + def do_GET(self) -> None: + if self.path == "/" or self.path == "/index.html": + # Regenerate HTML on each request (re-scans workspace for new outputs) + runs = find_runs(self.workspace) + benchmark = None + if self.benchmark_path and self.benchmark_path.exists(): + try: + benchmark = json.loads(self.benchmark_path.read_text()) + except (json.JSONDecodeError, OSError): + pass + html = generate_html(runs, self.skill_name, self.previous, benchmark) + content = html.encode("utf-8") + self.send_response(200) + self.send_header("Content-Type", "text/html; charset=utf-8") + self.send_header("Content-Length", str(len(content))) + self.end_headers() + self.wfile.write(content) + elif self.path == "/api/feedback": + data 
= b"{}" + if self.feedback_path.exists(): + data = self.feedback_path.read_bytes() + self.send_response(200) + self.send_header("Content-Type", "application/json") + self.send_header("Content-Length", str(len(data))) + self.end_headers() + self.wfile.write(data) + else: + self.send_error(404) + + def do_POST(self) -> None: + if self.path == "/api/feedback": + length = int(self.headers.get("Content-Length", 0)) + body = self.rfile.read(length) + try: + data = json.loads(body) + if not isinstance(data, dict) or "reviews" not in data: + raise ValueError("Expected JSON object with 'reviews' key") + self.feedback_path.write_text(json.dumps(data, indent=2) + "\n") + resp = b'{"ok":true}' + self.send_response(200) + except (json.JSONDecodeError, OSError, ValueError) as e: + resp = json.dumps({"error": str(e)}).encode() + self.send_response(500) + self.send_header("Content-Type", "application/json") + self.send_header("Content-Length", str(len(resp))) + self.end_headers() + self.wfile.write(resp) + else: + self.send_error(404) + + def log_message(self, format: str, *args: object) -> None: + # Suppress request logging to keep terminal clean + pass + + +def main() -> None: + parser = argparse.ArgumentParser(description="Generate and serve eval review") + parser.add_argument("workspace", type=Path, help="Path to workspace directory") + parser.add_argument("--port", "-p", type=int, default=3117, help="Server port (default: 3117)") + parser.add_argument("--skill-name", "-n", type=str, default=None, help="Skill name for header") + parser.add_argument( + "--previous-workspace", type=Path, default=None, + help="Path to previous iteration's workspace (shows old outputs and feedback as context)", + ) + parser.add_argument( + "--benchmark", type=Path, default=None, + help="Path to benchmark.json to show in the Benchmark tab", + ) + parser.add_argument( + "--static", "-s", type=Path, default=None, + help="Write standalone HTML to this path instead of starting a server", + ) + args = 
parser.parse_args() + + workspace = args.workspace.resolve() + if not workspace.is_dir(): + print(f"Error: {workspace} is not a directory", file=sys.stderr) + sys.exit(1) + + runs = find_runs(workspace) + if not runs: + print(f"No runs found in {workspace}", file=sys.stderr) + sys.exit(1) + + skill_name = args.skill_name or workspace.name.replace("-workspace", "") + feedback_path = workspace / "feedback.json" + + previous: dict[str, dict] = {} + if args.previous_workspace: + previous = load_previous_iteration(args.previous_workspace.resolve()) + + benchmark_path = args.benchmark.resolve() if args.benchmark else None + benchmark = None + if benchmark_path and benchmark_path.exists(): + try: + benchmark = json.loads(benchmark_path.read_text()) + except (json.JSONDecodeError, OSError): + pass + + if args.static: + html = generate_html(runs, skill_name, previous, benchmark) + args.static.parent.mkdir(parents=True, exist_ok=True) + args.static.write_text(html) + print(f"\n Static viewer written to: {args.static}\n") + sys.exit(0) + + # Kill any existing process on the target port + port = args.port + _kill_port(port) + handler = partial(ReviewHandler, workspace, skill_name, feedback_path, previous, benchmark_path) + try: + server = HTTPServer(("127.0.0.1", port), handler) + except OSError: + # Port still in use after kill attempt — find a free one + server = HTTPServer(("127.0.0.1", 0), handler) + port = server.server_address[1] + + url = f"http://localhost:{port}" + print(f"\n Eval Viewer") + print(f" ─────────────────────────────────") + print(f" URL: {url}") + print(f" Workspace: {workspace}") + print(f" Feedback: {feedback_path}") + if previous: + print(f" Previous: {args.previous_workspace} ({len(previous)} runs)") + if benchmark_path: + print(f" Benchmark: {benchmark_path}") + print(f"\n Press Ctrl+C to stop.\n") + + webbrowser.open(url) + + try: + server.serve_forever() + except KeyboardInterrupt: + print("\nStopped.") + server.server_close() + + +if __name__ == 
"__main__": + main() diff --git a/.agents/skills/skill-creator/eval-viewer/viewer.html b/.agents/skills/skill-creator/eval-viewer/viewer.html new file mode 100644 index 0000000..0db8387 --- /dev/null +++ b/.agents/skills/skill-creator/eval-viewer/viewer.html @@ -0,0 +1,1478 @@ + + + + + + Eval Review + + + + + + + +
+
+
+

Eval Review:

+
+ Review each output and leave feedback below. Navigate with arrow keys or buttons. When + done, copy feedback and paste into Claude Code. +
+
+
+
+ + + + + +
+
+ +
+
+ Prompt +
+
+
+
+
+ + +
+
Output
+
+
No output files found
+
+
+ + + + + + + + +
+
Your Feedback
+
+ + + +
+
+
+ + +
+ + + +
+
+
+ No benchmark data available. Run a benchmark to see quantitative results here. +
+
+
+
+ + +
+
+

Review Complete

+

+ Your feedback has been saved. Go back to your Claude Code session and tell Claude you're + done reviewing. +

+
+ +
+
+
+ + +
+ + + + diff --git a/.agents/skills/skill-creator/references/schemas.md b/.agents/skills/skill-creator/references/schemas.md new file mode 100644 index 0000000..effe351 --- /dev/null +++ b/.agents/skills/skill-creator/references/schemas.md @@ -0,0 +1,423 @@ +# JSON Schemas + +This document defines the JSON schemas used by skill-creator. + +--- + +## evals.json + +Defines the evals for a skill. Located at `evals/evals.json` within the skill directory. + +```json +{ + "skill_name": "example-skill", + "evals": [ + { + "id": 1, + "prompt": "User's example prompt", + "expected_output": "Description of expected result", + "files": ["evals/files/sample1.pdf"], + "expectations": ["The output includes X", "The skill used script Y"] + } + ] +} +``` + +**Fields:** + +- `skill_name`: Name matching the skill's frontmatter +- `evals[].id`: Unique integer identifier +- `evals[].prompt`: The task to execute +- `evals[].expected_output`: Human-readable description of success +- `evals[].files`: Optional list of input file paths (relative to skill root) +- `evals[].expectations`: List of verifiable statements + +--- + +## history.json + +Tracks version progression in Improve mode. Located at workspace root. + +```json +{ + "started_at": "2026-01-15T10:30:00Z", + "skill_name": "pdf", + "current_best": "v2", + "iterations": [ + { + "version": "v0", + "parent": null, + "expectation_pass_rate": 0.65, + "grading_result": "baseline", + "is_current_best": false + }, + { + "version": "v1", + "parent": "v0", + "expectation_pass_rate": 0.75, + "grading_result": "won", + "is_current_best": false + }, + { + "version": "v2", + "parent": "v1", + "expectation_pass_rate": 0.85, + "grading_result": "won", + "is_current_best": true + } + ] +} +``` + +**Fields:** + +- `started_at`: ISO timestamp of when improvement started +- `skill_name`: Name of the skill being improved +- `current_best`: Version identifier of the best performer +- `iterations[].version`: Version identifier (v0, v1, ...) 
+- `iterations[].parent`: Parent version this was derived from +- `iterations[].expectation_pass_rate`: Pass rate from grading +- `iterations[].grading_result`: "baseline", "won", "lost", or "tie" +- `iterations[].is_current_best`: Whether this is the current best version + +--- + +## grading.json + +Output from the grader agent. Located at `/grading.json`. + +```json +{ + "expectations": [ + { + "text": "The output includes the name 'John Smith'", + "passed": true, + "evidence": "Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'" + }, + { + "text": "The spreadsheet has a SUM formula in cell B10", + "passed": false, + "evidence": "No spreadsheet was created. The output was a text file." + } + ], + "summary": { + "passed": 2, + "failed": 1, + "total": 3, + "pass_rate": 0.67 + }, + "execution_metrics": { + "tool_calls": { + "Read": 5, + "Write": 2, + "Bash": 8 + }, + "total_tool_calls": 15, + "total_steps": 6, + "errors_encountered": 0, + "output_chars": 12450, + "transcript_chars": 3200 + }, + "timing": { + "executor_duration_seconds": 165.0, + "grader_duration_seconds": 26.0, + "total_duration_seconds": 191.0 + }, + "claims": [ + { + "claim": "The form has 12 fillable fields", + "type": "factual", + "verified": true, + "evidence": "Counted 12 fields in field_info.json" + } + ], + "user_notes_summary": { + "uncertainties": ["Used 2023 data, may be stale"], + "needs_review": [], + "workarounds": ["Fell back to text overlay for non-fillable fields"] + }, + "eval_feedback": { + "suggestions": [ + { + "assertion": "The output includes the name 'John Smith'", + "reason": "A hallucinated document that mentions the name would also pass" + } + ], + "overall": "Assertions check presence but not correctness." 
+ } +} +``` + +**Fields:** + +- `expectations[]`: Graded expectations with evidence +- `summary`: Aggregate pass/fail counts +- `execution_metrics`: Tool usage and output size (from executor's metrics.json) +- `timing`: Wall clock timing (from timing.json) +- `claims`: Extracted and verified claims from the output +- `user_notes_summary`: Issues flagged by the executor +- `eval_feedback`: (optional) Improvement suggestions for the evals, only present when the grader identifies issues worth raising + +--- + +## metrics.json + +Output from the executor agent. Located at `/outputs/metrics.json`. + +```json +{ + "tool_calls": { + "Read": 5, + "Write": 2, + "Bash": 8, + "Edit": 1, + "Glob": 2, + "Grep": 0 + }, + "total_tool_calls": 18, + "total_steps": 6, + "files_created": ["filled_form.pdf", "field_values.json"], + "errors_encountered": 0, + "output_chars": 12450, + "transcript_chars": 3200 +} +``` + +**Fields:** + +- `tool_calls`: Count per tool type +- `total_tool_calls`: Sum of all tool calls +- `total_steps`: Number of major execution steps +- `files_created`: List of output files created +- `errors_encountered`: Number of errors during execution +- `output_chars`: Total character count of output files +- `transcript_chars`: Character count of transcript + +--- + +## timing.json + +Wall clock timing for a run. Located at `/timing.json`. + +**How to capture:** When a subagent task completes, the task notification includes `total_tokens` and `duration_ms`. Save these immediately — they are not persisted anywhere else and cannot be recovered after the fact. + +```json +{ + "total_tokens": 84852, + "duration_ms": 23332, + "total_duration_seconds": 23.3, + "executor_start": "2026-01-15T10:30:00Z", + "executor_end": "2026-01-15T10:32:45Z", + "executor_duration_seconds": 165.0, + "grader_start": "2026-01-15T10:32:46Z", + "grader_end": "2026-01-15T10:33:12Z", + "grader_duration_seconds": 26.0 +} +``` + +--- + +## benchmark.json + +Output from Benchmark mode. 
Located at `benchmarks//benchmark.json`. + +```json +{ + "metadata": { + "skill_name": "pdf", + "skill_path": "/path/to/pdf", + "executor_model": "claude-sonnet-4-20250514", + "analyzer_model": "most-capable-model", + "timestamp": "2026-01-15T10:30:00Z", + "evals_run": [1, 2, 3], + "runs_per_configuration": 3 + }, + + "runs": [ + { + "eval_id": 1, + "eval_name": "Ocean", + "configuration": "with_skill", + "run_number": 1, + "result": { + "pass_rate": 0.85, + "passed": 6, + "failed": 1, + "total": 7, + "time_seconds": 42.5, + "tokens": 3800, + "tool_calls": 18, + "errors": 0 + }, + "expectations": [{ "text": "...", "passed": true, "evidence": "..." }], + "notes": ["Used 2023 data, may be stale", "Fell back to text overlay for non-fillable fields"] + } + ], + + "run_summary": { + "with_skill": { + "pass_rate": { "mean": 0.85, "stddev": 0.05, "min": 0.8, "max": 0.9 }, + "time_seconds": { "mean": 45.0, "stddev": 12.0, "min": 32.0, "max": 58.0 }, + "tokens": { "mean": 3800, "stddev": 400, "min": 3200, "max": 4100 } + }, + "without_skill": { + "pass_rate": { "mean": 0.35, "stddev": 0.08, "min": 0.28, "max": 0.45 }, + "time_seconds": { "mean": 32.0, "stddev": 8.0, "min": 24.0, "max": 42.0 }, + "tokens": { "mean": 2100, "stddev": 300, "min": 1800, "max": 2500 } + }, + "delta": { + "pass_rate": "+0.50", + "time_seconds": "+13.0", + "tokens": "+1700" + } + }, + + "notes": [ + "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value", + "Eval 3 shows high variance (50% ± 40%) - may be flaky or model-dependent", + "Without-skill runs consistently fail on table extraction expectations", + "Skill adds 13s average execution time but improves pass rate by 50%" + ] +} +``` + +**Fields:** + +- `metadata`: Information about the benchmark run + - `skill_name`: Name of the skill + - `timestamp`: When the benchmark was run + - `evals_run`: List of eval names or IDs + - `runs_per_configuration`: Number of runs per config (e.g. 
3) +- `runs[]`: Individual run results + - `eval_id`: Numeric eval identifier + - `eval_name`: Human-readable eval name (used as section header in the viewer) + - `configuration`: Must be `"with_skill"` or `"without_skill"` (the viewer uses this exact string for grouping and color coding) + - `run_number`: Integer run number (1, 2, 3...) + - `result`: Nested object with `pass_rate`, `passed`, `total`, `time_seconds`, `tokens`, `errors` +- `run_summary`: Statistical aggregates per configuration + - `with_skill` / `without_skill`: Each contains `pass_rate`, `time_seconds`, `tokens` objects with `mean` and `stddev` fields + - `delta`: Difference strings like `"+0.50"`, `"+13.0"`, `"+1700"` +- `notes`: Freeform observations from the analyzer + +**Important:** The viewer reads these field names exactly. Using `config` instead of `configuration`, or putting `pass_rate` at the top level of a run instead of nested under `result`, will cause the viewer to show empty/zero values. Always reference this schema when generating benchmark.json manually. + +--- + +## comparison.json + +Output from blind comparator. Located at `/comparison-N.json`. + +```json +{ + "winner": "A", + "reasoning": "Output A provides a complete solution with proper formatting and all required fields. 
Output B is missing the date field and has formatting inconsistencies.", + "rubric": { + "A": { + "content": { + "correctness": 5, + "completeness": 5, + "accuracy": 4 + }, + "structure": { + "organization": 4, + "formatting": 5, + "usability": 4 + }, + "content_score": 4.7, + "structure_score": 4.3, + "overall_score": 9.0 + }, + "B": { + "content": { + "correctness": 3, + "completeness": 2, + "accuracy": 3 + }, + "structure": { + "organization": 3, + "formatting": 2, + "usability": 3 + }, + "content_score": 2.7, + "structure_score": 2.7, + "overall_score": 5.4 + } + }, + "output_quality": { + "A": { + "score": 9, + "strengths": ["Complete solution", "Well-formatted", "All fields present"], + "weaknesses": ["Minor style inconsistency in header"] + }, + "B": { + "score": 5, + "strengths": ["Readable output", "Correct basic structure"], + "weaknesses": ["Missing date field", "Formatting inconsistencies", "Partial data extraction"] + } + }, + "expectation_results": { + "A": { + "passed": 4, + "total": 5, + "pass_rate": 0.8, + "details": [{ "text": "Output includes name", "passed": true }] + }, + "B": { + "passed": 3, + "total": 5, + "pass_rate": 0.6, + "details": [{ "text": "Output includes name", "passed": true }] + } + } +} +``` + +--- + +## analysis.json + +Output from post-hoc analyzer. Located at `/analysis.json`. 
+ +```json +{ + "comparison_summary": { + "winner": "A", + "winner_skill": "path/to/winner/skill", + "loser_skill": "path/to/loser/skill", + "comparator_reasoning": "Brief summary of why comparator chose winner" + }, + "winner_strengths": [ + "Clear step-by-step instructions for handling multi-page documents", + "Included validation script that caught formatting errors" + ], + "loser_weaknesses": [ + "Vague instruction 'process the document appropriately' led to inconsistent behavior", + "No script for validation, agent had to improvise" + ], + "instruction_following": { + "winner": { + "score": 9, + "issues": ["Minor: skipped optional logging step"] + }, + "loser": { + "score": 6, + "issues": [ + "Did not use the skill's formatting template", + "Invented own approach instead of following step 3" + ] + } + }, + "improvement_suggestions": [ + { + "priority": "high", + "category": "instructions", + "suggestion": "Replace 'process the document appropriately' with explicit steps", + "expected_impact": "Would eliminate ambiguity that caused inconsistent behavior" + } + ], + "transcript_insights": { + "winner_execution_pattern": "Read skill -> Followed 5-step process -> Used validation script", + "loser_execution_pattern": "Read skill -> Unclear on approach -> Tried 3 different methods" + } +} +``` diff --git a/.agents/skills/skill-creator/scripts/__init__.py b/.agents/skills/skill-creator/scripts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/.agents/skills/skill-creator/scripts/aggregate_benchmark.py b/.agents/skills/skill-creator/scripts/aggregate_benchmark.py new file mode 100644 index 0000000..3e66e8c --- /dev/null +++ b/.agents/skills/skill-creator/scripts/aggregate_benchmark.py @@ -0,0 +1,401 @@ +#!/usr/bin/env python3 +""" +Aggregate individual run results into benchmark summary statistics. 
+ +Reads grading.json files from run directories and produces: +- run_summary with mean, stddev, min, max for each metric +- delta between with_skill and without_skill configurations + +Usage: + python aggregate_benchmark.py + +Example: + python aggregate_benchmark.py benchmarks/2026-01-15T10-30-00/ + +The script supports two directory layouts: + + Workspace layout (from skill-creator iterations): + / + └── eval-N/ + ├── with_skill/ + │ ├── run-1/grading.json + │ └── run-2/grading.json + └── without_skill/ + ├── run-1/grading.json + └── run-2/grading.json + + Legacy layout (with runs/ subdirectory): + / + └── runs/ + └── eval-N/ + ├── with_skill/ + │ └── run-1/grading.json + └── without_skill/ + └── run-1/grading.json +""" + +import argparse +import json +import math +import sys +from datetime import datetime, timezone +from pathlib import Path + + +def calculate_stats(values: list[float]) -> dict: + """Calculate mean, stddev, min, max for a list of values.""" + if not values: + return {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0} + + n = len(values) + mean = sum(values) / n + + if n > 1: + variance = sum((x - mean) ** 2 for x in values) / (n - 1) + stddev = math.sqrt(variance) + else: + stddev = 0.0 + + return { + "mean": round(mean, 4), + "stddev": round(stddev, 4), + "min": round(min(values), 4), + "max": round(max(values), 4) + } + + +def load_run_results(benchmark_dir: Path) -> dict: + """ + Load all run results from a benchmark directory. + + Returns dict keyed by config name (e.g. "with_skill"/"without_skill", + or "new_skill"/"old_skill"), each containing a list of run results. 
+ """ + # Support both layouts: eval dirs directly under benchmark_dir, or under runs/ + runs_dir = benchmark_dir / "runs" + if runs_dir.exists(): + search_dir = runs_dir + elif list(benchmark_dir.glob("eval-*")): + search_dir = benchmark_dir + else: + print(f"No eval directories found in {benchmark_dir} or {benchmark_dir / 'runs'}") + return {} + + results: dict[str, list] = {} + + for eval_idx, eval_dir in enumerate(sorted(search_dir.glob("eval-*"))): + metadata_path = eval_dir / "eval_metadata.json" + if metadata_path.exists(): + try: + with open(metadata_path) as mf: + eval_id = json.load(mf).get("eval_id", eval_idx) + except (json.JSONDecodeError, OSError): + eval_id = eval_idx + else: + try: + eval_id = int(eval_dir.name.split("-")[1]) + except ValueError: + eval_id = eval_idx + + # Discover config directories dynamically rather than hardcoding names + for config_dir in sorted(eval_dir.iterdir()): + if not config_dir.is_dir(): + continue + # Skip non-config directories (inputs, outputs, etc.) 
+ if not list(config_dir.glob("run-*")): + continue + config = config_dir.name + if config not in results: + results[config] = [] + + for run_dir in sorted(config_dir.glob("run-*")): + run_number = int(run_dir.name.split("-")[1]) + grading_file = run_dir / "grading.json" + + if not grading_file.exists(): + print(f"Warning: grading.json not found in {run_dir}") + continue + + try: + with open(grading_file) as f: + grading = json.load(f) + except json.JSONDecodeError as e: + print(f"Warning: Invalid JSON in {grading_file}: {e}") + continue + + # Extract metrics + result = { + "eval_id": eval_id, + "run_number": run_number, + "pass_rate": grading.get("summary", {}).get("pass_rate", 0.0), + "passed": grading.get("summary", {}).get("passed", 0), + "failed": grading.get("summary", {}).get("failed", 0), + "total": grading.get("summary", {}).get("total", 0), + } + + # Extract timing — check grading.json first, then sibling timing.json + timing = grading.get("timing", {}) + result["time_seconds"] = timing.get("total_duration_seconds", 0.0) + timing_file = run_dir / "timing.json" + if result["time_seconds"] == 0.0 and timing_file.exists(): + try: + with open(timing_file) as tf: + timing_data = json.load(tf) + result["time_seconds"] = timing_data.get("total_duration_seconds", 0.0) + result["tokens"] = timing_data.get("total_tokens", 0) + except json.JSONDecodeError: + pass + + # Extract metrics if available + metrics = grading.get("execution_metrics", {}) + result["tool_calls"] = metrics.get("total_tool_calls", 0) + if not result.get("tokens"): + result["tokens"] = metrics.get("output_chars", 0) + result["errors"] = metrics.get("errors_encountered", 0) + + # Extract expectations — viewer requires fields: text, passed, evidence + raw_expectations = grading.get("expectations", []) + for exp in raw_expectations: + if "text" not in exp or "passed" not in exp: + print(f"Warning: expectation in {grading_file} missing required fields (text, passed, evidence): {exp}") + 
result["expectations"] = raw_expectations + + # Extract notes from user_notes_summary + notes_summary = grading.get("user_notes_summary", {}) + notes = [] + notes.extend(notes_summary.get("uncertainties", [])) + notes.extend(notes_summary.get("needs_review", [])) + notes.extend(notes_summary.get("workarounds", [])) + result["notes"] = notes + + results[config].append(result) + + return results + + +def aggregate_results(results: dict) -> dict: + """ + Aggregate run results into summary statistics. + + Returns run_summary with stats for each configuration and delta. + """ + run_summary = {} + configs = list(results.keys()) + + for config in configs: + runs = results.get(config, []) + + if not runs: + run_summary[config] = { + "pass_rate": {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0}, + "time_seconds": {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0}, + "tokens": {"mean": 0, "stddev": 0, "min": 0, "max": 0} + } + continue + + pass_rates = [r["pass_rate"] for r in runs] + times = [r["time_seconds"] for r in runs] + tokens = [r.get("tokens", 0) for r in runs] + + run_summary[config] = { + "pass_rate": calculate_stats(pass_rates), + "time_seconds": calculate_stats(times), + "tokens": calculate_stats(tokens) + } + + # Calculate delta between the first two configs (if two exist) + if len(configs) >= 2: + primary = run_summary.get(configs[0], {}) + baseline = run_summary.get(configs[1], {}) + else: + primary = run_summary.get(configs[0], {}) if configs else {} + baseline = {} + + delta_pass_rate = primary.get("pass_rate", {}).get("mean", 0) - baseline.get("pass_rate", {}).get("mean", 0) + delta_time = primary.get("time_seconds", {}).get("mean", 0) - baseline.get("time_seconds", {}).get("mean", 0) + delta_tokens = primary.get("tokens", {}).get("mean", 0) - baseline.get("tokens", {}).get("mean", 0) + + run_summary["delta"] = { + "pass_rate": f"{delta_pass_rate:+.2f}", + "time_seconds": f"{delta_time:+.1f}", + "tokens": f"{delta_tokens:+.0f}" + } + + return 
run_summary + + +def generate_benchmark(benchmark_dir: Path, skill_name: str = "", skill_path: str = "") -> dict: + """ + Generate complete benchmark.json from run results. + """ + results = load_run_results(benchmark_dir) + run_summary = aggregate_results(results) + + # Build runs array for benchmark.json + runs = [] + for config in results: + for result in results[config]: + runs.append({ + "eval_id": result["eval_id"], + "configuration": config, + "run_number": result["run_number"], + "result": { + "pass_rate": result["pass_rate"], + "passed": result["passed"], + "failed": result["failed"], + "total": result["total"], + "time_seconds": result["time_seconds"], + "tokens": result.get("tokens", 0), + "tool_calls": result.get("tool_calls", 0), + "errors": result.get("errors", 0) + }, + "expectations": result["expectations"], + "notes": result["notes"] + }) + + # Determine eval IDs from results + eval_ids = sorted(set( + r["eval_id"] + for config in results.values() + for r in config + )) + + benchmark = { + "metadata": { + "skill_name": skill_name or "", + "skill_path": skill_path or "", + "executor_model": "", + "analyzer_model": "", + "timestamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"), + "evals_run": eval_ids, + "runs_per_configuration": 3 + }, + "runs": runs, + "run_summary": run_summary, + "notes": [] # To be filled by analyzer + } + + return benchmark + + +def generate_markdown(benchmark: dict) -> str: + """Generate human-readable benchmark.md from benchmark data.""" + metadata = benchmark["metadata"] + run_summary = benchmark["run_summary"] + + # Determine config names (excluding "delta") + configs = [k for k in run_summary if k != "delta"] + config_a = configs[0] if len(configs) >= 1 else "config_a" + config_b = configs[1] if len(configs) >= 2 else "config_b" + label_a = config_a.replace("_", " ").title() + label_b = config_b.replace("_", " ").title() + + lines = [ + f"# Skill Benchmark: {metadata['skill_name']}", + "", + f"**Model**: 
{metadata['executor_model']}", + f"**Date**: {metadata['timestamp']}", + f"**Evals**: {', '.join(map(str, metadata['evals_run']))} ({metadata['runs_per_configuration']} runs each per configuration)", + "", + "## Summary", + "", + f"| Metric | {label_a} | {label_b} | Delta |", + "|--------|------------|---------------|-------|", + ] + + a_summary = run_summary.get(config_a, {}) + b_summary = run_summary.get(config_b, {}) + delta = run_summary.get("delta", {}) + + # Format pass rate + a_pr = a_summary.get("pass_rate", {}) + b_pr = b_summary.get("pass_rate", {}) + lines.append(f"| Pass Rate | {a_pr.get('mean', 0)*100:.0f}% ± {a_pr.get('stddev', 0)*100:.0f}% | {b_pr.get('mean', 0)*100:.0f}% ± {b_pr.get('stddev', 0)*100:.0f}% | {delta.get('pass_rate', '—')} |") + + # Format time + a_time = a_summary.get("time_seconds", {}) + b_time = b_summary.get("time_seconds", {}) + lines.append(f"| Time | {a_time.get('mean', 0):.1f}s ± {a_time.get('stddev', 0):.1f}s | {b_time.get('mean', 0):.1f}s ± {b_time.get('stddev', 0):.1f}s | {delta.get('time_seconds', '—')}s |") + + # Format tokens + a_tokens = a_summary.get("tokens", {}) + b_tokens = b_summary.get("tokens", {}) + lines.append(f"| Tokens | {a_tokens.get('mean', 0):.0f} ± {a_tokens.get('stddev', 0):.0f} | {b_tokens.get('mean', 0):.0f} ± {b_tokens.get('stddev', 0):.0f} | {delta.get('tokens', '—')} |") + + # Notes section + if benchmark.get("notes"): + lines.extend([ + "", + "## Notes", + "" + ]) + for note in benchmark["notes"]: + lines.append(f"- {note}") + + return "\n".join(lines) + + +def main(): + parser = argparse.ArgumentParser( + description="Aggregate benchmark run results into summary statistics" + ) + parser.add_argument( + "benchmark_dir", + type=Path, + help="Path to the benchmark directory" + ) + parser.add_argument( + "--skill-name", + default="", + help="Name of the skill being benchmarked" + ) + parser.add_argument( + "--skill-path", + default="", + help="Path to the skill being benchmarked" + ) + 
parser.add_argument( + "--output", "-o", + type=Path, + help="Output path for benchmark.json (default: /benchmark.json)" + ) + + args = parser.parse_args() + + if not args.benchmark_dir.exists(): + print(f"Directory not found: {args.benchmark_dir}") + sys.exit(1) + + # Generate benchmark + benchmark = generate_benchmark(args.benchmark_dir, args.skill_name, args.skill_path) + + # Determine output paths + output_json = args.output or (args.benchmark_dir / "benchmark.json") + output_md = output_json.with_suffix(".md") + + # Write benchmark.json + with open(output_json, "w") as f: + json.dump(benchmark, f, indent=2) + print(f"Generated: {output_json}") + + # Write benchmark.md + markdown = generate_markdown(benchmark) + with open(output_md, "w") as f: + f.write(markdown) + print(f"Generated: {output_md}") + + # Print summary + run_summary = benchmark["run_summary"] + configs = [k for k in run_summary if k != "delta"] + delta = run_summary.get("delta", {}) + + print(f"\nSummary:") + for config in configs: + pr = run_summary[config]["pass_rate"]["mean"] + label = config.replace("_", " ").title() + print(f" {label}: {pr*100:.1f}% pass rate") + print(f" Delta: {delta.get('pass_rate', '—')}") + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/generate_report.py b/.agents/skills/skill-creator/scripts/generate_report.py new file mode 100644 index 0000000..959e30a --- /dev/null +++ b/.agents/skills/skill-creator/scripts/generate_report.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python3 +"""Generate an HTML report from run_loop.py output. + +Takes the JSON output from run_loop.py and generates a visual HTML report +showing each description attempt with check/x for each test case. +Distinguishes between train and test queries. 
+""" + +import argparse +import html +import json +import sys +from pathlib import Path + + +def generate_html(data: dict, auto_refresh: bool = False, skill_name: str = "") -> str: + """Generate HTML report from loop output data. If auto_refresh is True, adds a meta refresh tag.""" + history = data.get("history", []) + holdout = data.get("holdout", 0) + title_prefix = html.escape(skill_name + " \u2014 ") if skill_name else "" + + # Get all unique queries from train and test sets, with should_trigger info + train_queries: list[dict] = [] + test_queries: list[dict] = [] + if history: + for r in history[0].get("train_results", history[0].get("results", [])): + train_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)}) + if history[0].get("test_results"): + for r in history[0].get("test_results", []): + test_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)}) + + refresh_tag = ' \n' if auto_refresh else "" + + html_parts = [""" + + + +""" + refresh_tag + """ """ + title_prefix + """Skill Description Optimization + + + + + + +

""" + title_prefix + """Skill Description Optimization

+
+ Optimizing your skill's description. This page updates automatically as Claude tests different versions of your skill's description. Each row is an iteration — a new description attempt. The columns show test queries: green checkmarks mean the skill triggered correctly (or correctly didn't trigger), red crosses mean it got it wrong. The "Train" score shows performance on queries used to improve the description; the "Test" score shows performance on held-out queries the optimizer hasn't seen. When it's done, Claude will apply the best-performing description to your skill. +
+"""] + + # Summary section + best_test_score = data.get('best_test_score') + best_train_score = data.get('best_train_score') + html_parts.append(f""" +
+

Original: {html.escape(data.get('original_description', 'N/A'))}

+

Best: {html.escape(data.get('best_description', 'N/A'))}

+

Best Score: {data.get('best_score', 'N/A')} {'(test)' if best_test_score else '(train)'}

+

Iterations: {data.get('iterations_run', 0)} | Train: {data.get('train_size', '?')} | Test: {data.get('test_size', '?')}

+
+""") + + # Legend + html_parts.append(""" +
+ Query columns: + Should trigger + Should NOT trigger + Train + Test +
+""") + + # Table header + html_parts.append(""" +
+ + + + + + + +""") + + # Add column headers for train queries + for qinfo in train_queries: + polarity = "positive-col" if qinfo["should_trigger"] else "negative-col" + html_parts.append(f' \n') + + # Add column headers for test queries (different color) + for qinfo in test_queries: + polarity = "positive-col" if qinfo["should_trigger"] else "negative-col" + html_parts.append(f' \n') + + html_parts.append(""" + + +""") + + # Find best iteration for highlighting + if test_queries: + best_iter = max(history, key=lambda h: h.get("test_passed") or 0).get("iteration") + else: + best_iter = max(history, key=lambda h: h.get("train_passed", h.get("passed", 0))).get("iteration") + + # Add rows for each iteration + for h in history: + iteration = h.get("iteration", "?") + train_passed = h.get("train_passed", h.get("passed", 0)) + train_total = h.get("train_total", h.get("total", 0)) + test_passed = h.get("test_passed") + test_total = h.get("test_total") + description = h.get("description", "") + train_results = h.get("train_results", h.get("results", [])) + test_results = h.get("test_results", []) + + # Create lookups for results by query + train_by_query = {r["query"]: r for r in train_results} + test_by_query = {r["query"]: r for r in test_results} if test_results else {} + + # Compute aggregate correct/total runs across all retries + def aggregate_runs(results: list[dict]) -> tuple[int, int]: + correct = 0 + total = 0 + for r in results: + runs = r.get("runs", 0) + triggers = r.get("triggers", 0) + total += runs + if r.get("should_trigger", True): + correct += triggers + else: + correct += runs - triggers + return correct, total + + train_correct, train_runs = aggregate_runs(train_results) + test_correct, test_runs = aggregate_runs(test_results) + + # Determine score classes + def score_class(correct: int, total: int) -> str: + if total > 0: + ratio = correct / total + if ratio >= 0.8: + return "score-good" + elif ratio >= 0.5: + return "score-ok" + return "score-bad" + 
+ train_class = score_class(train_correct, train_runs) + test_class = score_class(test_correct, test_runs) + + row_class = "best-row" if iteration == best_iter else "" + + html_parts.append(f""" + + + + +""") + + # Add result for each train query + for qinfo in train_queries: + r = train_by_query.get(qinfo["query"], {}) + did_pass = r.get("pass", False) + triggers = r.get("triggers", 0) + runs = r.get("runs", 0) + + icon = "✓" if did_pass else "✗" + css_class = "pass" if did_pass else "fail" + + html_parts.append(f' \n') + + # Add result for each test query (with different background) + for qinfo in test_queries: + r = test_by_query.get(qinfo["query"], {}) + did_pass = r.get("pass", False) + triggers = r.get("triggers", 0) + runs = r.get("runs", 0) + + icon = "✓" if did_pass else "✗" + css_class = "pass" if did_pass else "fail" + + html_parts.append(f' \n') + + html_parts.append(" \n") + + html_parts.append(""" +
IterTrainTestDescription{html.escape(qinfo["query"])}{html.escape(qinfo["query"])}
{iteration}{train_correct}/{train_runs}{test_correct}/{test_runs}{html.escape(description)}{icon}{triggers}/{runs}{icon}{triggers}/{runs}
+
+""") + + html_parts.append(""" + + +""") + + return "".join(html_parts) + + +def main(): + parser = argparse.ArgumentParser(description="Generate HTML report from run_loop output") + parser.add_argument("input", help="Path to JSON output from run_loop.py (or - for stdin)") + parser.add_argument("-o", "--output", default=None, help="Output HTML file (default: stdout)") + parser.add_argument("--skill-name", default="", help="Skill name to include in the report title") + args = parser.parse_args() + + if args.input == "-": + data = json.load(sys.stdin) + else: + data = json.loads(Path(args.input).read_text()) + + html_output = generate_html(data, skill_name=args.skill_name) + + if args.output: + Path(args.output).write_text(html_output) + print(f"Report written to {args.output}", file=sys.stderr) + else: + print(html_output) + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/improve_description.py b/.agents/skills/skill-creator/scripts/improve_description.py new file mode 100644 index 0000000..06bcec7 --- /dev/null +++ b/.agents/skills/skill-creator/scripts/improve_description.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python3 +"""Improve a skill description based on eval results. + +Takes eval results (from run_eval.py) and generates an improved description +by calling `claude -p` as a subprocess (same auth pattern as run_eval.py — +uses the session's Claude Code auth, no separate ANTHROPIC_API_KEY needed). +""" + +import argparse +import json +import os +import re +import subprocess +import sys +from pathlib import Path + +from scripts.utils import parse_skill_md + + +def _call_claude(prompt: str, model: str | None, timeout: int = 300) -> str: + """Run `claude -p` with the prompt on stdin and return the text response. + + Prompt goes over stdin (not argv) because it embeds the full SKILL.md + body and can easily exceed comfortable argv length. 
+ """ + cmd = ["claude", "-p", "--output-format", "text"] + if model: + cmd.extend(["--model", model]) + + # Remove CLAUDECODE env var to allow nesting claude -p inside a + # Claude Code session. The guard is for interactive terminal conflicts; + # programmatic subprocess usage is safe. Same pattern as run_eval.py. + env = {k: v for k, v in os.environ.items() if k != "CLAUDECODE"} + + result = subprocess.run( + cmd, + input=prompt, + capture_output=True, + text=True, + env=env, + timeout=timeout, + ) + if result.returncode != 0: + raise RuntimeError( + f"claude -p exited {result.returncode}\nstderr: {result.stderr}" + ) + return result.stdout + + +def improve_description( + skill_name: str, + skill_content: str, + current_description: str, + eval_results: dict, + history: list[dict], + model: str, + test_results: dict | None = None, + log_dir: Path | None = None, + iteration: int | None = None, +) -> str: + """Call Claude to improve the description based on eval results.""" + failed_triggers = [ + r for r in eval_results["results"] + if r["should_trigger"] and not r["pass"] + ] + false_triggers = [ + r for r in eval_results["results"] + if not r["should_trigger"] and not r["pass"] + ] + + # Build scores summary + train_score = f"{eval_results['summary']['passed']}/{eval_results['summary']['total']}" + if test_results: + test_score = f"{test_results['summary']['passed']}/{test_results['summary']['total']}" + scores_summary = f"Train: {train_score}, Test: {test_score}" + else: + scores_summary = f"Train: {train_score}" + + prompt = f"""You are optimizing a skill description for a Claude Code skill called "{skill_name}". 
A "skill" is sort of like a prompt, but with progressive disclosure -- there's a title and description that Claude sees when deciding whether to use the skill, and then if it does use the skill, it reads the .md file which has lots more details and potentially links to other resources in the skill folder like helper files and scripts and additional documentation or examples. + +The description appears in Claude's "available_skills" list. When a user sends a query, Claude decides whether to invoke the skill based solely on the title and on this description. Your goal is to write a description that triggers for relevant queries, and doesn't trigger for irrelevant ones. + +Here's the current description: + +"{current_description}" + + +Current scores ({scores_summary}): + +""" + if failed_triggers: + prompt += "FAILED TO TRIGGER (should have triggered but didn't):\n" + for r in failed_triggers: + prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n' + prompt += "\n" + + if false_triggers: + prompt += "FALSE TRIGGERS (triggered but shouldn't have):\n" + for r in false_triggers: + prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n' + prompt += "\n" + + if history: + prompt += "PREVIOUS ATTEMPTS (do NOT repeat these — try something structurally different):\n\n" + for h in history: + train_s = f"{h.get('train_passed', h.get('passed', 0))}/{h.get('train_total', h.get('total', 0))}" + test_s = f"{h.get('test_passed', '?')}/{h.get('test_total', '?')}" if h.get('test_passed') is not None else None + score_str = f"train={train_s}" + (f", test={test_s}" if test_s else "") + prompt += f'\n' + prompt += f'Description: "{h["description"]}"\n' + if "results" in h: + prompt += "Train results:\n" + for r in h["results"]: + status = "PASS" if r["pass"] else "FAIL" + prompt += f' [{status}] "{r["query"][:80]}" (triggered {r["triggers"]}/{r["runs"]})\n' + if h.get("note"): + prompt += f'Note: {h["note"]}\n' + prompt += "\n\n" + + 
prompt += f""" + +Skill content (for context on what the skill does): + +{skill_content} + + +Based on the failures, write a new and improved description that is more likely to trigger correctly. When I say "based on the failures", it's a bit of a tricky line to walk because we don't want to overfit to the specific cases you're seeing. So what I DON'T want you to do is produce an ever-expanding list of specific queries that this skill should or shouldn't trigger for. Instead, try to generalize from the failures to broader categories of user intent and situations where this skill would be useful or not useful. The reason for this is twofold: + +1. Avoid overfitting +2. The list might get loooong and it's injected into ALL queries and there might be a lot of skills, so we don't want to blow too much space on any given description. + +Concretely, your description should not be more than about 100-200 words, even if that comes at the cost of accuracy. There is a hard limit of 1024 characters — descriptions over that will be truncated, so stay comfortably under it. + +Here are some tips that we've found to work well in writing these descriptions: +- The skill should be phrased in the imperative -- "Use this skill for" rather than "this skill does" +- The skill description should focus on the user's intent, what they are trying to achieve, vs. the implementation details of how the skill works. +- The description competes with other skills for Claude's attention — make it distinctive and immediately recognizable. +- If you're getting lots of failures after repeated attempts, change things up. Try different sentence structures or wordings. + +I'd encourage you to be creative and mix up the style in different iterations since you'll have multiple opportunities to try different approaches and we'll just grab the highest-scoring one at the end. 
+ +Please respond with only the new description text in tags, nothing else.""" + + text = _call_claude(prompt, model) + + match = re.search(r"(.*?)", text, re.DOTALL) + description = match.group(1).strip().strip('"') if match else text.strip().strip('"') + + transcript: dict = { + "iteration": iteration, + "prompt": prompt, + "response": text, + "parsed_description": description, + "char_count": len(description), + "over_limit": len(description) > 1024, + } + + # Safety net: the prompt already states the 1024-char hard limit, but if + # the model blew past it anyway, make one fresh single-turn call that + # quotes the too-long version and asks for a shorter rewrite. (The old + # SDK path did this as a true multi-turn; `claude -p` is one-shot, so we + # inline the prior output into the new prompt instead.) + if len(description) > 1024: + shorten_prompt = ( + f"{prompt}\n\n" + f"---\n\n" + f"A previous attempt produced this description, which at " + f"{len(description)} characters is over the 1024-character hard limit:\n\n" + f'"{description}"\n\n' + f"Rewrite it to be under 1024 characters while keeping the most " + f"important trigger words and intent coverage. Respond with only " + f"the new description in tags." 
+ ) + shorten_text = _call_claude(shorten_prompt, model) + match = re.search(r"(.*?)", shorten_text, re.DOTALL) + shortened = match.group(1).strip().strip('"') if match else shorten_text.strip().strip('"') + + transcript["rewrite_prompt"] = shorten_prompt + transcript["rewrite_response"] = shorten_text + transcript["rewrite_description"] = shortened + transcript["rewrite_char_count"] = len(shortened) + description = shortened + + transcript["final_description"] = description + + if log_dir: + log_dir.mkdir(parents=True, exist_ok=True) + log_file = log_dir / f"improve_iter_{iteration or 'unknown'}.json" + log_file.write_text(json.dumps(transcript, indent=2)) + + return description + + +def main(): + parser = argparse.ArgumentParser(description="Improve a skill description based on eval results") + parser.add_argument("--eval-results", required=True, help="Path to eval results JSON (from run_eval.py)") + parser.add_argument("--skill-path", required=True, help="Path to skill directory") + parser.add_argument("--history", default=None, help="Path to history JSON (previous attempts)") + parser.add_argument("--model", required=True, help="Model for improvement") + parser.add_argument("--verbose", action="store_true", help="Print thinking to stderr") + args = parser.parse_args() + + skill_path = Path(args.skill_path) + if not (skill_path / "SKILL.md").exists(): + print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr) + sys.exit(1) + + eval_results = json.loads(Path(args.eval_results).read_text()) + history = [] + if args.history: + history = json.loads(Path(args.history).read_text()) + + name, _, content = parse_skill_md(skill_path) + current_description = eval_results["description"] + + if args.verbose: + print(f"Current: {current_description}", file=sys.stderr) + print(f"Score: {eval_results['summary']['passed']}/{eval_results['summary']['total']}", file=sys.stderr) + + new_description = improve_description( + skill_name=name, + skill_content=content, + 
current_description=current_description, + eval_results=eval_results, + history=history, + model=args.model, + ) + + if args.verbose: + print(f"Improved: {new_description}", file=sys.stderr) + + # Output as JSON with both the new description and updated history + output = { + "description": new_description, + "history": history + [{ + "description": current_description, + "passed": eval_results["summary"]["passed"], + "failed": eval_results["summary"]["failed"], + "total": eval_results["summary"]["total"], + "results": eval_results["results"], + }], + } + print(json.dumps(output, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/package_skill.py b/.agents/skills/skill-creator/scripts/package_skill.py new file mode 100644 index 0000000..f48eac4 --- /dev/null +++ b/.agents/skills/skill-creator/scripts/package_skill.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +""" +Skill Packager - Creates a distributable .skill file of a skill folder + +Usage: + python utils/package_skill.py [output-directory] + +Example: + python utils/package_skill.py skills/public/my-skill + python utils/package_skill.py skills/public/my-skill ./dist +""" + +import fnmatch +import sys +import zipfile +from pathlib import Path +from scripts.quick_validate import validate_skill + +# Patterns to exclude when packaging skills. +EXCLUDE_DIRS = {"__pycache__", "node_modules"} +EXCLUDE_GLOBS = {"*.pyc"} +EXCLUDE_FILES = {".DS_Store"} +# Directories excluded only at the skill root (not when nested deeper). +ROOT_EXCLUDE_DIRS = {"evals"} + + +def should_exclude(rel_path: Path) -> bool: + """Check if a path should be excluded from packaging.""" + parts = rel_path.parts + if any(part in EXCLUDE_DIRS for part in parts): + return True + # rel_path is relative to skill_path.parent, so parts[0] is the skill + # folder name and parts[1] (if present) is the first subdir. 
+ if len(parts) > 1 and parts[1] in ROOT_EXCLUDE_DIRS: + return True + name = rel_path.name + if name in EXCLUDE_FILES: + return True + return any(fnmatch.fnmatch(name, pat) for pat in EXCLUDE_GLOBS) + + +def package_skill(skill_path, output_dir=None): + """ + Package a skill folder into a .skill file. + + Args: + skill_path: Path to the skill folder + output_dir: Optional output directory for the .skill file (defaults to current directory) + + Returns: + Path to the created .skill file, or None if error + """ + skill_path = Path(skill_path).resolve() + + # Validate skill folder exists + if not skill_path.exists(): + print(f"❌ Error: Skill folder not found: {skill_path}") + return None + + if not skill_path.is_dir(): + print(f"❌ Error: Path is not a directory: {skill_path}") + return None + + # Validate SKILL.md exists + skill_md = skill_path / "SKILL.md" + if not skill_md.exists(): + print(f"❌ Error: SKILL.md not found in {skill_path}") + return None + + # Run validation before packaging + print("🔍 Validating skill...") + valid, message = validate_skill(skill_path) + if not valid: + print(f"❌ Validation failed: {message}") + print(" Please fix the validation errors before packaging.") + return None + print(f"✅ {message}\n") + + # Determine output location + skill_name = skill_path.name + if output_dir: + output_path = Path(output_dir).resolve() + output_path.mkdir(parents=True, exist_ok=True) + else: + output_path = Path.cwd() + + skill_filename = output_path / f"{skill_name}.skill" + + # Create the .skill file (zip format) + try: + with zipfile.ZipFile(skill_filename, 'w', zipfile.ZIP_DEFLATED) as zipf: + # Walk through the skill directory, excluding build artifacts + for file_path in skill_path.rglob('*'): + if not file_path.is_file(): + continue + arcname = file_path.relative_to(skill_path.parent) + if should_exclude(arcname): + print(f" Skipped: {arcname}") + continue + zipf.write(file_path, arcname) + print(f" Added: {arcname}") + + print(f"\n✅ Successfully 
packaged skill to: {skill_filename}") + return skill_filename + + except Exception as e: + print(f"❌ Error creating .skill file: {e}") + return None + + +def main(): + if len(sys.argv) < 2: + print("Usage: python utils/package_skill.py [output-directory]") + print("\nExample:") + print(" python utils/package_skill.py skills/public/my-skill") + print(" python utils/package_skill.py skills/public/my-skill ./dist") + sys.exit(1) + + skill_path = sys.argv[1] + output_dir = sys.argv[2] if len(sys.argv) > 2 else None + + print(f"📦 Packaging skill: {skill_path}") + if output_dir: + print(f" Output directory: {output_dir}") + print() + + result = package_skill(skill_path, output_dir) + + if result: + sys.exit(0) + else: + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/quick_validate.py b/.agents/skills/skill-creator/scripts/quick_validate.py new file mode 100644 index 0000000..ed8e1dd --- /dev/null +++ b/.agents/skills/skill-creator/scripts/quick_validate.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +""" +Quick validation script for skills - minimal version +""" + +import sys +import os +import re +import yaml +from pathlib import Path + +def validate_skill(skill_path): + """Basic validation of a skill""" + skill_path = Path(skill_path) + + # Check SKILL.md exists + skill_md = skill_path / 'SKILL.md' + if not skill_md.exists(): + return False, "SKILL.md not found" + + # Read and validate frontmatter + content = skill_md.read_text() + if not content.startswith('---'): + return False, "No YAML frontmatter found" + + # Extract frontmatter + match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL) + if not match: + return False, "Invalid frontmatter format" + + frontmatter_text = match.group(1) + + # Parse YAML frontmatter + try: + frontmatter = yaml.safe_load(frontmatter_text) + if not isinstance(frontmatter, dict): + return False, "Frontmatter must be a YAML dictionary" + except yaml.YAMLError as e: + return False, 
f"Invalid YAML in frontmatter: {e}" + + # Define allowed properties + ALLOWED_PROPERTIES = {'name', 'description', 'license', 'allowed-tools', 'metadata', 'compatibility'} + + # Check for unexpected properties (excluding nested keys under metadata) + unexpected_keys = set(frontmatter.keys()) - ALLOWED_PROPERTIES + if unexpected_keys: + return False, ( + f"Unexpected key(s) in SKILL.md frontmatter: {', '.join(sorted(unexpected_keys))}. " + f"Allowed properties are: {', '.join(sorted(ALLOWED_PROPERTIES))}" + ) + + # Check required fields + if 'name' not in frontmatter: + return False, "Missing 'name' in frontmatter" + if 'description' not in frontmatter: + return False, "Missing 'description' in frontmatter" + + # Extract name for validation + name = frontmatter.get('name', '') + if not isinstance(name, str): + return False, f"Name must be a string, got {type(name).__name__}" + name = name.strip() + if name: + # Check naming convention (kebab-case: lowercase with hyphens) + if not re.match(r'^[a-z0-9-]+$', name): + return False, f"Name '{name}' should be kebab-case (lowercase letters, digits, and hyphens only)" + if name.startswith('-') or name.endswith('-') or '--' in name: + return False, f"Name '{name}' cannot start/end with hyphen or contain consecutive hyphens" + # Check name length (max 64 characters per spec) + if len(name) > 64: + return False, f"Name is too long ({len(name)} characters). Maximum is 64 characters." 
+ + # Extract and validate description + description = frontmatter.get('description', '') + if not isinstance(description, str): + return False, f"Description must be a string, got {type(description).__name__}" + description = description.strip() + if description: + # Check for angle brackets + if '<' in description or '>' in description: + return False, "Description cannot contain angle brackets (< or >)" + # Check description length (max 1024 characters per spec) + if len(description) > 1024: + return False, f"Description is too long ({len(description)} characters). Maximum is 1024 characters." + + # Validate compatibility field if present (optional) + compatibility = frontmatter.get('compatibility', '') + if compatibility: + if not isinstance(compatibility, str): + return False, f"Compatibility must be a string, got {type(compatibility).__name__}" + if len(compatibility) > 500: + return False, f"Compatibility is too long ({len(compatibility)} characters). Maximum is 500 characters." + + return True, "Skill is valid!" + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python quick_validate.py ") + sys.exit(1) + + valid, message = validate_skill(sys.argv[1]) + print(message) + sys.exit(0 if valid else 1) \ No newline at end of file diff --git a/.agents/skills/skill-creator/scripts/run_eval.py b/.agents/skills/skill-creator/scripts/run_eval.py new file mode 100644 index 0000000..e58c70b --- /dev/null +++ b/.agents/skills/skill-creator/scripts/run_eval.py @@ -0,0 +1,310 @@ +#!/usr/bin/env python3 +"""Run trigger evaluation for a skill description. + +Tests whether a skill's description causes Claude to trigger (read the skill) +for a set of queries. Outputs results as JSON. 
+""" + +import argparse +import json +import os +import select +import subprocess +import sys +import time +import uuid +from concurrent.futures import ProcessPoolExecutor, as_completed +from pathlib import Path + +from scripts.utils import parse_skill_md + + +def find_project_root() -> Path: + """Find the project root by walking up from cwd looking for .claude/. + + Mimics how Claude Code discovers its project root, so the command file + we create ends up where claude -p will look for it. + """ + current = Path.cwd() + for parent in [current, *current.parents]: + if (parent / ".claude").is_dir(): + return parent + return current + + +def run_single_query( + query: str, + skill_name: str, + skill_description: str, + timeout: int, + project_root: str, + model: str | None = None, +) -> bool: + """Run a single query and return whether the skill was triggered. + + Creates a command file in .claude/commands/ so it appears in Claude's + available_skills list, then runs `claude -p` with the raw query. + Uses --include-partial-messages to detect triggering early from + stream events (content_block_start) rather than waiting for the + full assistant message, which only arrives after tool execution. 
+ """ + unique_id = uuid.uuid4().hex[:8] + clean_name = f"{skill_name}-skill-{unique_id}" + project_commands_dir = Path(project_root) / ".claude" / "commands" + command_file = project_commands_dir / f"{clean_name}.md" + + try: + project_commands_dir.mkdir(parents=True, exist_ok=True) + # Use YAML block scalar to avoid breaking on quotes in description + indented_desc = "\n ".join(skill_description.split("\n")) + command_content = ( + f"---\n" + f"description: |\n" + f" {indented_desc}\n" + f"---\n\n" + f"# {skill_name}\n\n" + f"This skill handles: {skill_description}\n" + ) + command_file.write_text(command_content) + + cmd = [ + "claude", + "-p", query, + "--output-format", "stream-json", + "--verbose", + "--include-partial-messages", + ] + if model: + cmd.extend(["--model", model]) + + # Remove CLAUDECODE env var to allow nesting claude -p inside a + # Claude Code session. The guard is for interactive terminal conflicts; + # programmatic subprocess usage is safe. + env = {k: v for k, v in os.environ.items() if k != "CLAUDECODE"} + + process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + cwd=project_root, + env=env, + ) + + triggered = False + start_time = time.time() + buffer = "" + # Track state for stream event detection + pending_tool_name = None + accumulated_json = "" + + try: + while time.time() - start_time < timeout: + if process.poll() is not None: + remaining = process.stdout.read() + if remaining: + buffer += remaining.decode("utf-8", errors="replace") + break + + ready, _, _ = select.select([process.stdout], [], [], 1.0) + if not ready: + continue + + chunk = os.read(process.stdout.fileno(), 8192) + if not chunk: + break + buffer += chunk.decode("utf-8", errors="replace") + + while "\n" in buffer: + line, buffer = buffer.split("\n", 1) + line = line.strip() + if not line: + continue + + try: + event = json.loads(line) + except json.JSONDecodeError: + continue + + # Early detection via stream events + if 
event.get("type") == "stream_event": + se = event.get("event", {}) + se_type = se.get("type", "") + + if se_type == "content_block_start": + cb = se.get("content_block", {}) + if cb.get("type") == "tool_use": + tool_name = cb.get("name", "") + if tool_name in ("Skill", "Read"): + pending_tool_name = tool_name + accumulated_json = "" + else: + return False + + elif se_type == "content_block_delta" and pending_tool_name: + delta = se.get("delta", {}) + if delta.get("type") == "input_json_delta": + accumulated_json += delta.get("partial_json", "") + if clean_name in accumulated_json: + return True + + elif se_type in ("content_block_stop", "message_stop"): + if pending_tool_name: + return clean_name in accumulated_json + if se_type == "message_stop": + return False + + # Fallback: full assistant message + elif event.get("type") == "assistant": + message = event.get("message", {}) + for content_item in message.get("content", []): + if content_item.get("type") != "tool_use": + continue + tool_name = content_item.get("name", "") + tool_input = content_item.get("input", {}) + if tool_name == "Skill" and clean_name in tool_input.get("skill", ""): + triggered = True + elif tool_name == "Read" and clean_name in tool_input.get("file_path", ""): + triggered = True + return triggered + + elif event.get("type") == "result": + return triggered + finally: + # Clean up process on any exit path (return, exception, timeout) + if process.poll() is None: + process.kill() + process.wait() + + return triggered + finally: + if command_file.exists(): + command_file.unlink() + + +def run_eval( + eval_set: list[dict], + skill_name: str, + description: str, + num_workers: int, + timeout: int, + project_root: Path, + runs_per_query: int = 1, + trigger_threshold: float = 0.5, + model: str | None = None, +) -> dict: + """Run the full eval set and return results.""" + results = [] + + with ProcessPoolExecutor(max_workers=num_workers) as executor: + future_to_info = {} + for item in eval_set: + 
for run_idx in range(runs_per_query): + future = executor.submit( + run_single_query, + item["query"], + skill_name, + description, + timeout, + str(project_root), + model, + ) + future_to_info[future] = (item, run_idx) + + query_triggers: dict[str, list[bool]] = {} + query_items: dict[str, dict] = {} + for future in as_completed(future_to_info): + item, _ = future_to_info[future] + query = item["query"] + query_items[query] = item + if query not in query_triggers: + query_triggers[query] = [] + try: + query_triggers[query].append(future.result()) + except Exception as e: + print(f"Warning: query failed: {e}", file=sys.stderr) + query_triggers[query].append(False) + + for query, triggers in query_triggers.items(): + item = query_items[query] + trigger_rate = sum(triggers) / len(triggers) + should_trigger = item["should_trigger"] + if should_trigger: + did_pass = trigger_rate >= trigger_threshold + else: + did_pass = trigger_rate < trigger_threshold + results.append({ + "query": query, + "should_trigger": should_trigger, + "trigger_rate": trigger_rate, + "triggers": sum(triggers), + "runs": len(triggers), + "pass": did_pass, + }) + + passed = sum(1 for r in results if r["pass"]) + total = len(results) + + return { + "skill_name": skill_name, + "description": description, + "results": results, + "summary": { + "total": total, + "passed": passed, + "failed": total - passed, + }, + } + + +def main(): + parser = argparse.ArgumentParser(description="Run trigger evaluation for a skill description") + parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file") + parser.add_argument("--skill-path", required=True, help="Path to skill directory") + parser.add_argument("--description", default=None, help="Override description to test") + parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers") + parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds") + 
parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query") + parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold") + parser.add_argument("--model", default=None, help="Model to use for claude -p (default: user's configured model)") + parser.add_argument("--verbose", action="store_true", help="Print progress to stderr") + args = parser.parse_args() + + eval_set = json.loads(Path(args.eval_set).read_text()) + skill_path = Path(args.skill_path) + + if not (skill_path / "SKILL.md").exists(): + print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr) + sys.exit(1) + + name, original_description, content = parse_skill_md(skill_path) + description = args.description or original_description + project_root = find_project_root() + + if args.verbose: + print(f"Evaluating: {description}", file=sys.stderr) + + output = run_eval( + eval_set=eval_set, + skill_name=name, + description=description, + num_workers=args.num_workers, + timeout=args.timeout, + project_root=project_root, + runs_per_query=args.runs_per_query, + trigger_threshold=args.trigger_threshold, + model=args.model, + ) + + if args.verbose: + summary = output["summary"] + print(f"Results: {summary['passed']}/{summary['total']} passed", file=sys.stderr) + for r in output["results"]: + status = "PASS" if r["pass"] else "FAIL" + rate_str = f"{r['triggers']}/{r['runs']}" + print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:70]}", file=sys.stderr) + + print(json.dumps(output, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/run_loop.py b/.agents/skills/skill-creator/scripts/run_loop.py new file mode 100644 index 0000000..30a263d --- /dev/null +++ b/.agents/skills/skill-creator/scripts/run_loop.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 +"""Run the eval + improve loop until all pass or max iterations reached. 
+ +Combines run_eval.py and improve_description.py in a loop, tracking history +and returning the best description found. Supports train/test split to prevent +overfitting. +""" + +import argparse +import json +import random +import sys +import tempfile +import time +import webbrowser +from pathlib import Path + +from scripts.generate_report import generate_html +from scripts.improve_description import improve_description +from scripts.run_eval import find_project_root, run_eval +from scripts.utils import parse_skill_md + + +def split_eval_set(eval_set: list[dict], holdout: float, seed: int = 42) -> tuple[list[dict], list[dict]]: + """Split eval set into train and test sets, stratified by should_trigger.""" + random.seed(seed) + + # Separate by should_trigger + trigger = [e for e in eval_set if e["should_trigger"]] + no_trigger = [e for e in eval_set if not e["should_trigger"]] + + # Shuffle each group + random.shuffle(trigger) + random.shuffle(no_trigger) + + # Calculate split points + n_trigger_test = max(1, int(len(trigger) * holdout)) + n_no_trigger_test = max(1, int(len(no_trigger) * holdout)) + + # Split + test_set = trigger[:n_trigger_test] + no_trigger[:n_no_trigger_test] + train_set = trigger[n_trigger_test:] + no_trigger[n_no_trigger_test:] + + return train_set, test_set + + +def run_loop( + eval_set: list[dict], + skill_path: Path, + description_override: str | None, + num_workers: int, + timeout: int, + max_iterations: int, + runs_per_query: int, + trigger_threshold: float, + holdout: float, + model: str, + verbose: bool, + live_report_path: Path | None = None, + log_dir: Path | None = None, +) -> dict: + """Run the eval + improvement loop.""" + project_root = find_project_root() + name, original_description, content = parse_skill_md(skill_path) + current_description = description_override or original_description + + # Split into train/test if holdout > 0 + if holdout > 0: + train_set, test_set = split_eval_set(eval_set, holdout) + if verbose: + 
print(f"Split: {len(train_set)} train, {len(test_set)} test (holdout={holdout})", file=sys.stderr) + else: + train_set = eval_set + test_set = [] + + history = [] + exit_reason = "unknown" + + for iteration in range(1, max_iterations + 1): + if verbose: + print(f"\n{'='*60}", file=sys.stderr) + print(f"Iteration {iteration}/{max_iterations}", file=sys.stderr) + print(f"Description: {current_description}", file=sys.stderr) + print(f"{'='*60}", file=sys.stderr) + + # Evaluate train + test together in one batch for parallelism + all_queries = train_set + test_set + t0 = time.time() + all_results = run_eval( + eval_set=all_queries, + skill_name=name, + description=current_description, + num_workers=num_workers, + timeout=timeout, + project_root=project_root, + runs_per_query=runs_per_query, + trigger_threshold=trigger_threshold, + model=model, + ) + eval_elapsed = time.time() - t0 + + # Split results back into train/test by matching queries + train_queries_set = {q["query"] for q in train_set} + train_result_list = [r for r in all_results["results"] if r["query"] in train_queries_set] + test_result_list = [r for r in all_results["results"] if r["query"] not in train_queries_set] + + train_passed = sum(1 for r in train_result_list if r["pass"]) + train_total = len(train_result_list) + train_summary = {"passed": train_passed, "failed": train_total - train_passed, "total": train_total} + train_results = {"results": train_result_list, "summary": train_summary} + + if test_set: + test_passed = sum(1 for r in test_result_list if r["pass"]) + test_total = len(test_result_list) + test_summary = {"passed": test_passed, "failed": test_total - test_passed, "total": test_total} + test_results = {"results": test_result_list, "summary": test_summary} + else: + test_results = None + test_summary = None + + history.append({ + "iteration": iteration, + "description": current_description, + "train_passed": train_summary["passed"], + "train_failed": train_summary["failed"], + 
"train_total": train_summary["total"], + "train_results": train_results["results"], + "test_passed": test_summary["passed"] if test_summary else None, + "test_failed": test_summary["failed"] if test_summary else None, + "test_total": test_summary["total"] if test_summary else None, + "test_results": test_results["results"] if test_results else None, + # For backward compat with report generator + "passed": train_summary["passed"], + "failed": train_summary["failed"], + "total": train_summary["total"], + "results": train_results["results"], + }) + + # Write live report if path provided + if live_report_path: + partial_output = { + "original_description": original_description, + "best_description": current_description, + "best_score": "in progress", + "iterations_run": len(history), + "holdout": holdout, + "train_size": len(train_set), + "test_size": len(test_set), + "history": history, + } + live_report_path.write_text(generate_html(partial_output, auto_refresh=True, skill_name=name)) + + if verbose: + def print_eval_stats(label, results, elapsed): + pos = [r for r in results if r["should_trigger"]] + neg = [r for r in results if not r["should_trigger"]] + tp = sum(r["triggers"] for r in pos) + pos_runs = sum(r["runs"] for r in pos) + fn = pos_runs - tp + fp = sum(r["triggers"] for r in neg) + neg_runs = sum(r["runs"] for r in neg) + tn = neg_runs - fp + total = tp + tn + fp + fn + precision = tp / (tp + fp) if (tp + fp) > 0 else 1.0 + recall = tp / (tp + fn) if (tp + fn) > 0 else 1.0 + accuracy = (tp + tn) / total if total > 0 else 0.0 + print(f"{label}: {tp+tn}/{total} correct, precision={precision:.0%} recall={recall:.0%} accuracy={accuracy:.0%} ({elapsed:.1f}s)", file=sys.stderr) + for r in results: + status = "PASS" if r["pass"] else "FAIL" + rate_str = f"{r['triggers']}/{r['runs']}" + print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:60]}", file=sys.stderr) + + print_eval_stats("Train", train_results["results"], eval_elapsed) + 
if test_summary: + print_eval_stats("Test ", test_results["results"], 0) + + if train_summary["failed"] == 0: + exit_reason = f"all_passed (iteration {iteration})" + if verbose: + print(f"\nAll train queries passed on iteration {iteration}!", file=sys.stderr) + break + + if iteration == max_iterations: + exit_reason = f"max_iterations ({max_iterations})" + if verbose: + print(f"\nMax iterations reached ({max_iterations}).", file=sys.stderr) + break + + # Improve the description based on train results + if verbose: + print(f"\nImproving description...", file=sys.stderr) + + t0 = time.time() + # Strip test scores from history so improvement model can't see them + blinded_history = [ + {k: v for k, v in h.items() if not k.startswith("test_")} + for h in history + ] + new_description = improve_description( + skill_name=name, + skill_content=content, + current_description=current_description, + eval_results=train_results, + history=blinded_history, + model=model, + log_dir=log_dir, + iteration=iteration, + ) + improve_elapsed = time.time() - t0 + + if verbose: + print(f"Proposed ({improve_elapsed:.1f}s): {new_description}", file=sys.stderr) + + current_description = new_description + + # Find the best iteration by TEST score (or train if no test set) + if test_set: + best = max(history, key=lambda h: h["test_passed"] or 0) + best_score = f"{best['test_passed']}/{best['test_total']}" + else: + best = max(history, key=lambda h: h["train_passed"]) + best_score = f"{best['train_passed']}/{best['train_total']}" + + if verbose: + print(f"\nExit reason: {exit_reason}", file=sys.stderr) + print(f"Best score: {best_score} (iteration {best['iteration']})", file=sys.stderr) + + return { + "exit_reason": exit_reason, + "original_description": original_description, + "best_description": best["description"], + "best_score": best_score, + "best_train_score": f"{best['train_passed']}/{best['train_total']}", + "best_test_score": f"{best['test_passed']}/{best['test_total']}" if test_set 
else None, + "final_description": current_description, + "iterations_run": len(history), + "holdout": holdout, + "train_size": len(train_set), + "test_size": len(test_set), + "history": history, + } + + +def main(): + parser = argparse.ArgumentParser(description="Run eval + improve loop") + parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file") + parser.add_argument("--skill-path", required=True, help="Path to skill directory") + parser.add_argument("--description", default=None, help="Override starting description") + parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers") + parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds") + parser.add_argument("--max-iterations", type=int, default=5, help="Max improvement iterations") + parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query") + parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold") + parser.add_argument("--holdout", type=float, default=0.4, help="Fraction of eval set to hold out for testing (0 to disable)") + parser.add_argument("--model", required=True, help="Model for improvement") + parser.add_argument("--verbose", action="store_true", help="Print progress to stderr") + parser.add_argument("--report", default="auto", help="Generate HTML report at this path (default: 'auto' for temp file, 'none' to disable)") + parser.add_argument("--results-dir", default=None, help="Save all outputs (results.json, report.html, log.txt) to a timestamped subdirectory here") + args = parser.parse_args() + + eval_set = json.loads(Path(args.eval_set).read_text()) + skill_path = Path(args.skill_path) + + if not (skill_path / "SKILL.md").exists(): + print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr) + sys.exit(1) + + name, _, _ = parse_skill_md(skill_path) + + # Set up live report path + if args.report != "none": + if args.report 
== "auto": + timestamp = time.strftime("%Y%m%d_%H%M%S") + live_report_path = Path(tempfile.gettempdir()) / f"skill_description_report_{skill_path.name}_{timestamp}.html" + else: + live_report_path = Path(args.report) + # Open the report immediately so the user can watch + live_report_path.write_text("

Starting optimization loop...

") + webbrowser.open(str(live_report_path)) + else: + live_report_path = None + + # Determine output directory (create before run_loop so logs can be written) + if args.results_dir: + timestamp = time.strftime("%Y-%m-%d_%H%M%S") + results_dir = Path(args.results_dir) / timestamp + results_dir.mkdir(parents=True, exist_ok=True) + else: + results_dir = None + + log_dir = results_dir / "logs" if results_dir else None + + output = run_loop( + eval_set=eval_set, + skill_path=skill_path, + description_override=args.description, + num_workers=args.num_workers, + timeout=args.timeout, + max_iterations=args.max_iterations, + runs_per_query=args.runs_per_query, + trigger_threshold=args.trigger_threshold, + holdout=args.holdout, + model=args.model, + verbose=args.verbose, + live_report_path=live_report_path, + log_dir=log_dir, + ) + + # Save JSON output + json_output = json.dumps(output, indent=2) + print(json_output) + if results_dir: + (results_dir / "results.json").write_text(json_output) + + # Write final HTML report (without auto-refresh) + if live_report_path: + live_report_path.write_text(generate_html(output, auto_refresh=False, skill_name=name)) + print(f"\nReport: {live_report_path}", file=sys.stderr) + + if results_dir and live_report_path: + (results_dir / "report.html").write_text(generate_html(output, auto_refresh=False, skill_name=name)) + + if results_dir: + print(f"Results saved to: {results_dir}", file=sys.stderr) + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/utils.py b/.agents/skills/skill-creator/scripts/utils.py new file mode 100644 index 0000000..51b6a07 --- /dev/null +++ b/.agents/skills/skill-creator/scripts/utils.py @@ -0,0 +1,47 @@ +"""Shared utilities for skill-creator scripts.""" + +from pathlib import Path + + + +def parse_skill_md(skill_path: Path) -> tuple[str, str, str]: + """Parse a SKILL.md file, returning (name, description, full_content).""" + content = (skill_path / "SKILL.md").read_text() + 
lines = content.split("\n") + + if lines[0].strip() != "---": + raise ValueError("SKILL.md missing frontmatter (no opening ---)") + + end_idx = None + for i, line in enumerate(lines[1:], start=1): + if line.strip() == "---": + end_idx = i + break + + if end_idx is None: + raise ValueError("SKILL.md missing frontmatter (no closing ---)") + + name = "" + description = "" + frontmatter_lines = lines[1:end_idx] + i = 0 + while i < len(frontmatter_lines): + line = frontmatter_lines[i] + if line.startswith("name:"): + name = line[len("name:"):].strip().strip('"').strip("'") + elif line.startswith("description:"): + value = line[len("description:"):].strip() + # Handle YAML multiline indicators (>, |, >-, |-) + if value in (">", "|", ">-", "|-"): + continuation_lines: list[str] = [] + i += 1 + while i < len(frontmatter_lines) and (frontmatter_lines[i].startswith(" ") or frontmatter_lines[i].startswith("\t")): + continuation_lines.append(frontmatter_lines[i].strip()) + i += 1 + description = " ".join(continuation_lines) + continue + else: + description = value.strip('"').strip("'") + i += 1 + + return name, description, content diff --git a/.agents/skills/tanstack-form/SKILL.md b/.agents/skills/tanstack-form/SKILL.md new file mode 100644 index 0000000..7bba8e5 --- /dev/null +++ b/.agents/skills/tanstack-form/SKILL.md @@ -0,0 +1,417 @@ +--- +name: tanstack-form +description: Headless, performant, and type-safe form state management for TS/JS, React, Vue, Angular, Solid, Lit, and Svelte. +--- + +## Overview + +TanStack Form is a headless form library with deep TypeScript integration. It provides field-level and form-level validation (sync/async), array fields, linked/dependent fields, fine-grained reactivity, and schema validation adapter support (Zod, Valibot, Yup). 
+ +**Package:** `@tanstack/react-form` +**Adapters:** `@tanstack/zod-form-adapter`, `@tanstack/valibot-form-adapter` +**Status:** Stable (v1) + +## Installation + +```bash +npm install @tanstack/react-form +# Optional schema adapters: +npm install @tanstack/zod-form-adapter zod +npm install @tanstack/valibot-form-adapter valibot +``` + +## Core: useForm + +```tsx +import { useForm } from '@tanstack/react-form'; + +function MyForm() { + const form = useForm({ + defaultValues: { + firstName: '', + lastName: '', + email: '', + age: 0 + }, + onSubmit: async ({ value }) => { + // value is fully typed + await submitToServer(value); + }, + onSubmitInvalid: ({ value, formApi }) => { + console.log('Validation failed:', formApi.state.errors); + } + }); + + return ( + { + e.preventDefault(); + e.stopPropagation(); + form.handleSubmit(); + }} + > + {/* Fields */} + ({ canSubmit: state.canSubmit, isSubmitting: state.isSubmitting })} + children={({ canSubmit, isSubmitting }) => ( + + )} + /> + + ); +} +``` + +## Fields (form.Field) + +```tsx + + value.length < 3 ? 'Must be at least 3 characters' : undefined, + }} + children={(field) => ( +
+ + field.handleChange(e.target.value)} + /> + {field.state.meta.isTouched && field.state.meta.errors.length > 0 && ( + {field.state.meta.errors.join(', ')} + )} +
+ )} +/> + + + + {(field) => ( + field.handleChange(e.target.value)} + onBlur={field.handleBlur} + /> + )} + +``` + +## Validation + +### Validation Timing + +| Cause | When | +| ---------- | ------------------------ | +| `onChange` | After every value change | +| `onBlur` | When field loses focus | +| `onSubmit` | During submission | +| `onMount` | When field mounts | + +### Synchronous Validation + +```tsx + { + if (value < 18) return 'Must be 18 or older'; + return undefined; // undefined = valid + }, + onBlur: ({ value }) => { + if (!value) return 'Required'; + return undefined; + } + }} +/> +``` + +### Asynchronous Validation + +```tsx + { + const res = await fetch(`/api/check-username?q=${value}`); + const { available } = await res.json(); + if (!available) return 'Username taken'; + return undefined; + } + }} +> + {(field) => ( + <> + field.handleChange(e.target.value)} /> + {field.state.meta.isValidating && Checking...} + + )} + +``` + +### Schema Validation (Zod) + +```tsx +import { zodValidator } from '@tanstack/zod-form-adapter' +import { z } from 'zod' + +const form = useForm({ + defaultValues: { email: '', age: 0 }, + validatorAdapter: zodValidator(), + onSubmit: async ({ value }) => { /* ... */ }, +}) + + + + +``` + +### Form-Level Validation + +```tsx +const form = useForm({ + defaultValues: { password: '', confirmPassword: '' }, + validators: { + onChange: ({ value }) => { + if (value.password !== value.confirmPassword) { + return 'Passwords do not match'; + } + return undefined; + } + } +}); +``` + +### Linked/Dependent Fields + +```tsx + { + const password = fieldApi.form.getFieldValue('password'); + if (value !== password) return 'Passwords do not match'; + return undefined; + } + }} +/> +``` + +## Array Fields + +```tsx + + {(field) => ( +
+ {field.state.value.map((_, index) => ( +
+ + {(subField) => ( + subField.handleChange(e.target.value)} + /> + )} + + +
+ ))} + +
+ )} +
+``` + +### Array Methods + +```typescript +field.pushValue(item); // Add to end +field.insertValue(index, item); // Insert at index +field.replaceValue(index, item); // Replace at index +field.removeValue(index); // Remove at index +field.swapValues(indexA, indexB); // Swap positions +field.moveValue(from, to); // Move position +``` + +## Listeners (Side Effects) + +```tsx + { + // Side effect: reset dependent fields + form.setFieldValue('state', ''); + form.setFieldValue('postalCode', ''); + } + }} +/> +``` + +## Reactivity (form.Subscribe & useStore) + +```tsx +// Render-prop subscription (fine-grained) + ({ canSubmit: state.canSubmit, isDirty: state.isDirty })} + children={({ canSubmit, isDirty }) => ( +
+ {isDirty && Unsaved changes} + +
+ )} +/>; + +// Hook-based subscription +function FormStatus() { + const isValid = form.useStore((s) => s.isValid); + return isValid ? null :

Fix errors

; +} +``` + +## Form State + +```typescript +interface FormState { + values: TFormData; + errors: ValidationError[]; + errorMap: Record; + isFormValid: boolean; + isFieldsValid: boolean; + isValid: boolean; // isFormValid && isFieldsValid + isTouched: boolean; + isPristine: boolean; + isDirty: boolean; + isSubmitting: boolean; + isSubmitted: boolean; + isSubmitSuccessful: boolean; + submissionAttempts: number; + canSubmit: boolean; // isValid && !isSubmitting +} +``` + +## Field State + +```typescript +interface FieldState { + value: TData; + meta: { + isTouched: boolean; + isDirty: boolean; + isPristine: boolean; + isValidating: boolean; + errors: ValidationError[]; + errorMap: Record; + }; +} +``` + +## FormApi Methods + +```typescript +form.handleSubmit(); +form.reset(); +form.getFieldValue(field); +form.setFieldValue(field, value); +form.getFieldMeta(field); +form.setFieldMeta(field, updater); +form.validateAllFields(cause); +form.validateField(field, cause); +form.deleteField(field); +``` + +## Shared Form Options (formOptions) + +```tsx +import { formOptions } from '@tanstack/react-form'; + +const sharedOpts = formOptions({ + defaultValues: { firstName: '', lastName: '' } +}); + +// Reuse across components +const form = useForm({ + ...sharedOpts, + onSubmit: async ({ value }) => { + /* ... 
*/ + } +}); +``` + +## Server-Side Validation + +```tsx +// TanStack Start / Next.js server action +import { ServerValidateError } from '@tanstack/react-form/nextjs'; + +export async function validateForm(data: FormData) { + const email = data.get('email') as string; + if (await checkEmailExists(email)) { + throw new ServerValidateError({ + form: 'Submission failed', + fields: { email: 'Email already registered' } + }); + } +} +``` + +## TypeScript Integration + +```tsx +// Type-safe field paths with DeepKeys +interface UserForm { + name: string + address: { street: string; city: string } + tags: string[] + contacts: Array<{ name: string; phone: string }> +} + +// TypeScript auto-completes all valid paths: +// 'name', 'address', 'address.street', 'address.city', 'tags', 'contacts' + // OK + // Type Error! +``` + +## Best Practices + +1. **Always call `e.preventDefault()` and `e.stopPropagation()`** on form submit +2. **Always attach `onBlur={field.handleBlur}`** for blur validation and isTouched tracking +3. **Use `mode="array"`** for array fields to get array methods +4. **Return `undefined`** (not null/false) for valid validators +5. **Use `asyncDebounceMs`** for async validators to prevent API spam +6. **Check `isTouched` before showing errors** for better UX +7. **Use `form.Subscribe` with selectors** to minimize re-renders +8. **Use `formOptions`** for shared configuration across components +9. **Use schema validators** (Zod/Valibot) for complex validation rules +10. 
**Use `onChangeListenTo`** for cross-field validation dependencies + +## Common Pitfalls + +- Forgetting `e.preventDefault()` on form submit (causes page reload) +- Not attaching `onBlur` to inputs (breaks blur validation and isTouched) +- Returning `null` or `false` instead of `undefined` for valid fields +- Using `mode="array"` incorrectly (only needed on the array field itself, not sub-fields) +- Subscribing to entire form state instead of using selectors (unnecessary re-renders) +- Not using `asyncDebounceMs` with async validators (fires on every keystroke) diff --git a/.agents/skills/tanstack-query/SKILL.md b/.agents/skills/tanstack-query/SKILL.md new file mode 100644 index 0000000..44d85f0 --- /dev/null +++ b/.agents/skills/tanstack-query/SKILL.md @@ -0,0 +1,472 @@ +--- +name: tanstack-query +description: TanStack Query v5 data fetching patterns including useSuspenseQuery, useQuery, mutations, cache management, and API service integration. Use when fetching data, managing server state, or working with TanStack Query hooks. +--- + +# TanStack Query Patterns + +## Purpose + +Modern data fetching with TanStack Query v5 (latest: 5.90.5, November 2025), emphasizing Suspense-based queries, cache-first strategies, and centralized API services. 
+ +**Note**: v5 (released October 2023) has breaking changes from v4: + +- `isLoading` → `isPending` for status +- `cacheTime` → `gcTime` (garbage collection time) +- React 18.0+ required +- Callbacks removed from useQuery (onError, onSuccess, onSettled) +- `keepPreviousData` replaced with `placeholderData` function + +## When to Use This Skill + +- Fetching data with TanStack Query +- Using useSuspenseQuery or useQuery +- Managing mutations +- Cache invalidation and updates +- API service patterns + +--- + +## Quick Start + +### Primary Pattern: useSuspenseQuery + +For **all new components**, use `useSuspenseQuery`: + +```typescript +import { useSuspenseQuery } from '@tanstack/react-query'; +import { postsApi } from '~/features/posts/api/postsApi'; + +function PostList() { + const { data: posts } = useSuspenseQuery({ + queryKey: ['posts'], + queryFn: postsApi.getAll, + }); + + return ( +
+ {posts.map(post => ( + + ))} +
+ ); +} + +// Wrap with Suspense +}> + + +``` + +**Benefits:** + +- No `isLoading` checks needed +- Integrates with Suspense boundaries +- Cleaner component code +- Consistent loading UX + +--- + +## useSuspenseQuery Patterns + +### Basic Usage + +```typescript +const { data } = useSuspenseQuery({ + queryKey: ['user', userId], + queryFn: () => userApi.get(userId), +}); + +// data is never undefined - guaranteed by Suspense +return
{data.name}
; +``` + +### With Parameters + +```typescript +function UserPosts({ userId }: { userId: string }) { + const { data: posts } = useSuspenseQuery({ + queryKey: ['users', userId, 'posts'], + queryFn: () => postsApi.getByUser(userId), + }); + + return
{posts.length} posts
; +} +``` + +### Dependent Queries + +```typescript +function PostDetails({ postId }: { postId: string }) { + // First query + const { data: post } = useSuspenseQuery({ + queryKey: ['posts', postId], + queryFn: () => postsApi.get(postId), + }); + + // Second query depends on first + const { data: author } = useSuspenseQuery({ + queryKey: ['users', post.authorId], + queryFn: () => userApi.get(post.authorId), + }); + + return
{author.name} wrote {post.title}
; +} +``` + +--- + +## useQuery (Legacy Pattern) + +Use `useQuery` only when you need loading/error states in the component: + +```typescript +import { useQuery } from '@tanstack/react-query'; + +function Component() { + const { data, isPending, error } = useQuery({ + queryKey: ['posts'], + queryFn: postsApi.getAll, + }); + + if (isPending) return ; + if (error) return ; + + return
{data.map(...)}
; +} +``` + +**When to use `useQuery` vs `useSuspenseQuery`:** + +- Use `useSuspenseQuery` by default (preferred) +- Use `useQuery` only when you need component-level loading states +- Most cases should use `useSuspenseQuery` + Suspense boundaries + +--- + +## Mutations + +### Basic Mutation + +```typescript +import { useMutation, useQueryClient } from '@tanstack/react-query'; + +function CreatePostButton() { + const queryClient = useQueryClient(); + + const mutation = useMutation({ + mutationFn: postsApi.create, + onSuccess: () => { + // Invalidate and refetch + queryClient.invalidateQueries({ queryKey: ['posts'] }); + }, + }); + + const handleCreate = () => { + mutation.mutate({ + title: 'New Post', + content: 'Content here', + }); + }; + + return ( + + ); +} +``` + +### Optimistic Updates + +```typescript +const mutation = useMutation({ + mutationFn: postsApi.update, + onMutate: async (updatedPost) => { + // Cancel outgoing refetches + await queryClient.cancelQueries({ queryKey: ['posts', updatedPost.id] }); + + // Snapshot previous value + const previousPost = queryClient.getQueryData(['posts', updatedPost.id]); + + // Optimistically update + queryClient.setQueryData(['posts', updatedPost.id], updatedPost); + + // Return context with snapshot + return { previousPost }; + }, + onError: (err, updatedPost, context) => { + // Rollback on error + queryClient.setQueryData(['posts', updatedPost.id], context.previousPost); + }, + onSettled: (data, error, variables) => { + // Refetch after mutation + queryClient.invalidateQueries({ queryKey: ['posts', variables.id] }); + } +}); +``` + +--- + +## Cache Management + +### Invalidation + +```typescript +import { useQueryClient } from '@tanstack/react-query'; + +const queryClient = useQueryClient(); + +// Invalidate all posts queries +queryClient.invalidateQueries({ queryKey: ['posts'] }); + +// Invalidate specific post +queryClient.invalidateQueries({ queryKey: ['posts', postId] }); + +// Invalidate all queries 
+queryClient.invalidateQueries(); +``` + +### Manual Updates + +```typescript +// Update cache directly +queryClient.setQueryData(['posts', postId], newPost); + +// Update with function +queryClient.setQueryData(['posts'], (oldPosts) => [...oldPosts, newPost]); +``` + +### Prefetching + +```typescript +// Prefetch data +await queryClient.prefetchQuery({ + queryKey: ['posts', postId], + queryFn: () => postsApi.get(postId), +}); + +// In a component +const prefetchPost = (postId: string) => { + queryClient.prefetchQuery({ + queryKey: ['posts', postId], + queryFn: () => postsApi.get(postId), + }); +}; + + prefetchPost(post.id)} +> + {post.title} + +``` + +--- + +## API Service Pattern + +### Centralized API Service + +```typescript +// features/posts/api/postsApi.ts +import { apiClient } from '@/lib/apiClient'; +import type { Post, CreatePostDto, UpdatePostDto } from '~/types/post'; + +export const postsApi = { + getAll: async (): Promise => { + const response = await apiClient.get('/posts'); + return response.data; + }, + + get: async (id: string): Promise => { + const response = await apiClient.get(`/posts/${id}`); + return response.data; + }, + + create: async (data: CreatePostDto): Promise => { + const response = await apiClient.post('/posts', data); + return response.data; + }, + + update: async (id: string, data: UpdatePostDto): Promise => { + const response = await apiClient.put(`/posts/${id}`, data); + return response.data; + }, + + delete: async (id: string): Promise => { + await apiClient.delete(`/posts/${id}`); + }, + + getByUser: async (userId: string): Promise => { + const response = await apiClient.get(`/users/${userId}/posts`); + return response.data; + } +}; +``` + +### Usage in Components + +```typescript +import { postsApi } from '~/features/posts/api/postsApi'; + +// In query +const { data } = useSuspenseQuery({ + queryKey: ['posts'], + queryFn: postsApi.getAll +}); + +// In mutation +const mutation = useMutation({ + mutationFn: postsApi.create +}); 
+``` + +--- + +## Query Keys + +### Key Structure + +```typescript +// List queries +['posts'] // All posts +['posts', { status: 'published' }] // Filtered posts + +// Detail queries +['posts', postId] // Single post +['posts', postId, 'comments'] // Post comments + +// Nested resources +['users', userId, 'posts'] // User's posts +['users', userId, 'posts', postId] // Specific user post +``` + +### Key Factories + +```typescript +// features/posts/api/postKeys.ts +export const postKeys = { + all: ['posts'] as const, + lists: () => [...postKeys.all, 'list'] as const, + list: (filters: string) => [...postKeys.lists(), { filters }] as const, + details: () => [...postKeys.all, 'detail'] as const, + detail: (id: string) => [...postKeys.details(), id] as const, + comments: (id: string) => [...postKeys.detail(id), 'comments'] as const +}; + +// Usage +const { data } = useSuspenseQuery({ + queryKey: postKeys.detail(postId), + queryFn: () => postsApi.get(postId) +}); + +// Invalidate all post lists +queryClient.invalidateQueries({ queryKey: postKeys.lists() }); +``` + +--- + +## Error Handling + +### With Error Boundaries + +```typescript +import { ErrorBoundary } from 'react-error-boundary'; + +}> + }> + + + + +// In component +function DataComponent() { + const { data } = useSuspenseQuery({ + queryKey: ['data'], + queryFn: fetchData, + // Errors automatically caught by ErrorBoundary + }); + + return
{data}
; +} +``` + +### Retry and Cache Configuration + +```typescript +const { data } = useQuery({ + queryKey: ['posts'], + queryFn: postsApi.getAll, + retry: 3, // Retry 3 times + retryDelay: 1000, // Wait 1s between retries + gcTime: 5 * 60 * 1000 // Garbage collection time: 5 minutes (v5: was 'cacheTime') +}); +``` + +--- + +## Best Practices + +### 1. Use Suspense by Default + +```typescript +// ✅ Good: useSuspenseQuery + Suspense +}> + + + +function DataComponent() { + const { data } = useSuspenseQuery({...}); + return
{data}
; +} + +// ❌ Avoid: useQuery with manual loading +function DataComponent() { + const { data, isPending } = useQuery({...}); + if (isPending) return ; + return
{data}
; +} +``` + +### 2. Consistent Query Keys + +```typescript +// ✅ Good: Use key factories +const { data } = useSuspenseQuery({ + queryKey: postKeys.detail(id), + queryFn: () => postsApi.get(id) +}); + +// ❌ Avoid: Inconsistent keys +const { data } = useSuspenseQuery({ + queryKey: ['post', id], // Different format + queryFn: () => postsApi.get(id) +}); +``` + +### 3. Centralized API Services + +```typescript +// ✅ Good: API service +const { data } = useSuspenseQuery({ + queryKey: ['posts'], + queryFn: postsApi.getAll +}); + +// ❌ Avoid: Inline fetching +const { data } = useSuspenseQuery({ + queryKey: ['posts'], + queryFn: async () => { + const res = await fetch('/api/posts'); + return res.json(); + } +}); +``` + +--- + +## Additional Resources + +For more patterns, see: + +- [data-fetching.md](resources/data-fetching.md) - Advanced patterns +- [cache-strategies.md](resources/cache-strategies.md) - Cache management +- [mutation-patterns.md](resources/mutation-patterns.md) - Complex mutations diff --git a/.agents/skills/tanstack-query/resources/cache-strategies.md b/.agents/skills/tanstack-query/resources/cache-strategies.md new file mode 100644 index 0000000..d544c92 --- /dev/null +++ b/.agents/skills/tanstack-query/resources/cache-strategies.md @@ -0,0 +1,252 @@ +# Cache Management Strategies + +## Cache Time Configuration + +```typescript +const { data } = useQuery({ + queryKey: ['posts'], + queryFn: fetchPosts, + staleTime: 5 * 60 * 1000, // Consider fresh for 5 minutes + gcTime: 10 * 60 * 1000 // Keep in cache for 10 minutes (formerly cacheTime) +}); +``` + +## Cache Invalidation + +### Invalidate Specific Queries + +```typescript +const queryClient = useQueryClient(); + +// Invalidate all post queries +queryClient.invalidateQueries({ queryKey: ['posts'] }); + +// Invalidate specific post +queryClient.invalidateQueries({ queryKey: ['post', postId] }); + +// Invalidate with exact match +queryClient.invalidateQueries({ + queryKey: ['posts'], + exact: true // Only 
['posts'], not ['posts', 'list'] +}); +``` + +### Invalidate on Mutation + +```typescript +const { mutate } = useMutation({ + mutationFn: createPost, + onSuccess: () => { + // Invalidate and refetch + queryClient.invalidateQueries({ queryKey: ['posts'] }); + } +}); +``` + +## Manual Cache Updates + +### Set Query Data + +```typescript +// Update cache directly +queryClient.setQueryData(['post', postId], (oldData) => ({ + ...oldData, + title: 'New Title' +})); + +// Set new data +queryClient.setQueryData(['post', postId], newPost); +``` + +### Get Query Data + +```typescript +// Read from cache +const cachedPost = queryClient.getQueryData(['post', postId]); + +// Use in initialData +const { data } = useQuery({ + queryKey: ['post', postId], + queryFn: () => fetchPost(postId), + initialData: () => queryClient.getQueryData(['posts'])?.find((p) => p.id === postId) +}); +``` + +## Refetch Strategies + +### Refetch on Window Focus + +```typescript +const { data } = useQuery({ + queryKey: ['posts'], + queryFn: fetchPosts, + refetchOnWindowFocus: true // Refetch when tab regains focus +}); +``` + +### Refetch on Reconnect + +```typescript +const { data } = useQuery({ + queryKey: ['posts'], + queryFn: fetchPosts, + refetchOnReconnect: true // Refetch when internet reconnects +}); +``` + +### Refetch Intervals + +```typescript +const { data } = useQuery({ + queryKey: ['live-data'], + queryFn: fetchLiveData, + refetchInterval: 5000, // Refetch every 5 seconds + refetchIntervalInBackground: false // Pause when tab not active +}); +``` + +## Cache Persistence + +### Persist to localStorage + +```typescript +import { QueryClient } from '@tanstack/react-query'; +import { PersistQueryClientProvider } from '@tanstack/react-query-persist-client'; +import { createSyncStoragePersister } from '@tanstack/query-sync-storage-persister'; + +const queryClient = new QueryClient({ + defaultOptions: { + queries: { + gcTime: 1000 * 60 * 60 * 24 // 24 hours + } + } +}); + +const persister = 
createSyncStoragePersister({ + storage: window.localStorage +}); + + + + +``` + +## Cache Deduplication + +### Automatic Request Deduplication + +```typescript +// Both components will share the same request +function Component1() { + const { data } = useQuery({ + queryKey: ['posts'], + queryFn: fetchPosts + }); +} + +function Component2() { + const { data } = useQuery({ + queryKey: ['posts'], // Same key = same request + queryFn: fetchPosts + }); +} +``` + +## Cache Preloading + +### Prefetch Queries + +```typescript +const queryClient = useQueryClient(); + +// Prefetch before navigation +const handleMouseEnter = () => { + queryClient.prefetchQuery({ + queryKey: ['post', postId], + queryFn: () => fetchPost(postId) + }); +}; + +// Prefetch in loader +router.beforeEach(async (to, from, next) => { + await queryClient.prefetchQuery({ + queryKey: ['user', to.params.userId], + queryFn: () => fetchUser(to.params.userId) + }); + next(); +}); +``` + +### Ensure Query Data + +```typescript +// Fetch if not in cache, otherwise use cached +await queryClient.ensureQueryData({ + queryKey: ['post', postId], + queryFn: () => fetchPost(postId) +}); +``` + +## Selective Cache Updates + +### Update Nested Data + +```typescript +queryClient.setQueryData(['posts'], (oldPosts) => { + return oldPosts.map((post) => (post.id === updatedPost.id ? 
updatedPost : post)); +}); +``` + +### Add to List Cache + +```typescript +// After creating a post +queryClient.setQueryData(['posts'], (oldPosts = []) => { + return [newPost, ...oldPosts]; +}); +``` + +### Remove from List Cache + +```typescript +// After deleting a post +queryClient.setQueryData(['posts'], (oldPosts) => { + return oldPosts.filter((post) => post.id !== deletedPostId); +}); +``` + +## Cache Debugging + +### React Query Devtools + +```typescript +import { ReactQueryDevtools } from '@tanstack/react-query-devtools'; + + + + + +``` + +### Query Cache Events + +```typescript +const queryCache = queryClient.getQueryCache(); + +queryCache.subscribe((event) => { + console.log('Query cache event:', event.type, event.query.queryKey); +}); +``` + +## Best Practices + +1. **Set Appropriate staleTime** - Balance freshness vs performance +2. **Use Invalidation Over Refetch** - Let queries refetch when needed +3. **Prefetch Predictably** - Preload data on hover/intent +4. **Update Cache on Mutations** - Keep UI in sync +5. **Use Devtools** - Debug cache issues visually +6. **Persist Important Data** - Save to localStorage for offline support +7. 
**Deduplicate Requests** - Rely on automatic deduplication diff --git a/.agents/skills/tanstack-query/resources/data-fetching.md b/.agents/skills/tanstack-query/resources/data-fetching.md new file mode 100644 index 0000000..7a02830 --- /dev/null +++ b/.agents/skills/tanstack-query/resources/data-fetching.md @@ -0,0 +1,240 @@ +# Advanced Data Fetching Patterns with TanStack Query + +## Dependent Queries + +Queries that depend on data from other queries: + +```typescript +// First query - Get user ID +const { data: user } = useQuery({ + queryKey: ['user'], + queryFn: fetchCurrentUser +}); + +// Second query - Depends on user ID +const { data: posts } = useQuery({ + queryKey: ['posts', user?.id], + queryFn: () => fetchUserPosts(user!.id), + enabled: !!user // Only run when user is available +}); +``` + +## Parallel Queries + +Fetch multiple independent queries simultaneously: + +```typescript +function Dashboard() { + const queries = useQueries({ + queries: [ + { queryKey: ['stats'], queryFn: fetchStats }, + { queryKey: ['recentPosts'], queryFn: fetchRecentPosts }, + { queryKey: ['notifications'], queryFn: fetchNotifications } + ] + }); + + const [statsQuery, postsQuery, notificationsQuery] = queries; + + if (queries.some(q => q.isPending)) return ; + + return ; +} +``` + +## Infinite Queries + +For pagination and infinite scroll: + +```typescript +const { + data, + fetchNextPage, + hasNextPage, + isFetchingNextPage +} = useInfiniteQuery({ + queryKey: ['posts'], + queryFn: ({ pageParam = 1 }) => fetchPosts(pageParam), + getNextPageParam: (lastPage, pages) => lastPage.nextCursor, + initialPageParam: 1 +}); + +// Flatten pages +const allPosts = data?.pages.flatMap(page => page.posts) ?? []; + +return (
+ {allPosts.map(post => )} + {hasNextPage && ( + + )} +
+); +``` + +## Prefetching + +Preload data before it's needed: + +```typescript +import { useQueryClient } from '@tanstack/react-query'; + +function PostLink({ postId }: { postId: string }) { + const queryClient = useQueryClient(); + + const handleMouseEnter = () => { + queryClient.prefetchQuery({ + queryKey: ['post', postId], + queryFn: () => fetchPost(postId) + }); + }; + + return ( + + View Post + + ); +} +``` + +## Suspense Mode + +Use with React Suspense: + +```typescript +import { useSuspenseQuery } from '@tanstack/react-query'; + +function PostDetails({ postId }: { postId: string }) { + // Throws promise on loading, error on error + const { data: post } = useSuspenseQuery({ + queryKey: ['post', postId], + queryFn: () => fetchPost(postId) + }); + + return
{post.title}
; +} + +// Wrap with Suspense +}> + + +``` + +## Query Cancellation + +Cancel queries when component unmounts: + +```typescript +const { data, isLoading } = useQuery({ + queryKey: ['search', searchTerm], + queryFn: async ({ signal }) => { + const response = await fetch(`/api/search?q=${searchTerm}`, { signal }); + return response.json(); + } +}); +``` + +## Initial Data + +Provide initial data to avoid loading state: + +```typescript +const { data } = useQuery({ + queryKey: ['post', postId], + queryFn: () => fetchPost(postId), + initialData: () => { + // Get from cache or other source + return queryClient.getQueryData(['posts'])?.find((post) => post.id === postId); + } +}); +``` + +## Placeholder Data + +Show placeholder while loading: + +```typescript +const { data, isPlaceholderData } = useQuery({ + queryKey: ['posts', page], + queryFn: () => fetchPosts(page), + placeholderData: (previousData) => previousData // Keep previous page while loading +}); + +// Or provide static placeholder +placeholderData: { posts: [], total: 0 } +``` + +## Optimistic Updates with Queries + +Update UI immediately, rollback on error: + +```typescript +const queryClient = useQueryClient(); + +const { mutate } = useMutation({ + mutationFn: updatePost, + onMutate: async (newPost) => { + // Cancel outgoing queries + await queryClient.cancelQueries({ queryKey: ['post', newPost.id] }); + + // Snapshot current value + const previousPost = queryClient.getQueryData(['post', newPost.id]); + + // Optimistically update + queryClient.setQueryData(['post', newPost.id], newPost); + + return { previousPost }; + }, + onError: (err, newPost, context) => { + // Rollback on error + queryClient.setQueryData(['post', newPost.id], context?.previousPost); + }, + onSettled: (newPost) => { + // Refetch after success or error + queryClient.invalidateQueries({ queryKey: ['post', newPost.id] }); + } +}); +``` + +## Query Retries + +Configure retry behavior: + +```typescript +const { data } = useQuery({ + 
queryKey: ['post', postId], + queryFn: () => fetchPost(postId), + retry: 3, // Retry 3 times + retryDelay: (attemptIndex) => Math.min(1000 * 2 ** attemptIndex, 30000) // Exponential backoff +}); +``` + +## Error Handling + +Handle query errors: + +```typescript +const { data, error, isError } = useQuery({ + queryKey: ['post', postId], + queryFn: () => fetchPost(postId), + throwOnError: false // Don't throw, just set error +}); + +if (isError) { + return ; +} +``` + +## Best Practices + +1. **Use Suspense** - Better loading UX with React Suspense +2. **Prefetch on Intent** - Preload data on hover/focus +3. **Enable Queries Conditionally** - Use `enabled` option +4. **Cancel on Unmount** - Use abort signals +5. **Handle Errors Gracefully** - Show error states +6. **Optimize with Placeholders** - Show previous data while loading diff --git a/.agents/skills/tanstack-query/resources/mutation-patterns.md b/.agents/skills/tanstack-query/resources/mutation-patterns.md new file mode 100644 index 0000000..676ba96 --- /dev/null +++ b/.agents/skills/tanstack-query/resources/mutation-patterns.md @@ -0,0 +1,344 @@ +# Complex Mutation Patterns + +## Basic Mutations + +```typescript +const { mutate, isPending, isError, error } = useMutation({ + mutationFn: (newPost: CreatePostDto) => createPost(newPost), + onSuccess: (data) => { + console.log('Post created:', data); + }, + onError: (error) => { + console.error('Failed to create post:', error); + } +}); + +// Trigger mutation +mutate({ title: 'New Post', content: '...' 
}); +``` + +## Optimistic Updates + +Update UI immediately, rollback on error: + +```typescript +const { mutate } = useMutation({ + mutationFn: updatePost, + onMutate: async (newPost) => { + // Cancel outgoing refetches + await queryClient.cancelQueries({ queryKey: ['posts'] }); + + // Snapshot previous value + const previousPosts = queryClient.getQueryData(['posts']); + + // Optimistically update to the new value + queryClient.setQueryData(['posts'], (old) => + old.map((post) => (post.id === newPost.id ? newPost : post)) + ); + + // Return context with snapshot + return { previousPosts }; + }, + onError: (err, newPost, context) => { + // Rollback to previous value + queryClient.setQueryData(['posts'], context.previousPosts); + }, + onSettled: () => { + // Always refetch after error or success + queryClient.invalidateQueries({ queryKey: ['posts'] }); + } +}); +``` + +## Sequential Mutations + +Run mutations in sequence: + +```typescript +const createAndPublish = async (postData) => { + // Create post + const post = await createPostMutation.mutateAsync(postData); + + // Publish post + const published = await publishPostMutation.mutateAsync(post.id); + + return published; +}; +``` + +## Parallel Mutations + +Run multiple mutations simultaneously: + +```typescript +const { mutate } = useMutation({ + mutationFn: async (updates) => { + const results = await Promise.all([ + updateProfile(updates.profile), + updateSettings(updates.settings), + updatePreferences(updates.preferences) + ]); + return results; + } +}); +``` + +## Mutation with Invalidation + +```typescript +const { mutate } = useMutation({ + mutationFn: createPost, + onSuccess: () => { + // Invalidate and refetch + queryClient.invalidateQueries({ queryKey: ['posts'] }); + + // Or update cache directly + queryClient.setQueryData(['posts'], (old) => [newPost, ...old]); + } +}); +``` + +## Mutation with Multiple Cache Updates + +```typescript +const { mutate } = useMutation({ + mutationFn: deletePost, + 
onSuccess: (_, deletedPostId) => { + // Update posts list + queryClient.setQueryData(['posts'], (old) => old.filter((post) => post.id !== deletedPostId)); + + // Update post count + queryClient.setQueryData(['postsCount'], (old) => old - 1); + + // Invalidate related queries + queryClient.invalidateQueries({ queryKey: ['user', 'stats'] }); + } +}); +``` + +## Error Handling + +```typescript +const { mutate, isError, error, reset } = useMutation({ + mutationFn: createPost, + onError: (error) => { + if (error.code === 'VALIDATION_ERROR') { + setFormErrors(error.fields); + } else if (error.code === 'NETWORK_ERROR') { + showRetryDialog(); + } else { + showGenericError(); + } + } +}); + +// Clear error state +reset(); +``` + +## Retry Failed Mutations + +```typescript +const { mutate } = useMutation({ + mutationFn: createPost, + retry: 3, // Retry 3 times on failure + retryDelay: (attemptIndex) => Math.min(1000 * 2 ** attemptIndex, 30000) // Exponential backoff +}); +``` + +## Mutation with Loading State + +```typescript +function CreatePostForm() { + const { mutate, isPending } = useMutation({ + mutationFn: createPost, + onSuccess: () => { + navigate('/posts'); + } + }); + + const handleSubmit = (data) => { + mutate(data); + }; + + return ( +
+ {/* form fields */} + +
+ ); +} +``` + +## Mutation with Variables + +```typescript +const { mutate, variables } = useMutation({ + mutationFn: updatePost +}); + +// Access last mutation variables +console.log('Last updated post:', variables); +``` + +## Mutation Callbacks + +```typescript +const { mutate } = useMutation({ + mutationFn: createPost, + onMutate: (variables) => { + console.log('Starting mutation with:', variables); + }, + onSuccess: (data, variables, context) => { + console.log('Success!', data); + }, + onError: (error, variables, context) => { + console.error('Error!', error); + }, + onSettled: (data, error, variables, context) => { + console.log('Mutation finished (success or error)'); + } +}); +``` + +## Mutation with Form Integration + +```typescript +import { useForm } from 'react-hook-form'; + +function CreatePostForm() { + const { register, handleSubmit, reset } = useForm(); + + const { mutate, isPending, isError, error } = useMutation({ + mutationFn: createPost, + onSuccess: () => { + reset(); // Clear form + toast.success('Post created!'); + }, + onError: (error) => { + toast.error(error.message); + } + }); + + const onSubmit = (data) => { + mutate(data); + }; + + return ( +
+ +