mirror of
https://github.com/samiyev/puaros.git
synced 2025-12-28 07:16:53 +05:00
Compare commits
16 Commits
ipuaro-v0.
...
ipuaro-v0.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2c6eb6ce9b | ||
|
|
7d18e87423 | ||
|
|
fd1e6ad86e | ||
|
|
259ecc181a | ||
|
|
0f2ed5b301 | ||
|
|
56643d903f | ||
|
|
f5f904a847 | ||
|
|
2ae1ac13f5 | ||
|
|
caf7aac116 | ||
|
|
4ad5a209c4 | ||
|
|
25146003cc | ||
|
|
68f927d906 | ||
|
|
b3e04a411c | ||
|
|
294d085ad4 | ||
|
|
958e4daed5 | ||
|
|
6234fbce92 |
29
CLAUDE.md
29
CLAUDE.md
@@ -447,6 +447,35 @@ Copy and use for each release:
|
||||
- [ ] Published to npm (if public release)
|
||||
```
|
||||
|
||||
## Working with Roadmap
|
||||
|
||||
When the user points to `ROADMAP.md` or asks about the roadmap/next steps:
|
||||
|
||||
1. **Read both files together:**
|
||||
- `packages/<package>/ROADMAP.md` - to understand the planned features and milestones
|
||||
- `packages/<package>/CHANGELOG.md` - to see what's already implemented
|
||||
|
||||
2. **Determine current position:**
|
||||
- Check the latest version in CHANGELOG.md
|
||||
- Cross-reference with ROADMAP.md milestones
|
||||
- Identify which roadmap items are already completed (present in CHANGELOG)
|
||||
|
||||
3. **Suggest next steps:**
|
||||
- Find the first uncompleted item in the current milestone
|
||||
- Or identify the next milestone if current one is complete
|
||||
- Present clear "start here" recommendation
|
||||
|
||||
**Example workflow:**
|
||||
```
|
||||
User: "Let's work on the roadmap" or points to ROADMAP.md
|
||||
|
||||
Claude should:
|
||||
1. Read ROADMAP.md → See milestones v0.1.0, v0.2.0, v0.3.0...
|
||||
2. Read CHANGELOG.md → See latest release is v0.1.1
|
||||
3. Compare → v0.1.0 milestone complete, v0.2.0 in progress
|
||||
4. Report → "v0.1.0 is complete. For v0.2.0, next item is: <feature>"
|
||||
```
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### Adding a new CLI option
|
||||
|
||||
@@ -5,6 +5,26 @@ All notable changes to @samiyev/guardian will be documented in this file.
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [0.9.4] - 2025-11-30
|
||||
|
||||
### Added
|
||||
|
||||
- **VERSION export** - Package version is now exported from index.ts, automatically read from package.json
|
||||
|
||||
### Changed
|
||||
|
||||
- 🔄 **Refactored SecretDetector** - Reduced cyclomatic complexity from 24 to <15:
|
||||
- Extracted helper methods: `extractByRuleId`, `extractAwsType`, `extractGithubType`, `extractSshType`, `extractSlackType`, `extractByMessage`
|
||||
- Used lookup arrays for SSH and message type mappings
|
||||
- 🔄 **Refactored AstNamingTraverser** - Reduced cyclomatic complexity from 17 to <15:
|
||||
- Replaced if-else chain with Map-based node handlers
|
||||
- Added `buildNodeHandlers()` method for cleaner architecture
|
||||
|
||||
### Quality
|
||||
|
||||
- ✅ **Zero lint warnings** - All ESLint warnings resolved
|
||||
- ✅ **All 616 tests pass**
|
||||
|
||||
## [0.9.2] - 2025-11-27
|
||||
|
||||
### Changed
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@samiyev/guardian",
|
||||
"version": "0.9.3",
|
||||
"version": "0.9.4",
|
||||
"description": "Research-backed code quality guardian for AI-assisted development. Detects hardcodes, secrets, circular deps, framework leaks, entity exposure, and 9 architecture violations. Enforces Clean Architecture/DDD principles. Works with GitHub Copilot, Cursor, Windsurf, Claude, ChatGPT, Cline, and any AI coding tool.",
|
||||
"keywords": [
|
||||
"puaros",
|
||||
@@ -40,7 +40,7 @@
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/samiyev/puaros.git",
|
||||
"url": "git+https://github.com/samiyev/puaros.git",
|
||||
"directory": "packages/guardian"
|
||||
},
|
||||
"bugs": {
|
||||
|
||||
@@ -215,6 +215,7 @@ export class AnalyzeProject extends UseCase<
|
||||
private readonly detectionPipeline: ExecuteDetection
|
||||
private readonly resultAggregator: AggregateResults
|
||||
|
||||
// eslint-disable-next-line max-params
|
||||
constructor(
|
||||
fileScanner: IFileScanner,
|
||||
codeParser: ICodeParser,
|
||||
|
||||
@@ -56,6 +56,7 @@ export interface DetectionResult {
|
||||
* Pipeline step responsible for running all detectors
|
||||
*/
|
||||
export class ExecuteDetection {
|
||||
// eslint-disable-next-line max-params
|
||||
constructor(
|
||||
private readonly hardcodeDetector: IHardcodeDetector,
|
||||
private readonly namingConventionDetector: INamingConventionDetector,
|
||||
|
||||
@@ -171,6 +171,7 @@ export class HardcodedValue extends ValueObject<HardcodedValueProps> {
|
||||
return `${CONSTANT_NAMES.MAGIC_NUMBER}_${String(value)}`
|
||||
}
|
||||
|
||||
// eslint-disable-next-line complexity, max-lines-per-function
|
||||
private suggestStringConstantName(): string {
|
||||
const value = String(this.props.value)
|
||||
const context = this.props.context.toLowerCase()
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
import pkg from "../package.json"
|
||||
|
||||
export const VERSION = pkg.version
|
||||
|
||||
export * from "./domain"
|
||||
export * from "./application"
|
||||
export * from "./infrastructure"
|
||||
|
||||
@@ -90,80 +90,98 @@ export class SecretDetector implements ISecretDetector {
|
||||
}
|
||||
|
||||
private extractSecretType(message: string, ruleId: string): string {
|
||||
const lowerMessage = message.toLowerCase()
|
||||
|
||||
const ruleBasedType = this.extractByRuleId(ruleId, lowerMessage)
|
||||
if (ruleBasedType) {
|
||||
return ruleBasedType
|
||||
}
|
||||
|
||||
return this.extractByMessage(lowerMessage)
|
||||
}
|
||||
|
||||
private extractByRuleId(ruleId: string, lowerMessage: string): string | null {
|
||||
if (ruleId.includes(SECRET_KEYWORDS.AWS)) {
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.ACCESS_KEY)) {
|
||||
return this.extractAwsType(lowerMessage)
|
||||
}
|
||||
if (ruleId.includes(SECRET_KEYWORDS.GITHUB)) {
|
||||
return this.extractGithubType(lowerMessage)
|
||||
}
|
||||
if (ruleId.includes(SECRET_KEYWORDS.NPM)) {
|
||||
return SECRET_TYPE_NAMES.NPM_TOKEN
|
||||
}
|
||||
if (ruleId.includes(SECRET_KEYWORDS.GCP) || ruleId.includes(SECRET_KEYWORDS.GOOGLE)) {
|
||||
return SECRET_TYPE_NAMES.GCP_SERVICE_ACCOUNT_KEY
|
||||
}
|
||||
if (ruleId.includes(SECRET_KEYWORDS.PRIVATEKEY) || ruleId.includes(SECRET_KEYWORDS.SSH)) {
|
||||
return this.extractSshType(lowerMessage)
|
||||
}
|
||||
if (ruleId.includes(SECRET_KEYWORDS.SLACK)) {
|
||||
return this.extractSlackType(lowerMessage)
|
||||
}
|
||||
if (ruleId.includes(SECRET_KEYWORDS.BASICAUTH)) {
|
||||
return SECRET_TYPE_NAMES.BASIC_AUTH_CREDENTIALS
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
private extractAwsType(lowerMessage: string): string {
|
||||
if (lowerMessage.includes(SECRET_KEYWORDS.ACCESS_KEY)) {
|
||||
return SECRET_TYPE_NAMES.AWS_ACCESS_KEY
|
||||
}
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.SECRET)) {
|
||||
if (lowerMessage.includes(SECRET_KEYWORDS.SECRET)) {
|
||||
return SECRET_TYPE_NAMES.AWS_SECRET_KEY
|
||||
}
|
||||
return SECRET_TYPE_NAMES.AWS_CREDENTIAL
|
||||
}
|
||||
|
||||
if (ruleId.includes(SECRET_KEYWORDS.GITHUB)) {
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.PERSONAL_ACCESS_TOKEN)) {
|
||||
private extractGithubType(lowerMessage: string): string {
|
||||
if (lowerMessage.includes(SECRET_KEYWORDS.PERSONAL_ACCESS_TOKEN)) {
|
||||
return SECRET_TYPE_NAMES.GITHUB_PERSONAL_ACCESS_TOKEN
|
||||
}
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.OAUTH)) {
|
||||
if (lowerMessage.includes(SECRET_KEYWORDS.OAUTH)) {
|
||||
return SECRET_TYPE_NAMES.GITHUB_OAUTH_TOKEN
|
||||
}
|
||||
return SECRET_TYPE_NAMES.GITHUB_TOKEN
|
||||
}
|
||||
|
||||
if (ruleId.includes(SECRET_KEYWORDS.NPM)) {
|
||||
return SECRET_TYPE_NAMES.NPM_TOKEN
|
||||
private extractSshType(lowerMessage: string): string {
|
||||
const sshTypeMap: [string, string][] = [
|
||||
[SECRET_KEYWORDS.RSA, SECRET_TYPE_NAMES.SSH_RSA_PRIVATE_KEY],
|
||||
[SECRET_KEYWORDS.DSA, SECRET_TYPE_NAMES.SSH_DSA_PRIVATE_KEY],
|
||||
[SECRET_KEYWORDS.ECDSA, SECRET_TYPE_NAMES.SSH_ECDSA_PRIVATE_KEY],
|
||||
[SECRET_KEYWORDS.ED25519, SECRET_TYPE_NAMES.SSH_ED25519_PRIVATE_KEY],
|
||||
]
|
||||
for (const [keyword, typeName] of sshTypeMap) {
|
||||
if (lowerMessage.includes(keyword)) {
|
||||
return typeName
|
||||
}
|
||||
|
||||
if (ruleId.includes(SECRET_KEYWORDS.GCP) || ruleId.includes(SECRET_KEYWORDS.GOOGLE)) {
|
||||
return SECRET_TYPE_NAMES.GCP_SERVICE_ACCOUNT_KEY
|
||||
}
|
||||
|
||||
if (ruleId.includes(SECRET_KEYWORDS.PRIVATEKEY) || ruleId.includes(SECRET_KEYWORDS.SSH)) {
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.RSA)) {
|
||||
return SECRET_TYPE_NAMES.SSH_RSA_PRIVATE_KEY
|
||||
}
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.DSA)) {
|
||||
return SECRET_TYPE_NAMES.SSH_DSA_PRIVATE_KEY
|
||||
}
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.ECDSA)) {
|
||||
return SECRET_TYPE_NAMES.SSH_ECDSA_PRIVATE_KEY
|
||||
}
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.ED25519)) {
|
||||
return SECRET_TYPE_NAMES.SSH_ED25519_PRIVATE_KEY
|
||||
}
|
||||
return SECRET_TYPE_NAMES.SSH_PRIVATE_KEY
|
||||
}
|
||||
|
||||
if (ruleId.includes(SECRET_KEYWORDS.SLACK)) {
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.BOT)) {
|
||||
private extractSlackType(lowerMessage: string): string {
|
||||
if (lowerMessage.includes(SECRET_KEYWORDS.BOT)) {
|
||||
return SECRET_TYPE_NAMES.SLACK_BOT_TOKEN
|
||||
}
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.USER)) {
|
||||
if (lowerMessage.includes(SECRET_KEYWORDS.USER)) {
|
||||
return SECRET_TYPE_NAMES.SLACK_USER_TOKEN
|
||||
}
|
||||
return SECRET_TYPE_NAMES.SLACK_TOKEN
|
||||
}
|
||||
|
||||
if (ruleId.includes(SECRET_KEYWORDS.BASICAUTH)) {
|
||||
return SECRET_TYPE_NAMES.BASIC_AUTH_CREDENTIALS
|
||||
private extractByMessage(lowerMessage: string): string {
|
||||
const messageTypeMap: [string, string][] = [
|
||||
[SECRET_KEYWORDS.API_KEY, SECRET_TYPE_NAMES.API_KEY],
|
||||
[SECRET_KEYWORDS.TOKEN, SECRET_TYPE_NAMES.AUTHENTICATION_TOKEN],
|
||||
[SECRET_KEYWORDS.PASSWORD, SECRET_TYPE_NAMES.PASSWORD],
|
||||
[SECRET_KEYWORDS.SECRET, SECRET_TYPE_NAMES.SECRET],
|
||||
]
|
||||
for (const [keyword, typeName] of messageTypeMap) {
|
||||
if (lowerMessage.includes(keyword)) {
|
||||
return typeName
|
||||
}
|
||||
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.API_KEY)) {
|
||||
return SECRET_TYPE_NAMES.API_KEY
|
||||
}
|
||||
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.TOKEN)) {
|
||||
return SECRET_TYPE_NAMES.AUTHENTICATION_TOKEN
|
||||
}
|
||||
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.PASSWORD)) {
|
||||
return SECRET_TYPE_NAMES.PASSWORD
|
||||
}
|
||||
|
||||
if (message.toLowerCase().includes(SECRET_KEYWORDS.SECRET)) {
|
||||
return SECRET_TYPE_NAMES.SECRET
|
||||
}
|
||||
|
||||
return SECRET_TYPE_NAMES.SENSITIVE_DATA
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,13 @@ import { AstFunctionNameAnalyzer } from "./AstFunctionNameAnalyzer"
|
||||
import { AstInterfaceNameAnalyzer } from "./AstInterfaceNameAnalyzer"
|
||||
import { AstVariableNameAnalyzer } from "./AstVariableNameAnalyzer"
|
||||
|
||||
type NodeAnalyzer = (
|
||||
node: Parser.SyntaxNode,
|
||||
layer: string,
|
||||
filePath: string,
|
||||
lines: string[],
|
||||
) => NamingViolation | null
|
||||
|
||||
/**
|
||||
* AST tree traverser for detecting naming convention violations
|
||||
*
|
||||
@@ -13,12 +20,16 @@ import { AstVariableNameAnalyzer } from "./AstVariableNameAnalyzer"
|
||||
* to detect naming violations in classes, interfaces, functions, and variables.
|
||||
*/
|
||||
export class AstNamingTraverser {
|
||||
private readonly nodeHandlers: Map<string, NodeAnalyzer>
|
||||
|
||||
constructor(
|
||||
private readonly classAnalyzer: AstClassNameAnalyzer,
|
||||
private readonly interfaceAnalyzer: AstInterfaceNameAnalyzer,
|
||||
private readonly functionAnalyzer: AstFunctionNameAnalyzer,
|
||||
private readonly variableAnalyzer: AstVariableNameAnalyzer,
|
||||
) {}
|
||||
) {
|
||||
this.nodeHandlers = this.buildNodeHandlers()
|
||||
}
|
||||
|
||||
/**
|
||||
* Traverses the AST tree and collects naming violations
|
||||
@@ -38,6 +49,33 @@ export class AstNamingTraverser {
|
||||
return results
|
||||
}
|
||||
|
||||
private buildNodeHandlers(): Map<string, NodeAnalyzer> {
|
||||
const handlers = new Map<string, NodeAnalyzer>()
|
||||
|
||||
handlers.set(AST_CLASS_TYPES.CLASS_DECLARATION, (node, layer, filePath, lines) =>
|
||||
this.classAnalyzer.analyze(node, layer, filePath, lines),
|
||||
)
|
||||
handlers.set(AST_CLASS_TYPES.INTERFACE_DECLARATION, (node, layer, filePath, lines) =>
|
||||
this.interfaceAnalyzer.analyze(node, layer, filePath, lines),
|
||||
)
|
||||
|
||||
const functionHandler: NodeAnalyzer = (node, layer, filePath, lines) =>
|
||||
this.functionAnalyzer.analyze(node, layer, filePath, lines)
|
||||
handlers.set(AST_FUNCTION_TYPES.FUNCTION_DECLARATION, functionHandler)
|
||||
handlers.set(AST_FUNCTION_TYPES.METHOD_DEFINITION, functionHandler)
|
||||
handlers.set(AST_FUNCTION_TYPES.FUNCTION_SIGNATURE, functionHandler)
|
||||
|
||||
const variableHandler: NodeAnalyzer = (node, layer, filePath, lines) =>
|
||||
this.variableAnalyzer.analyze(node, layer, filePath, lines)
|
||||
handlers.set(AST_VARIABLE_TYPES.VARIABLE_DECLARATOR, variableHandler)
|
||||
handlers.set(AST_VARIABLE_TYPES.REQUIRED_PARAMETER, variableHandler)
|
||||
handlers.set(AST_VARIABLE_TYPES.OPTIONAL_PARAMETER, variableHandler)
|
||||
handlers.set(AST_VARIABLE_TYPES.PUBLIC_FIELD_DEFINITION, variableHandler)
|
||||
handlers.set(AST_VARIABLE_TYPES.PROPERTY_SIGNATURE, variableHandler)
|
||||
|
||||
return handlers
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursively visits AST nodes
|
||||
*/
|
||||
@@ -49,34 +87,10 @@ export class AstNamingTraverser {
|
||||
results: NamingViolation[],
|
||||
): void {
|
||||
const node = cursor.currentNode
|
||||
const handler = this.nodeHandlers.get(node.type)
|
||||
|
||||
if (node.type === AST_CLASS_TYPES.CLASS_DECLARATION) {
|
||||
const violation = this.classAnalyzer.analyze(node, layer, filePath, lines)
|
||||
if (violation) {
|
||||
results.push(violation)
|
||||
}
|
||||
} else if (node.type === AST_CLASS_TYPES.INTERFACE_DECLARATION) {
|
||||
const violation = this.interfaceAnalyzer.analyze(node, layer, filePath, lines)
|
||||
if (violation) {
|
||||
results.push(violation)
|
||||
}
|
||||
} else if (
|
||||
node.type === AST_FUNCTION_TYPES.FUNCTION_DECLARATION ||
|
||||
node.type === AST_FUNCTION_TYPES.METHOD_DEFINITION ||
|
||||
node.type === AST_FUNCTION_TYPES.FUNCTION_SIGNATURE
|
||||
) {
|
||||
const violation = this.functionAnalyzer.analyze(node, layer, filePath, lines)
|
||||
if (violation) {
|
||||
results.push(violation)
|
||||
}
|
||||
} else if (
|
||||
node.type === AST_VARIABLE_TYPES.VARIABLE_DECLARATOR ||
|
||||
node.type === AST_VARIABLE_TYPES.REQUIRED_PARAMETER ||
|
||||
node.type === AST_VARIABLE_TYPES.OPTIONAL_PARAMETER ||
|
||||
node.type === AST_VARIABLE_TYPES.PUBLIC_FIELD_DEFINITION ||
|
||||
node.type === AST_VARIABLE_TYPES.PROPERTY_SIGNATURE
|
||||
) {
|
||||
const violation = this.variableAnalyzer.analyze(node, layer, filePath, lines)
|
||||
if (handler) {
|
||||
const violation = handler(node, layer, filePath, lines)
|
||||
if (violation) {
|
||||
results.push(violation)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,494 @@ All notable changes to this project will be documented in this file.
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [0.13.0] - 2025-12-01 - Security
|
||||
|
||||
### Added
|
||||
|
||||
- **PathValidator Utility (0.13.3)**
|
||||
- Centralized path validation for all file operations
|
||||
- Prevents path traversal attacks (`..`, `~`)
|
||||
- Validates paths are within project root
|
||||
- Sync (`validateSync`) and async (`validate`) validation methods
|
||||
- Quick check method (`isWithin`) for simple validations
|
||||
- Resolution methods (`resolve`, `relativize`, `resolveOrThrow`)
|
||||
- Detailed validation results with status and reason
|
||||
- Options for file existence, directory/file type checks
|
||||
|
||||
- **Security Module**
|
||||
- New `infrastructure/security` module
|
||||
- Exports: `PathValidator`, `createPathValidator`, `validatePath`
|
||||
- Type exports: `PathValidationResult`, `PathValidationStatus`, `PathValidatorOptions`
|
||||
|
||||
### Changed
|
||||
|
||||
- **Refactored All File Tools to Use PathValidator**
|
||||
- GetLinesTool: Uses PathValidator for path validation
|
||||
- GetFunctionTool: Uses PathValidator for path validation
|
||||
- GetClassTool: Uses PathValidator for path validation
|
||||
- GetStructureTool: Uses PathValidator for path validation
|
||||
- EditLinesTool: Uses PathValidator for path validation
|
||||
- CreateFileTool: Uses PathValidator for path validation
|
||||
- DeleteFileTool: Uses PathValidator for path validation
|
||||
|
||||
- **Improved Error Messages**
|
||||
- More specific error messages from PathValidator
|
||||
- "Path contains traversal patterns" for `..` attempts
|
||||
- "Path is outside project root" for absolute paths outside project
|
||||
- "Path is empty" for empty/whitespace paths
|
||||
|
||||
### Technical Details
|
||||
|
||||
- Total tests: 1305 (51 new PathValidator tests)
|
||||
- Test coverage: ~98% maintained
|
||||
- No breaking changes to existing tool APIs
|
||||
- Security validation is now consistent across all 7 file tools
|
||||
|
||||
---
|
||||
|
||||
## [0.12.0] - 2025-12-01 - TUI Advanced
|
||||
|
||||
### Added
|
||||
|
||||
- **DiffView Component (0.12.1)**
|
||||
- Inline diff display with green (added) and red (removed) highlighting
|
||||
- Header with file path and line range: `┌─── path (lines X-Y) ───┐`
|
||||
- Line numbers with proper padding
|
||||
- Stats footer showing additions and deletions count
|
||||
|
||||
- **ConfirmDialog Component (0.12.2)**
|
||||
- Confirmation dialog with [Y] Apply / [N] Cancel / [E] Edit options
|
||||
- Optional diff preview integration
|
||||
- Keyboard input handling (Y/N/E keys, Escape)
|
||||
- Visual selection feedback
|
||||
|
||||
- **ErrorDialog Component (0.12.3)**
|
||||
- Error dialog with [R] Retry / [S] Skip / [A] Abort options
|
||||
- Recoverable vs non-recoverable error handling
|
||||
- Disabled buttons for non-recoverable errors
|
||||
- Keyboard input with Escape support
|
||||
|
||||
- **Progress Component (0.12.4)**
|
||||
- Progress bar display: `[=====> ] 45% (120/267 files)`
|
||||
- Color-coded progress (cyan < 50%, yellow < 100%, green = 100%)
|
||||
- Configurable width
|
||||
- Label support for context
|
||||
|
||||
### Changed
|
||||
|
||||
- Total tests: 1254 (unchanged - TUI components excluded from coverage)
|
||||
- TUI layer now has 8 components + 2 hooks
|
||||
- All v0.12.0 roadmap items complete
|
||||
|
||||
---
|
||||
|
||||
## [0.11.0] - 2025-12-01 - TUI Basic
|
||||
|
||||
### Added
|
||||
|
||||
- **TUI Types (0.11.0)**
|
||||
- `TuiStatus`: Status type for TUI display (ready, thinking, tool_call, awaiting_confirmation, error)
|
||||
- `BranchInfo`: Git branch information (name, isDetached)
|
||||
- `AppProps`: Main app component props
|
||||
- `StatusBarData`: Status bar display data
|
||||
|
||||
- **App Shell (0.11.1)**
|
||||
- Main TUI App component with React/Ink
|
||||
- Session initialization and state management
|
||||
- Loading and error screens
|
||||
- Hotkey integration (Ctrl+C, Ctrl+D, Ctrl+Z)
|
||||
- Session time tracking
|
||||
|
||||
- **StatusBar Component (0.11.2)**
|
||||
- Displays: `[ipuaro] [ctx: 12%] [project] [branch] [time] status`
|
||||
- Context usage with color warning at >80%
|
||||
- Git branch with detached HEAD support
|
||||
- Status indicator with colors (ready=green, thinking=yellow, error=red)
|
||||
|
||||
- **Chat Component (0.11.3)**
|
||||
- Message history display with role-based styling
|
||||
- User messages (green), Assistant messages (cyan), System messages (gray)
|
||||
- Tool call display with parameters
|
||||
- Response stats: time, tokens, tool calls
|
||||
- Thinking indicator during LLM processing
|
||||
|
||||
- **Input Component (0.11.4)**
|
||||
- Prompt with `> ` prefix
|
||||
- History navigation with ↑/↓ arrow keys
|
||||
- Saved input restoration when navigating past history
|
||||
- Disabled state during processing
|
||||
- Custom placeholder support
|
||||
|
||||
- **useSession Hook (0.11.5)**
|
||||
- Session state management with React hooks
|
||||
- Message handling integration
|
||||
- Status tracking (ready, thinking, tool_call, error)
|
||||
- Undo support
|
||||
- Clear history functionality
|
||||
- Abort/interrupt support
|
||||
|
||||
- **useHotkeys Hook (0.11.6)**
|
||||
- Ctrl+C: Interrupt (1st), Exit (2nd within 1s)
|
||||
- Ctrl+D: Exit with session save
|
||||
- Ctrl+Z: Undo last change
|
||||
|
||||
### Changed
|
||||
|
||||
- Total tests: 1254 (was 1174)
|
||||
- Coverage: 97.75% lines, 92.22% branches
|
||||
- TUI layer now has 4 components + 2 hooks
|
||||
- TUI excluded from coverage thresholds (requires React testing setup)
|
||||
|
||||
---
|
||||
|
||||
## [0.10.0] - 2025-12-01 - Session Management
|
||||
|
||||
### Added
|
||||
|
||||
- **ISessionStorage (0.10.1)**
|
||||
- Session storage service interface
|
||||
- Methods: saveSession, loadSession, deleteSession, listSessions
|
||||
- Undo stack management: pushUndoEntry, popUndoEntry, getUndoStack
|
||||
- Session lifecycle: getLatestSession, sessionExists, touchSession
|
||||
|
||||
- **RedisSessionStorage (0.10.2)**
|
||||
- Redis implementation of ISessionStorage
|
||||
- Session data in Redis hashes (project, history, context, stats)
|
||||
- Undo stack in Redis lists (max 10 entries)
|
||||
- Sessions list for project-wide queries
|
||||
- 22 unit tests
|
||||
|
||||
- **ContextManager (0.10.3)**
|
||||
- Manages context window token budget
|
||||
- File context tracking with addToContext/removeFromContext
|
||||
- Usage monitoring: getUsage, getAvailableTokens, getRemainingTokens
|
||||
- Auto-compression at 80% threshold via LLM summarization
|
||||
- Context state export for session persistence
|
||||
- 23 unit tests
|
||||
|
||||
- **StartSession (0.10.4)**
|
||||
- Use case for session initialization
|
||||
- Creates new session or loads latest for project
|
||||
- Optional sessionId for specific session loading
|
||||
- forceNew option to always create fresh session
|
||||
- 10 unit tests
|
||||
|
||||
- **HandleMessage (0.10.5)**
|
||||
- Main orchestrator use case for message handling
|
||||
- LLM interaction with tool calling support
|
||||
- Edit confirmation flow with diff preview
|
||||
- Error handling with retry/skip/abort choices
|
||||
- Status tracking: ready, thinking, tool_call, awaiting_confirmation, error
|
||||
- Event callbacks: onMessage, onToolCall, onToolResult, onConfirmation, onError
|
||||
- 21 unit tests
|
||||
|
||||
- **UndoChange (0.10.6)**
|
||||
- Use case for reverting file changes
|
||||
- Validates file hasn't changed since edit
|
||||
- Restores original content from undo entry
|
||||
- Updates storage after successful undo
|
||||
- 12 unit tests
|
||||
|
||||
### Changed
|
||||
|
||||
- Total tests: 1174 (was 1086)
|
||||
- Coverage: 97.73% lines, 92.21% branches
|
||||
- Application layer now has 4 use cases implemented
|
||||
- All planned session management features complete
|
||||
|
||||
---
|
||||
|
||||
## [0.9.0] - 2025-12-01 - Git & Run Tools
|
||||
|
||||
### Added
|
||||
|
||||
- **GitStatusTool (0.9.1)**
|
||||
- `git_status()`: Get current git repository status
|
||||
- Returns branch name, tracking branch, ahead/behind counts
|
||||
- Lists staged, modified, untracked, and conflicted files
|
||||
- Detects detached HEAD state
|
||||
- 29 unit tests
|
||||
|
||||
- **GitDiffTool (0.9.2)**
|
||||
- `git_diff(path?, staged?)`: Get uncommitted changes
|
||||
- Returns file-by-file diff summary with insertions/deletions
|
||||
- Full diff text output
|
||||
- Optional path filter for specific files/directories
|
||||
- Staged-only mode (`--cached`)
|
||||
- Handles binary files
|
||||
- 25 unit tests
|
||||
|
||||
- **GitCommitTool (0.9.3)**
|
||||
- `git_commit(message, files?)`: Create a git commit
|
||||
- Requires user confirmation before commit
|
||||
- Optional file staging before commit
|
||||
- Returns commit hash, summary, author info
|
||||
- Validates staged files exist
|
||||
- 26 unit tests
|
||||
|
||||
- **CommandSecurity**
|
||||
- Security module for shell command validation
|
||||
- Blacklist: dangerous commands always blocked (rm -rf, sudo, git push --force, etc.)
|
||||
- Whitelist: safe commands allowed without confirmation (npm, node, git status, etc.)
|
||||
- Classification: `allowed`, `blocked`, `requires_confirmation`
|
||||
- Git subcommand awareness (safe read operations vs write operations)
|
||||
- Extensible via `addToBlacklist()` and `addToWhitelist()`
|
||||
- 65 unit tests
|
||||
|
||||
- **RunCommandTool (0.9.4)**
|
||||
- `run_command(command, timeout?)`: Execute shell commands
|
||||
- Security-first design with blacklist/whitelist checks
|
||||
- Blocked commands rejected immediately
|
||||
- Unknown commands require user confirmation
|
||||
- Configurable timeout (default 30s, max 10min)
|
||||
- Output truncation for large outputs
|
||||
- Returns stdout, stderr, exit code, duration
|
||||
- 40 unit tests
|
||||
|
||||
- **RunTestsTool (0.9.5)**
|
||||
- `run_tests(path?, filter?, watch?)`: Run project tests
|
||||
- Auto-detects test runner: vitest, jest, mocha, npm test
|
||||
- Detects by config files and package.json dependencies
|
||||
- Path filtering for specific test files/directories
|
||||
- Name pattern filtering (`-t` / `--grep`)
|
||||
- Watch mode support
|
||||
- Returns pass/fail status, exit code, output
|
||||
- 48 unit tests
|
||||
|
||||
### Changed
|
||||
|
||||
- Total tests: 1086 (was 853)
|
||||
- Coverage: 98.08% lines, 92.21% branches
|
||||
- Git tools category now fully implemented (3/3 tools)
|
||||
- Run tools category now fully implemented (2/2 tools)
|
||||
- All 18 planned tools now implemented
|
||||
|
||||
---
|
||||
|
||||
## [0.8.0] - 2025-12-01 - Analysis Tools
|
||||
|
||||
### Added
|
||||
|
||||
- **GetDependenciesTool (0.8.1)**
|
||||
- `get_dependencies(path)`: Get files that a specific file imports
|
||||
- Returns internal dependencies resolved to file paths
|
||||
- Includes metadata: exists, isHub, isEntryPoint, fileType
|
||||
- Sorted by path for consistent output
|
||||
- 23 unit tests
|
||||
|
||||
- **GetDependentsTool (0.8.2)**
|
||||
- `get_dependents(path)`: Get files that import a specific file
|
||||
- Shows hub status for the analyzed file
|
||||
- Includes metadata: isHub, isEntryPoint, fileType, complexityScore
|
||||
- Sorted by path for consistent output
|
||||
- 24 unit tests
|
||||
|
||||
- **GetComplexityTool (0.8.3)**
|
||||
- `get_complexity(path?, limit?)`: Get complexity metrics for files
|
||||
- Returns LOC, nesting depth, cyclomatic complexity, and overall score
|
||||
- Summary statistics: high/medium/low complexity counts
|
||||
- Average score calculation
|
||||
- Sorted by complexity score descending
|
||||
- Default limit of 20 files
|
||||
- 31 unit tests
|
||||
|
||||
- **GetTodosTool (0.8.4)**
|
||||
- `get_todos(path?, type?)`: Find TODO/FIXME/HACK/XXX/BUG/NOTE comments
|
||||
- Supports multiple comment styles: `//`, `/* */`, `#`
|
||||
- Filter by type (case-insensitive)
|
||||
- Counts by type
|
||||
- Includes line context
|
||||
- 42 unit tests
|
||||
|
||||
### Changed
|
||||
|
||||
- Total tests: 853 (was 733)
|
||||
- Coverage: 97.91% lines, 92.32% branches
|
||||
- Analysis tools category now fully implemented (4/4 tools)
|
||||
|
||||
---
|
||||
|
||||
## [0.7.0] - 2025-12-01 - Search Tools
|
||||
|
||||
### Added
|
||||
|
||||
- **FindReferencesTool (0.7.1)**
|
||||
- `find_references(symbol, path?)`: Find all usages of a symbol across the codebase
|
||||
- Word boundary matching with support for special characters (e.g., `$value`)
|
||||
- Context lines around each reference (1 line before/after)
|
||||
- Marks definition vs usage references
|
||||
- Optional path filter for scoped searches
|
||||
- Returns: path, line, column, context, isDefinition
|
||||
- 37 unit tests
|
||||
|
||||
- **FindDefinitionTool (0.7.2)**
|
||||
- `find_definition(symbol)`: Find where a symbol is defined
|
||||
- Uses SymbolIndex for fast lookups
|
||||
- Returns multiple definitions (for overloads/re-exports)
|
||||
- Suggests similar symbols when not found (Levenshtein distance)
|
||||
- Context lines around definition (2 lines before/after)
|
||||
- Returns: path, line, type, context
|
||||
- 32 unit tests
|
||||
|
||||
### Changed
|
||||
|
||||
- Total tests: 733 (was 664)
|
||||
- Coverage: 97.71% lines, 91.84% branches
|
||||
- Search tools category now fully implemented (2/2 tools)
|
||||
|
||||
---
|
||||
|
||||
## [0.6.0] - 2025-12-01 - Edit Tools
|
||||
|
||||
### Added
|
||||
|
||||
- **EditLinesTool (0.6.1)**
|
||||
- `edit_lines(path, start, end, content)`: Replace lines in a file
|
||||
- Hash conflict detection (prevents editing externally modified files)
|
||||
- Confirmation required with diff preview
|
||||
- Automatic storage update after edit
|
||||
- 35 unit tests
|
||||
|
||||
- **CreateFileTool (0.6.2)**
|
||||
- `create_file(path, content)`: Create new file with content
|
||||
- Automatic directory creation if needed
|
||||
- Path validation (must be within project root)
|
||||
- Prevents overwriting existing files
|
||||
- Confirmation required before creation
|
||||
- 26 unit tests
|
||||
|
||||
- **DeleteFileTool (0.6.3)**
|
||||
- `delete_file(path)`: Delete file from filesystem and storage
|
||||
- Removes file data, AST, and meta from Redis
|
||||
- Confirmation required with file content preview
|
||||
- 20 unit tests
|
||||
|
||||
### Changed
|
||||
|
||||
- Total tests: 664 (was 540)
|
||||
- Coverage: 97.71% lines, 91.89% branches
|
||||
- Coverage thresholds: 95% lines/functions/statements, 90% branches
|
||||
|
||||
---
|
||||
|
||||
## [0.5.0] - 2025-12-01 - Read Tools
|
||||
|
||||
### Added
|
||||
|
||||
- **ToolRegistry (0.5.1)**
|
||||
- `IToolRegistry` implementation for managing tool lifecycle
|
||||
- Methods: `register()`, `unregister()`, `get()`, `getAll()`, `getByCategory()`, `has()`
|
||||
- `execute()`: Tool execution with validation and confirmation flow
|
||||
- `getToolDefinitions()`: Convert tools to LLM-compatible JSON Schema format
|
||||
- Helper methods: `getConfirmationTools()`, `getSafeTools()`, `getNames()`, `clear()`
|
||||
- 34 unit tests
|
||||
|
||||
- **GetLinesTool (0.5.2)**
|
||||
- `get_lines(path, start?, end?)`: Read file lines with line numbers
|
||||
- Reads from Redis storage or filesystem fallback
|
||||
- Line number formatting with proper padding
|
||||
- Path validation (must be within project root)
|
||||
- 25 unit tests
|
||||
|
||||
- **GetFunctionTool (0.5.3)**
|
||||
- `get_function(path, name)`: Get function source by name
|
||||
- Uses AST to find exact line range
|
||||
- Returns metadata: isAsync, isExported, params, returnType
|
||||
- Lists available functions if target not found
|
||||
- 20 unit tests
|
||||
|
||||
- **GetClassTool (0.5.4)**
|
||||
- `get_class(path, name)`: Get class source by name
|
||||
- Uses AST to find exact line range
|
||||
- Returns metadata: isAbstract, extends, implements, methods, properties
|
||||
- Lists available classes if target not found
|
||||
- 19 unit tests
|
||||
|
||||
- **GetStructureTool (0.5.5)**
|
||||
- `get_structure(path?, depth?)`: Get directory tree
|
||||
- ASCII tree output with 📁/📄 icons
|
||||
- Filters: node_modules, .git, dist, coverage, etc.
|
||||
- Directories sorted before files
|
||||
- Stats: directory and file counts
|
||||
- 23 unit tests
|
||||
|
||||
### Changed
|
||||
|
||||
- Total tests: 540 (was 419)
|
||||
- Coverage: 96%+
|
||||
|
||||
---
|
||||
|
||||
## [0.4.0] - 2025-11-30 - LLM Integration
|
||||
|
||||
### Added
|
||||
|
||||
- **OllamaClient (0.4.1)**
|
||||
- Full `ILLMClient` implementation for Ollama SDK
|
||||
- Chat completion with tool/function calling support
|
||||
- Token counting via estimation (Ollama has no tokenizer API)
|
||||
- Model management: `pullModel()`, `hasModel()`, `listModels()`
|
||||
- Connection status check: `isAvailable()`
|
||||
- Request abortion support: `abort()`
|
||||
- Error handling with `IpuaroError` for connection and model errors
|
||||
- 21 unit tests
|
||||
|
||||
- **System Prompt & Context Builder (0.4.2)**
|
||||
- `SYSTEM_PROMPT`: Comprehensive agent instructions with tool descriptions
|
||||
- `buildInitialContext()`: Generates compact project overview from structure and ASTs
|
||||
- `buildFileContext()`: Detailed file context with imports, exports, functions, classes
|
||||
- `truncateContext()`: Token-aware context truncation
|
||||
- Hub/entry point/complexity flags in file summaries
|
||||
- 17 unit tests
|
||||
|
||||
- **Tool Definitions (0.4.3)**
|
||||
- 18 tool definitions across 6 categories:
|
||||
- Read: `get_lines`, `get_function`, `get_class`, `get_structure`
|
||||
- Edit: `edit_lines`, `create_file`, `delete_file`
|
||||
- Search: `find_references`, `find_definition`
|
||||
- Analysis: `get_dependencies`, `get_dependents`, `get_complexity`, `get_todos`
|
||||
- Git: `git_status`, `git_diff`, `git_commit`
|
||||
- Run: `run_command`, `run_tests`
|
||||
- Category groupings: `READ_TOOLS`, `EDIT_TOOLS`, etc.
|
||||
- `CONFIRMATION_TOOLS` set for tools requiring user approval
|
||||
- Helper functions: `requiresConfirmation()`, `getToolDef()`, `getToolsByCategory()`
|
||||
- 39 unit tests
|
||||
|
||||
- **Response Parser (0.4.4)**
|
||||
- XML tool call parsing: `<tool_call name="...">...</tool_call>`
|
||||
- Parameter extraction from XML elements
|
||||
- Type coercion: boolean, number, null, JSON arrays/objects
|
||||
- `extractThinking()`: Extracts `<thinking>...</thinking>` blocks
|
||||
- `hasToolCalls()`: Quick check for tool call presence
|
||||
- `validateToolCallParams()`: Parameter validation against required list
|
||||
- `formatToolCallsAsXml()`: Tool calls to XML for prompt injection
|
||||
- 21 unit tests
|
||||
|
||||
### Changed
|
||||
|
||||
- Total tests: 419 (was 321)
|
||||
- Coverage: 96.38%
|
||||
|
||||
---
|
||||
|
||||
## [0.3.1] - 2025-11-30
|
||||
|
||||
### Added
|
||||
|
||||
- **VERSION export** - Package version is now exported from index.ts, automatically read from package.json via `createRequire`
|
||||
|
||||
### Changed
|
||||
|
||||
- 🔄 **Refactored ASTParser** - Reduced complexity and nesting depth:
|
||||
- Extracted `extractClassHeritage()`, `parseHeritageClause()`, `findTypeIdentifier()`, `collectImplements()` helper methods
|
||||
- Max nesting depth reduced from 5 to 4
|
||||
- 🔄 **Refactored RedisStorage** - Removed unnecessary type parameter from `parseJSON()` method
|
||||
|
||||
### Quality
|
||||
|
||||
- ✅ **Zero lint warnings** - All ESLint warnings resolved
|
||||
- ✅ **All 321 tests pass**
|
||||
|
||||
## [0.3.0] - 2025-11-30 - Indexer
|
||||
|
||||
### Added
|
||||
|
||||
@@ -1,40 +1,95 @@
|
||||
# ipuaro TODO
|
||||
|
||||
## Completed
|
||||
|
||||
### Version 0.1.0 - Foundation
|
||||
- [x] Project setup (package.json, tsconfig, vitest)
|
||||
- [x] Domain entities (Session, Project)
|
||||
- [x] Domain value objects (FileData, FileAST, FileMeta, ChatMessage, etc.)
|
||||
- [x] Domain service interfaces (IStorage, ILLMClient, ITool, IIndexer)
|
||||
- [x] Shared config loader with Zod validation
|
||||
- [x] IpuaroError class
|
||||
|
||||
### Version 0.2.0 - Redis Storage
|
||||
- [x] RedisClient with AOF config
|
||||
- [x] Redis schema implementation
|
||||
- [x] RedisStorage class
|
||||
|
||||
### Version 0.3.0 - Indexer
|
||||
- [x] FileScanner with gitignore support
|
||||
- [x] ASTParser with tree-sitter
|
||||
- [x] MetaAnalyzer for complexity
|
||||
- [x] IndexBuilder for symbols
|
||||
- [x] Watchdog for file changes
|
||||
|
||||
### Version 0.4.0 - LLM Integration
|
||||
- [x] OllamaClient implementation
|
||||
- [x] System prompt design
|
||||
- [x] Tool definitions (18 tools)
|
||||
- [x] Response parser (XML format)
|
||||
|
||||
### Version 0.5.0 - Read Tools
|
||||
- [x] ToolRegistry implementation
|
||||
- [x] get_lines tool
|
||||
- [x] get_function tool
|
||||
- [x] get_class tool
|
||||
- [x] get_structure tool
|
||||
|
||||
### Version 0.6.0 - Edit Tools
|
||||
- [x] edit_lines tool
|
||||
- [x] create_file tool
|
||||
- [x] delete_file tool
|
||||
|
||||
### Version 0.7.0 - Search Tools
|
||||
- [x] find_references tool
|
||||
- [x] find_definition tool
|
||||
|
||||
### Version 0.8.0 - Analysis Tools
|
||||
- [x] get_dependencies tool
|
||||
- [x] get_dependents tool
|
||||
- [x] get_complexity tool
|
||||
- [x] get_todos tool
|
||||
|
||||
### Version 0.9.0 - Git & Run Tools
|
||||
- [x] git_status tool
|
||||
- [x] git_diff tool
|
||||
- [x] git_commit tool
|
||||
- [x] CommandSecurity (blacklist/whitelist)
|
||||
- [x] run_command tool
|
||||
- [x] run_tests tool
|
||||
|
||||
### Version 0.10.0 - Session Management
|
||||
- [x] ISessionStorage interface
|
||||
- [x] RedisSessionStorage implementation
|
||||
- [x] ContextManager use case
|
||||
- [x] StartSession use case
|
||||
- [x] HandleMessage use case
|
||||
- [x] UndoChange use case
|
||||
|
||||
## In Progress
|
||||
|
||||
### Version 0.2.0 - Redis Storage
|
||||
- [ ] RedisClient with AOF config
|
||||
- [ ] Redis schema implementation
|
||||
- [ ] RedisStorage class
|
||||
### Version 0.11.0 - TUI Basic
|
||||
- [ ] App shell (Ink/React)
|
||||
- [ ] StatusBar component
|
||||
- [ ] Chat component
|
||||
- [ ] Input component
|
||||
|
||||
## Planned
|
||||
|
||||
### Version 0.3.0 - Indexer
|
||||
- [ ] FileScanner with gitignore support
|
||||
- [ ] ASTParser with tree-sitter
|
||||
- [ ] MetaAnalyzer for complexity
|
||||
- [ ] IndexBuilder for symbols
|
||||
- [ ] Watchdog for file changes
|
||||
### Version 0.12.0 - TUI Advanced
|
||||
- [ ] DiffView component
|
||||
- [ ] ConfirmDialog component
|
||||
- [ ] ErrorDialog component
|
||||
- [ ] Progress component
|
||||
|
||||
### Version 0.4.0 - LLM Integration
|
||||
- [ ] OllamaClient implementation
|
||||
- [ ] System prompt design
|
||||
- [ ] Tool definitions (XML format)
|
||||
- [ ] Response parser
|
||||
### Version 0.13.0+ - Commands & Polish
|
||||
- [ ] Slash commands (/help, /clear, /undo, /sessions, /status)
|
||||
- [ ] Hotkeys (Ctrl+C, Ctrl+D, Ctrl+Z)
|
||||
- [ ] Auto-compression at 80% context
|
||||
|
||||
### Version 0.5.0+ - Tools
|
||||
- [ ] Read tools (get_lines, get_function, get_class, get_structure)
|
||||
- [ ] Edit tools (edit_lines, create_file, delete_file)
|
||||
- [ ] Search tools (find_references, find_definition)
|
||||
- [ ] Analysis tools (get_dependencies, get_dependents, get_complexity, get_todos)
|
||||
- [ ] Git tools (git_status, git_diff, git_commit)
|
||||
- [ ] Run tools (run_command, run_tests)
|
||||
|
||||
### Version 0.10.0+ - Session & TUI
|
||||
- [ ] Session management
|
||||
- [ ] Context compression
|
||||
- [ ] TUI components (StatusBar, Chat, Input, DiffView)
|
||||
- [ ] Slash commands (/help, /clear, /undo, etc.)
|
||||
### Version 0.14.0 - CLI Entry Point
|
||||
- [ ] Full CLI commands (start, init, index)
|
||||
- [ ] Onboarding flow (Redis check, Ollama check, model pull)
|
||||
|
||||
## Technical Debt
|
||||
|
||||
@@ -51,4 +106,4 @@ _None at this time._
|
||||
|
||||
---
|
||||
|
||||
**Last Updated:** 2025-01-29
|
||||
**Last Updated:** 2025-12-01
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@samiyev/ipuaro",
|
||||
"version": "0.3.0",
|
||||
"version": "0.13.0",
|
||||
"description": "Local AI agent for codebase operations with infinite context feeling",
|
||||
"author": "Fozilbek Samiyev <fozilbek.samiyev@gmail.com>",
|
||||
"license": "MIT",
|
||||
@@ -70,7 +70,7 @@
|
||||
],
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/samiyev/puaros.git",
|
||||
"url": "git+https://github.com/samiyev/puaros.git",
|
||||
"directory": "packages/ipuaro"
|
||||
},
|
||||
"bugs": {
|
||||
|
||||
229
packages/ipuaro/src/application/use-cases/ContextManager.ts
Normal file
229
packages/ipuaro/src/application/use-cases/ContextManager.ts
Normal file
@@ -0,0 +1,229 @@
|
||||
import type { ContextState, Session } from "../../domain/entities/Session.js"
|
||||
import type { ILLMClient } from "../../domain/services/ILLMClient.js"
|
||||
import { type ChatMessage, createSystemMessage } from "../../domain/value-objects/ChatMessage.js"
|
||||
import { CONTEXT_COMPRESSION_THRESHOLD, CONTEXT_WINDOW_SIZE } from "../../domain/constants/index.js"
|
||||
|
||||
/**
|
||||
* File in context with token count.
|
||||
*/
|
||||
export interface FileContext {
|
||||
path: string
|
||||
tokens: number
|
||||
addedAt: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Compression result.
|
||||
*/
|
||||
export interface CompressionResult {
|
||||
compressed: boolean
|
||||
removedMessages: number
|
||||
tokensSaved: number
|
||||
summary?: string
|
||||
}
|
||||
|
||||
const COMPRESSION_PROMPT = `Summarize the following conversation history in a concise way,
|
||||
preserving key information about:
|
||||
- What files were discussed or modified
|
||||
- What changes were made
|
||||
- Important decisions or context
|
||||
Keep the summary under 500 tokens.`
|
||||
|
||||
const MESSAGES_TO_KEEP = 5
|
||||
const MIN_MESSAGES_FOR_COMPRESSION = 10
|
||||
|
||||
/**
|
||||
* Manages context window token budget and compression.
|
||||
*/
|
||||
export class ContextManager {
|
||||
private readonly filesInContext = new Map<string, FileContext>()
|
||||
private currentTokens = 0
|
||||
private readonly contextWindowSize: number
|
||||
|
||||
constructor(contextWindowSize: number = CONTEXT_WINDOW_SIZE) {
|
||||
this.contextWindowSize = contextWindowSize
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a file to the context.
|
||||
*/
|
||||
addToContext(file: string, tokens: number): void {
|
||||
const existing = this.filesInContext.get(file)
|
||||
if (existing) {
|
||||
this.currentTokens -= existing.tokens
|
||||
}
|
||||
|
||||
this.filesInContext.set(file, {
|
||||
path: file,
|
||||
tokens,
|
||||
addedAt: Date.now(),
|
||||
})
|
||||
this.currentTokens += tokens
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a file from the context.
|
||||
*/
|
||||
removeFromContext(file: string): void {
|
||||
const existing = this.filesInContext.get(file)
|
||||
if (existing) {
|
||||
this.currentTokens -= existing.tokens
|
||||
this.filesInContext.delete(file)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current token usage ratio (0-1).
|
||||
*/
|
||||
getUsage(): number {
|
||||
return this.currentTokens / this.contextWindowSize
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current token count.
|
||||
*/
|
||||
getTokenCount(): number {
|
||||
return this.currentTokens
|
||||
}
|
||||
|
||||
/**
|
||||
* Get available tokens.
|
||||
*/
|
||||
getAvailableTokens(): number {
|
||||
return this.contextWindowSize - this.currentTokens
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if compression is needed.
|
||||
*/
|
||||
needsCompression(): boolean {
|
||||
return this.getUsage() > CONTEXT_COMPRESSION_THRESHOLD
|
||||
}
|
||||
|
||||
/**
|
||||
* Update token count (e.g., after receiving a message).
|
||||
*/
|
||||
addTokens(tokens: number): void {
|
||||
this.currentTokens += tokens
|
||||
}
|
||||
|
||||
/**
|
||||
* Get files in context.
|
||||
*/
|
||||
getFilesInContext(): string[] {
|
||||
return Array.from(this.filesInContext.keys())
|
||||
}
|
||||
|
||||
/**
|
||||
* Sync context state from session.
|
||||
*/
|
||||
syncFromSession(session: Session): void {
|
||||
this.filesInContext.clear()
|
||||
this.currentTokens = 0
|
||||
|
||||
for (const file of session.context.filesInContext) {
|
||||
this.filesInContext.set(file, {
|
||||
path: file,
|
||||
tokens: 0,
|
||||
addedAt: Date.now(),
|
||||
})
|
||||
}
|
||||
|
||||
this.currentTokens = Math.floor(session.context.tokenUsage * this.contextWindowSize)
|
||||
}
|
||||
|
||||
/**
|
||||
* Update session context state.
|
||||
*/
|
||||
updateSession(session: Session): void {
|
||||
session.context.filesInContext = this.getFilesInContext()
|
||||
session.context.tokenUsage = this.getUsage()
|
||||
session.context.needsCompression = this.needsCompression()
|
||||
}
|
||||
|
||||
/**
|
||||
* Compress context using LLM to summarize old messages.
|
||||
*/
|
||||
async compress(session: Session, llm: ILLMClient): Promise<CompressionResult> {
|
||||
const history = session.history
|
||||
if (history.length < MIN_MESSAGES_FOR_COMPRESSION) {
|
||||
return {
|
||||
compressed: false,
|
||||
removedMessages: 0,
|
||||
tokensSaved: 0,
|
||||
}
|
||||
}
|
||||
|
||||
const messagesToCompress = history.slice(0, -MESSAGES_TO_KEEP)
|
||||
const messagesToKeep = history.slice(-MESSAGES_TO_KEEP)
|
||||
|
||||
const tokensBeforeCompression = await this.countHistoryTokens(messagesToCompress, llm)
|
||||
|
||||
const summary = await this.summarizeMessages(messagesToCompress, llm)
|
||||
const summaryTokens = await llm.countTokens(summary)
|
||||
|
||||
const summaryMessage = createSystemMessage(`[Previous conversation summary]\n${summary}`)
|
||||
|
||||
session.history = [summaryMessage, ...messagesToKeep]
|
||||
|
||||
const tokensSaved = tokensBeforeCompression - summaryTokens
|
||||
this.currentTokens -= tokensSaved
|
||||
|
||||
this.updateSession(session)
|
||||
|
||||
return {
|
||||
compressed: true,
|
||||
removedMessages: messagesToCompress.length,
|
||||
tokensSaved,
|
||||
summary,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new context state.
|
||||
*/
|
||||
static createInitialState(): ContextState {
|
||||
return {
|
||||
filesInContext: [],
|
||||
tokenUsage: 0,
|
||||
needsCompression: false,
|
||||
}
|
||||
}
|
||||
|
||||
private async summarizeMessages(messages: ChatMessage[], llm: ILLMClient): Promise<string> {
|
||||
const conversation = this.formatMessagesForSummary(messages)
|
||||
|
||||
const response = await llm.chat([
|
||||
createSystemMessage(COMPRESSION_PROMPT),
|
||||
createSystemMessage(conversation),
|
||||
])
|
||||
|
||||
return response.content
|
||||
}
|
||||
|
||||
private formatMessagesForSummary(messages: ChatMessage[]): string {
|
||||
return messages
|
||||
.filter((m) => m.role !== "tool")
|
||||
.map((m) => {
|
||||
const role = m.role === "user" ? "User" : "Assistant"
|
||||
const content = this.truncateContent(m.content, 500)
|
||||
return `${role}: ${content}`
|
||||
})
|
||||
.join("\n\n")
|
||||
}
|
||||
|
||||
private truncateContent(content: string, maxLength: number): string {
|
||||
if (content.length <= maxLength) {
|
||||
return content
|
||||
}
|
||||
return `${content.slice(0, maxLength)}...`
|
||||
}
|
||||
|
||||
private async countHistoryTokens(messages: ChatMessage[], llm: ILLMClient): Promise<number> {
|
||||
let total = 0
|
||||
for (const message of messages) {
|
||||
total += await llm.countTokens(message.content)
|
||||
}
|
||||
return total
|
||||
}
|
||||
}
|
||||
383
packages/ipuaro/src/application/use-cases/HandleMessage.ts
Normal file
383
packages/ipuaro/src/application/use-cases/HandleMessage.ts
Normal file
@@ -0,0 +1,383 @@
|
||||
import { randomUUID } from "node:crypto"
|
||||
import type { Session } from "../../domain/entities/Session.js"
|
||||
import type { ILLMClient } from "../../domain/services/ILLMClient.js"
|
||||
import type { ISessionStorage } from "../../domain/services/ISessionStorage.js"
|
||||
import type { IStorage } from "../../domain/services/IStorage.js"
|
||||
import type { DiffInfo, ToolContext } from "../../domain/services/ITool.js"
|
||||
import {
|
||||
type ChatMessage,
|
||||
createAssistantMessage,
|
||||
createSystemMessage,
|
||||
createToolMessage,
|
||||
createUserMessage,
|
||||
} from "../../domain/value-objects/ChatMessage.js"
|
||||
import type { ToolCall } from "../../domain/value-objects/ToolCall.js"
|
||||
import { createErrorResult, type ToolResult } from "../../domain/value-objects/ToolResult.js"
|
||||
import { createUndoEntry, type UndoEntry } from "../../domain/value-objects/UndoEntry.js"
|
||||
import { IpuaroError } from "../../shared/errors/IpuaroError.js"
|
||||
import type { ErrorChoice } from "../../shared/types/index.js"
|
||||
import {
|
||||
buildInitialContext,
|
||||
type ProjectStructure,
|
||||
SYSTEM_PROMPT,
|
||||
} from "../../infrastructure/llm/prompts.js"
|
||||
import { parseToolCalls } from "../../infrastructure/llm/ResponseParser.js"
|
||||
import type { IToolRegistry } from "../interfaces/IToolRegistry.js"
|
||||
import { ContextManager } from "./ContextManager.js"
|
||||
|
||||
/**
|
||||
* Status during message handling.
|
||||
*/
|
||||
export type HandleMessageStatus =
|
||||
| "ready"
|
||||
| "thinking"
|
||||
| "tool_call"
|
||||
| "awaiting_confirmation"
|
||||
| "error"
|
||||
|
||||
/**
|
||||
* Edit request for confirmation.
|
||||
*/
|
||||
export interface EditRequest {
|
||||
toolCall: ToolCall
|
||||
filePath: string
|
||||
description: string
|
||||
diff?: DiffInfo
|
||||
}
|
||||
|
||||
/**
|
||||
* User's choice for edit confirmation.
|
||||
*/
|
||||
export type EditChoice = "apply" | "skip" | "edit" | "abort"
|
||||
|
||||
/**
|
||||
* Event callbacks for HandleMessage.
|
||||
*/
|
||||
export interface HandleMessageEvents {
|
||||
onMessage?: (message: ChatMessage) => void
|
||||
onToolCall?: (call: ToolCall) => void
|
||||
onToolResult?: (result: ToolResult) => void
|
||||
onConfirmation?: (message: string, diff?: DiffInfo) => Promise<boolean>
|
||||
onError?: (error: IpuaroError) => Promise<ErrorChoice>
|
||||
onStatusChange?: (status: HandleMessageStatus) => void
|
||||
onUndoEntry?: (entry: UndoEntry) => void
|
||||
}
|
||||
|
||||
/**
|
||||
* Options for HandleMessage.
|
||||
*/
|
||||
export interface HandleMessageOptions {
|
||||
autoApply?: boolean
|
||||
maxToolCalls?: number
|
||||
}
|
||||
|
||||
const DEFAULT_MAX_TOOL_CALLS = 20
|
||||
|
||||
/**
|
||||
* Use case for handling a user message.
|
||||
* Main orchestrator for the LLM interaction loop.
|
||||
*/
|
||||
export class HandleMessage {
|
||||
private readonly storage: IStorage
|
||||
private readonly sessionStorage: ISessionStorage
|
||||
private readonly llm: ILLMClient
|
||||
private readonly tools: IToolRegistry
|
||||
private readonly contextManager: ContextManager
|
||||
private readonly projectRoot: string
|
||||
private projectStructure?: ProjectStructure
|
||||
|
||||
private events: HandleMessageEvents = {}
|
||||
private options: HandleMessageOptions = {}
|
||||
private aborted = false
|
||||
|
||||
constructor(
|
||||
storage: IStorage,
|
||||
sessionStorage: ISessionStorage,
|
||||
llm: ILLMClient,
|
||||
tools: IToolRegistry,
|
||||
projectRoot: string,
|
||||
) {
|
||||
this.storage = storage
|
||||
this.sessionStorage = sessionStorage
|
||||
this.llm = llm
|
||||
this.tools = tools
|
||||
this.projectRoot = projectRoot
|
||||
this.contextManager = new ContextManager(llm.getContextWindowSize())
|
||||
}
|
||||
|
||||
/**
|
||||
* Set event callbacks.
|
||||
*/
|
||||
setEvents(events: HandleMessageEvents): void {
|
||||
this.events = events
|
||||
}
|
||||
|
||||
/**
|
||||
* Set options.
|
||||
*/
|
||||
setOptions(options: HandleMessageOptions): void {
|
||||
this.options = options
|
||||
}
|
||||
|
||||
/**
|
||||
* Set project structure for context building.
|
||||
*/
|
||||
setProjectStructure(structure: ProjectStructure): void {
|
||||
this.projectStructure = structure
|
||||
}
|
||||
|
||||
/**
|
||||
* Abort current processing.
|
||||
*/
|
||||
abort(): void {
|
||||
this.aborted = true
|
||||
this.llm.abort()
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the message handling flow.
|
||||
*/
|
||||
async execute(session: Session, message: string): Promise<void> {
|
||||
this.aborted = false
|
||||
this.contextManager.syncFromSession(session)
|
||||
|
||||
if (message.trim()) {
|
||||
const userMessage = createUserMessage(message)
|
||||
session.addMessage(userMessage)
|
||||
session.addInputToHistory(message)
|
||||
this.emitMessage(userMessage)
|
||||
}
|
||||
|
||||
await this.sessionStorage.saveSession(session)
|
||||
|
||||
this.emitStatus("thinking")
|
||||
|
||||
let toolCallCount = 0
|
||||
const maxToolCalls = this.options.maxToolCalls ?? DEFAULT_MAX_TOOL_CALLS
|
||||
|
||||
while (!this.aborted) {
|
||||
const messages = await this.buildMessages(session)
|
||||
|
||||
const startTime = Date.now()
|
||||
let response
|
||||
|
||||
try {
|
||||
response = await this.llm.chat(messages)
|
||||
} catch (error) {
|
||||
await this.handleLLMError(error, session)
|
||||
return
|
||||
}
|
||||
|
||||
if (this.aborted) {
|
||||
return
|
||||
}
|
||||
|
||||
const parsed = parseToolCalls(response.content)
|
||||
const timeMs = Date.now() - startTime
|
||||
|
||||
if (parsed.toolCalls.length === 0) {
|
||||
const assistantMessage = createAssistantMessage(parsed.content, undefined, {
|
||||
tokens: response.tokens,
|
||||
timeMs,
|
||||
toolCalls: 0,
|
||||
})
|
||||
session.addMessage(assistantMessage)
|
||||
this.emitMessage(assistantMessage)
|
||||
this.contextManager.addTokens(response.tokens)
|
||||
this.contextManager.updateSession(session)
|
||||
await this.sessionStorage.saveSession(session)
|
||||
this.emitStatus("ready")
|
||||
return
|
||||
}
|
||||
|
||||
const assistantMessage = createAssistantMessage(parsed.content, parsed.toolCalls, {
|
||||
tokens: response.tokens,
|
||||
timeMs,
|
||||
toolCalls: parsed.toolCalls.length,
|
||||
})
|
||||
session.addMessage(assistantMessage)
|
||||
this.emitMessage(assistantMessage)
|
||||
|
||||
toolCallCount += parsed.toolCalls.length
|
||||
if (toolCallCount > maxToolCalls) {
|
||||
const errorMsg = `Maximum tool calls (${String(maxToolCalls)}) exceeded`
|
||||
const errorMessage = createSystemMessage(errorMsg)
|
||||
session.addMessage(errorMessage)
|
||||
this.emitMessage(errorMessage)
|
||||
this.emitStatus("ready")
|
||||
return
|
||||
}
|
||||
|
||||
this.emitStatus("tool_call")
|
||||
|
||||
const results: ToolResult[] = []
|
||||
|
||||
for (const toolCall of parsed.toolCalls) {
|
||||
if (this.aborted) {
|
||||
return
|
||||
}
|
||||
|
||||
this.emitToolCall(toolCall)
|
||||
|
||||
const result = await this.executeToolCall(toolCall, session)
|
||||
results.push(result)
|
||||
this.emitToolResult(result)
|
||||
}
|
||||
|
||||
const toolMessage = createToolMessage(results)
|
||||
session.addMessage(toolMessage)
|
||||
|
||||
this.contextManager.addTokens(response.tokens)
|
||||
|
||||
if (this.contextManager.needsCompression()) {
|
||||
await this.contextManager.compress(session, this.llm)
|
||||
}
|
||||
|
||||
this.contextManager.updateSession(session)
|
||||
await this.sessionStorage.saveSession(session)
|
||||
|
||||
this.emitStatus("thinking")
|
||||
}
|
||||
}
|
||||
|
||||
private async buildMessages(session: Session): Promise<ChatMessage[]> {
|
||||
const messages: ChatMessage[] = []
|
||||
|
||||
messages.push(createSystemMessage(SYSTEM_PROMPT))
|
||||
|
||||
if (this.projectStructure) {
|
||||
const asts = await this.storage.getAllASTs()
|
||||
const metas = await this.storage.getAllMetas()
|
||||
const context = buildInitialContext(this.projectStructure, asts, metas)
|
||||
messages.push(createSystemMessage(context))
|
||||
}
|
||||
|
||||
messages.push(...session.history)
|
||||
|
||||
return messages
|
||||
}
|
||||
|
||||
private async executeToolCall(toolCall: ToolCall, session: Session): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const tool = this.tools.get(toolCall.name)
|
||||
|
||||
if (!tool) {
|
||||
return createErrorResult(
|
||||
toolCall.id,
|
||||
`Unknown tool: ${toolCall.name}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const context: ToolContext = {
|
||||
projectRoot: this.projectRoot,
|
||||
storage: this.storage,
|
||||
requestConfirmation: async (msg: string, diff?: DiffInfo) => {
|
||||
return this.handleConfirmation(msg, diff, toolCall, session)
|
||||
},
|
||||
onProgress: (_msg: string) => {
|
||||
this.events.onStatusChange?.("tool_call")
|
||||
},
|
||||
}
|
||||
|
||||
try {
|
||||
const validationError = tool.validateParams(toolCall.params)
|
||||
if (validationError) {
|
||||
return createErrorResult(toolCall.id, validationError, Date.now() - startTime)
|
||||
}
|
||||
|
||||
const result = await tool.execute(toolCall.params, context)
|
||||
return result
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(toolCall.id, errorMessage, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
private async handleConfirmation(
|
||||
msg: string,
|
||||
diff: DiffInfo | undefined,
|
||||
toolCall: ToolCall,
|
||||
session: Session,
|
||||
): Promise<boolean> {
|
||||
if (this.options.autoApply) {
|
||||
if (diff) {
|
||||
this.createUndoEntryFromDiff(diff, toolCall, session)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
this.emitStatus("awaiting_confirmation")
|
||||
|
||||
if (this.events.onConfirmation) {
|
||||
const confirmed = await this.events.onConfirmation(msg, diff)
|
||||
|
||||
if (confirmed && diff) {
|
||||
this.createUndoEntryFromDiff(diff, toolCall, session)
|
||||
}
|
||||
|
||||
return confirmed
|
||||
}
|
||||
|
||||
if (diff) {
|
||||
this.createUndoEntryFromDiff(diff, toolCall, session)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
private createUndoEntryFromDiff(diff: DiffInfo, toolCall: ToolCall, session: Session): void {
|
||||
const entry = createUndoEntry(
|
||||
randomUUID(),
|
||||
diff.filePath,
|
||||
diff.oldLines,
|
||||
diff.newLines,
|
||||
`${toolCall.name}: ${diff.filePath}`,
|
||||
toolCall.id,
|
||||
)
|
||||
|
||||
session.addUndoEntry(entry)
|
||||
void this.sessionStorage.pushUndoEntry(session.id, entry)
|
||||
session.stats.editsApplied++
|
||||
this.events.onUndoEntry?.(entry)
|
||||
}
|
||||
|
||||
private async handleLLMError(error: unknown, session: Session): Promise<void> {
|
||||
this.emitStatus("error")
|
||||
|
||||
const ipuaroError =
|
||||
error instanceof IpuaroError
|
||||
? error
|
||||
: IpuaroError.llm(error instanceof Error ? error.message : String(error))
|
||||
|
||||
if (this.events.onError) {
|
||||
const choice = await this.events.onError(ipuaroError)
|
||||
|
||||
if (choice === "retry") {
|
||||
this.emitStatus("thinking")
|
||||
return this.execute(session, "")
|
||||
}
|
||||
}
|
||||
|
||||
const errorMessage = createSystemMessage(`Error: ${ipuaroError.message}`)
|
||||
session.addMessage(errorMessage)
|
||||
this.emitMessage(errorMessage)
|
||||
|
||||
this.emitStatus("ready")
|
||||
}
|
||||
|
||||
private emitMessage(message: ChatMessage): void {
|
||||
this.events.onMessage?.(message)
|
||||
}
|
||||
|
||||
private emitToolCall(call: ToolCall): void {
|
||||
this.events.onToolCall?.(call)
|
||||
}
|
||||
|
||||
private emitToolResult(result: ToolResult): void {
|
||||
this.events.onToolResult?.(result)
|
||||
}
|
||||
|
||||
private emitStatus(status: HandleMessageStatus): void {
|
||||
this.events.onStatusChange?.(status)
|
||||
}
|
||||
}
|
||||
62
packages/ipuaro/src/application/use-cases/StartSession.ts
Normal file
62
packages/ipuaro/src/application/use-cases/StartSession.ts
Normal file
@@ -0,0 +1,62 @@
|
||||
import { randomUUID } from "node:crypto"
|
||||
import { Session } from "../../domain/entities/Session.js"
|
||||
import type { ISessionStorage } from "../../domain/services/ISessionStorage.js"
|
||||
|
||||
/**
|
||||
* Options for starting a session.
|
||||
*/
|
||||
export interface StartSessionOptions {
|
||||
/** Force creation of a new session even if one exists */
|
||||
forceNew?: boolean
|
||||
/** Specific session ID to load */
|
||||
sessionId?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Result of starting a session.
|
||||
*/
|
||||
export interface StartSessionResult {
|
||||
session: Session
|
||||
isNew: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Use case for starting a session.
|
||||
* Creates a new session or loads the latest one for a project.
|
||||
*/
|
||||
export class StartSession {
|
||||
constructor(private readonly sessionStorage: ISessionStorage) {}
|
||||
|
||||
/**
|
||||
* Execute the use case.
|
||||
*
|
||||
* @param projectName - The project name to start a session for
|
||||
* @param options - Optional configuration
|
||||
* @returns The session and whether it was newly created
|
||||
*/
|
||||
async execute(
|
||||
projectName: string,
|
||||
options: StartSessionOptions = {},
|
||||
): Promise<StartSessionResult> {
|
||||
if (options.sessionId) {
|
||||
const session = await this.sessionStorage.loadSession(options.sessionId)
|
||||
if (session) {
|
||||
await this.sessionStorage.touchSession(session.id)
|
||||
return { session, isNew: false }
|
||||
}
|
||||
}
|
||||
|
||||
if (!options.forceNew) {
|
||||
const latestSession = await this.sessionStorage.getLatestSession(projectName)
|
||||
if (latestSession) {
|
||||
await this.sessionStorage.touchSession(latestSession.id)
|
||||
return { session: latestSession, isNew: false }
|
||||
}
|
||||
}
|
||||
|
||||
const session = new Session(randomUUID(), projectName)
|
||||
await this.sessionStorage.saveSession(session)
|
||||
|
||||
return { session, isNew: true }
|
||||
}
|
||||
}
|
||||
119
packages/ipuaro/src/application/use-cases/UndoChange.ts
Normal file
119
packages/ipuaro/src/application/use-cases/UndoChange.ts
Normal file
@@ -0,0 +1,119 @@
|
||||
import { promises as fs } from "node:fs"
|
||||
import type { Session } from "../../domain/entities/Session.js"
|
||||
import type { ISessionStorage } from "../../domain/services/ISessionStorage.js"
|
||||
import type { IStorage } from "../../domain/services/IStorage.js"
|
||||
import { canUndo, type UndoEntry } from "../../domain/value-objects/UndoEntry.js"
|
||||
import { md5 } from "../../shared/utils/hash.js"
|
||||
|
||||
/**
|
||||
* Result of undo operation.
|
||||
*/
|
||||
export interface UndoResult {
|
||||
success: boolean
|
||||
entry?: UndoEntry
|
||||
error?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Use case for undoing the last file change.
|
||||
*/
|
||||
export class UndoChange {
|
||||
constructor(
|
||||
private readonly sessionStorage: ISessionStorage,
|
||||
private readonly storage: IStorage,
|
||||
) {}
|
||||
|
||||
/**
|
||||
* Execute undo operation.
|
||||
*
|
||||
* @param session - The current session
|
||||
* @returns Result of the undo operation
|
||||
*/
|
||||
async execute(session: Session): Promise<UndoResult> {
|
||||
const entry = await this.sessionStorage.popUndoEntry(session.id)
|
||||
if (!entry) {
|
||||
return {
|
||||
success: false,
|
||||
error: "No changes to undo",
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const currentContent = await this.readCurrentContent(entry.filePath)
|
||||
|
||||
if (!canUndo(entry, currentContent)) {
|
||||
await this.sessionStorage.pushUndoEntry(session.id, entry)
|
||||
return {
|
||||
success: false,
|
||||
entry,
|
||||
error: "File has been modified since the change was made",
|
||||
}
|
||||
}
|
||||
|
||||
await this.restoreContent(entry.filePath, entry.previousContent)
|
||||
|
||||
session.popUndoEntry()
|
||||
session.stats.editsApplied--
|
||||
|
||||
return {
|
||||
success: true,
|
||||
entry,
|
||||
}
|
||||
} catch (error) {
|
||||
await this.sessionStorage.pushUndoEntry(session.id, entry)
|
||||
|
||||
const message = error instanceof Error ? error.message : "Unknown error"
|
||||
return {
|
||||
success: false,
|
||||
entry,
|
||||
error: `Failed to undo: ${message}`,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if undo is available.
|
||||
*/
|
||||
async canUndo(session: Session): Promise<boolean> {
|
||||
const stack = await this.sessionStorage.getUndoStack(session.id)
|
||||
return stack.length > 0
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the next undo entry without removing it.
|
||||
*/
|
||||
async peekUndoEntry(session: Session): Promise<UndoEntry | null> {
|
||||
const stack = await this.sessionStorage.getUndoStack(session.id)
|
||||
if (stack.length === 0) {
|
||||
return null
|
||||
}
|
||||
return stack[stack.length - 1]
|
||||
}
|
||||
|
||||
private async readCurrentContent(filePath: string): Promise<string[]> {
|
||||
try {
|
||||
const content = await fs.readFile(filePath, "utf-8")
|
||||
return content.split("\n")
|
||||
} catch (error) {
|
||||
if ((error as NodeJS.ErrnoException).code === "ENOENT") {
|
||||
return []
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private async restoreContent(filePath: string, content: string[]): Promise<void> {
|
||||
const fileContent = content.join("\n")
|
||||
await fs.writeFile(filePath, fileContent, "utf-8")
|
||||
|
||||
const hash = md5(fileContent)
|
||||
const stats = await fs.stat(filePath)
|
||||
|
||||
await this.storage.setFile(filePath, {
|
||||
lines: content,
|
||||
hash,
|
||||
size: stats.size,
|
||||
lastModified: stats.mtimeMs,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,6 @@
|
||||
/*
|
||||
* Application Use Cases
|
||||
* Will be implemented in version 0.10.0+
|
||||
*/
|
||||
// Application Use Cases
|
||||
|
||||
export * from "./StartSession.js"
|
||||
export * from "./HandleMessage.js"
|
||||
export * from "./UndoChange.js"
|
||||
export * from "./ContextManager.js"
|
||||
|
||||
88
packages/ipuaro/src/domain/services/ISessionStorage.ts
Normal file
88
packages/ipuaro/src/domain/services/ISessionStorage.ts
Normal file
@@ -0,0 +1,88 @@
|
||||
import type { ContextState, Session, SessionStats } from "../entities/Session.js"
|
||||
import type { ChatMessage } from "../value-objects/ChatMessage.js"
|
||||
import type { UndoEntry } from "../value-objects/UndoEntry.js"
|
||||
|
||||
/**
|
||||
* Session data stored in persistence layer.
|
||||
*/
|
||||
export interface SessionData {
|
||||
id: string
|
||||
projectName: string
|
||||
createdAt: number
|
||||
lastActivityAt: number
|
||||
history: ChatMessage[]
|
||||
context: ContextState
|
||||
stats: SessionStats
|
||||
inputHistory: string[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Session list item (minimal info for listing).
|
||||
*/
|
||||
export interface SessionListItem {
|
||||
id: string
|
||||
projectName: string
|
||||
createdAt: number
|
||||
lastActivityAt: number
|
||||
messageCount: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Storage service interface for session persistence.
|
||||
*/
|
||||
export interface ISessionStorage {
|
||||
/**
|
||||
* Save a session to storage.
|
||||
*/
|
||||
saveSession(session: Session): Promise<void>
|
||||
|
||||
/**
|
||||
* Load a session by ID.
|
||||
*/
|
||||
loadSession(sessionId: string): Promise<Session | null>
|
||||
|
||||
/**
|
||||
* Delete a session.
|
||||
*/
|
||||
deleteSession(sessionId: string): Promise<void>
|
||||
|
||||
/**
|
||||
* Get list of all sessions for a project.
|
||||
*/
|
||||
listSessions(projectName?: string): Promise<SessionListItem[]>
|
||||
|
||||
/**
|
||||
* Get the latest session for a project.
|
||||
*/
|
||||
getLatestSession(projectName: string): Promise<Session | null>
|
||||
|
||||
/**
|
||||
* Check if a session exists.
|
||||
*/
|
||||
sessionExists(sessionId: string): Promise<boolean>
|
||||
|
||||
/**
|
||||
* Add undo entry to session's undo stack.
|
||||
*/
|
||||
pushUndoEntry(sessionId: string, entry: UndoEntry): Promise<void>
|
||||
|
||||
/**
|
||||
* Pop undo entry from session's undo stack.
|
||||
*/
|
||||
popUndoEntry(sessionId: string): Promise<UndoEntry | null>
|
||||
|
||||
/**
|
||||
* Get undo stack for a session.
|
||||
*/
|
||||
getUndoStack(sessionId: string): Promise<UndoEntry[]>
|
||||
|
||||
/**
|
||||
* Update session's last activity timestamp.
|
||||
*/
|
||||
touchSession(sessionId: string): Promise<void>
|
||||
|
||||
/**
|
||||
* Clear all sessions.
|
||||
*/
|
||||
clearAllSessions(): Promise<void>
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
// Domain Service Interfaces (Ports)
|
||||
export * from "./IStorage.js"
|
||||
export * from "./ISessionStorage.js"
|
||||
export * from "./ILLMClient.js"
|
||||
export * from "./ITool.js"
|
||||
export * from "./IIndexer.js"
|
||||
|
||||
@@ -4,6 +4,11 @@
|
||||
* Main entry point for the library.
|
||||
*/
|
||||
|
||||
import { createRequire } from "node:module"
|
||||
|
||||
const require = createRequire(import.meta.url)
|
||||
const pkg = require("../package.json") as { version: string }
|
||||
|
||||
// Domain exports
|
||||
export * from "./domain/index.js"
|
||||
|
||||
@@ -16,5 +21,8 @@ export * from "./shared/index.js"
|
||||
// Infrastructure exports
|
||||
export * from "./infrastructure/index.js"
|
||||
|
||||
// TUI exports
|
||||
export * from "./tui/index.js"
|
||||
|
||||
// Version
|
||||
export const VERSION = "0.2.0"
|
||||
export const VERSION = pkg.version
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Infrastructure layer exports
|
||||
export * from "./storage/index.js"
|
||||
export * from "./indexer/index.js"
|
||||
export * from "./llm/index.js"
|
||||
export * from "./tools/index.js"
|
||||
export * from "./security/index.js"
|
||||
|
||||
@@ -306,38 +306,7 @@ export class ASTParser {
|
||||
}
|
||||
}
|
||||
|
||||
let extendsName: string | undefined
|
||||
const implementsList: string[] = []
|
||||
|
||||
for (const child of node.children) {
|
||||
if (child.type === NodeType.CLASS_HERITAGE) {
|
||||
for (const clause of child.children) {
|
||||
if (clause.type === NodeType.EXTENDS_CLAUSE) {
|
||||
const typeNode = clause.children.find(
|
||||
(c) =>
|
||||
c.type === NodeType.TYPE_IDENTIFIER ||
|
||||
c.type === NodeType.IDENTIFIER,
|
||||
)
|
||||
extendsName = typeNode?.text
|
||||
} else if (clause.type === NodeType.IMPLEMENTS_CLAUSE) {
|
||||
for (const impl of clause.children) {
|
||||
if (
|
||||
impl.type === NodeType.TYPE_IDENTIFIER ||
|
||||
impl.type === NodeType.IDENTIFIER
|
||||
) {
|
||||
implementsList.push(impl.text)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (child.type === NodeType.EXTENDS_CLAUSE) {
|
||||
const typeNode = child.children.find(
|
||||
(c) => c.type === NodeType.TYPE_IDENTIFIER || c.type === NodeType.IDENTIFIER,
|
||||
)
|
||||
extendsName = typeNode?.text
|
||||
}
|
||||
}
|
||||
|
||||
const { extendsName, implementsList } = this.extractClassHeritage(node)
|
||||
const isAbstract = node.children.some((c) => c.type === NodeType.ABSTRACT)
|
||||
|
||||
ast.classes.push({
|
||||
@@ -353,6 +322,56 @@ export class ASTParser {
|
||||
})
|
||||
}
|
||||
|
||||
private extractClassHeritage(node: SyntaxNode): {
|
||||
extendsName: string | undefined
|
||||
implementsList: string[]
|
||||
} {
|
||||
let extendsName: string | undefined
|
||||
const implementsList: string[] = []
|
||||
|
||||
for (const child of node.children) {
|
||||
if (child.type === NodeType.CLASS_HERITAGE) {
|
||||
this.parseHeritageClause(child, (ext) => (extendsName = ext), implementsList)
|
||||
} else if (child.type === NodeType.EXTENDS_CLAUSE) {
|
||||
extendsName = this.findTypeIdentifier(child)
|
||||
}
|
||||
}
|
||||
|
||||
return { extendsName, implementsList }
|
||||
}
|
||||
|
||||
private parseHeritageClause(
|
||||
heritage: SyntaxNode,
|
||||
setExtends: (name: string) => void,
|
||||
implementsList: string[],
|
||||
): void {
|
||||
for (const clause of heritage.children) {
|
||||
if (clause.type === NodeType.EXTENDS_CLAUSE) {
|
||||
const typeId = this.findTypeIdentifier(clause)
|
||||
if (typeId) {
|
||||
setExtends(typeId)
|
||||
}
|
||||
} else if (clause.type === NodeType.IMPLEMENTS_CLAUSE) {
|
||||
this.collectImplements(clause, implementsList)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private findTypeIdentifier(node: SyntaxNode): string | undefined {
|
||||
const typeNode = node.children.find(
|
||||
(c) => c.type === NodeType.TYPE_IDENTIFIER || c.type === NodeType.IDENTIFIER,
|
||||
)
|
||||
return typeNode?.text
|
||||
}
|
||||
|
||||
private collectImplements(clause: SyntaxNode, list: string[]): void {
|
||||
for (const impl of clause.children) {
|
||||
if (impl.type === NodeType.TYPE_IDENTIFIER || impl.type === NodeType.IDENTIFIER) {
|
||||
list.push(impl.text)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private extractMethod(node: SyntaxNode): MethodInfo {
|
||||
const nameNode = node.childForFieldName(FieldName.NAME)
|
||||
const params = this.extractParameters(node)
|
||||
|
||||
302
packages/ipuaro/src/infrastructure/llm/OllamaClient.ts
Normal file
302
packages/ipuaro/src/infrastructure/llm/OllamaClient.ts
Normal file
@@ -0,0 +1,302 @@
|
||||
import { type Message, Ollama, type Tool } from "ollama"
|
||||
import type {
|
||||
ILLMClient,
|
||||
LLMResponse,
|
||||
ToolDef,
|
||||
ToolParameter,
|
||||
} from "../../domain/services/ILLMClient.js"
|
||||
import type { ChatMessage } from "../../domain/value-objects/ChatMessage.js"
|
||||
import { createToolCall, type ToolCall } from "../../domain/value-objects/ToolCall.js"
|
||||
import type { LLMConfig } from "../../shared/constants/config.js"
|
||||
import { IpuaroError } from "../../shared/errors/IpuaroError.js"
|
||||
import { estimateTokens } from "../../shared/utils/tokens.js"
|
||||
|
||||
/**
|
||||
* Ollama LLM client implementation.
|
||||
* Wraps the Ollama SDK for chat completions with tool support.
|
||||
*/
|
||||
export class OllamaClient implements ILLMClient {
|
||||
private readonly client: Ollama
|
||||
private readonly host: string
|
||||
private readonly model: string
|
||||
private readonly contextWindow: number
|
||||
private readonly temperature: number
|
||||
private readonly timeout: number
|
||||
private abortController: AbortController | null = null
|
||||
|
||||
constructor(config: LLMConfig) {
|
||||
this.host = config.host
|
||||
this.client = new Ollama({ host: this.host })
|
||||
this.model = config.model
|
||||
this.contextWindow = config.contextWindow
|
||||
this.temperature = config.temperature
|
||||
this.timeout = config.timeout
|
||||
}
|
||||
|
||||
/**
|
||||
* Send messages to LLM and get response.
|
||||
*/
|
||||
async chat(messages: ChatMessage[], tools?: ToolDef[]): Promise<LLMResponse> {
|
||||
const startTime = Date.now()
|
||||
this.abortController = new AbortController()
|
||||
|
||||
try {
|
||||
const ollamaMessages = this.convertMessages(messages)
|
||||
const ollamaTools = tools ? this.convertTools(tools) : undefined
|
||||
|
||||
const response = await this.client.chat({
|
||||
model: this.model,
|
||||
messages: ollamaMessages,
|
||||
tools: ollamaTools,
|
||||
options: {
|
||||
temperature: this.temperature,
|
||||
},
|
||||
stream: false,
|
||||
})
|
||||
|
||||
const timeMs = Date.now() - startTime
|
||||
const toolCalls = this.extractToolCalls(response.message)
|
||||
|
||||
return {
|
||||
content: response.message.content,
|
||||
toolCalls,
|
||||
tokens: response.eval_count ?? estimateTokens(response.message.content),
|
||||
timeMs,
|
||||
truncated: false,
|
||||
stopReason: this.determineStopReason(response, toolCalls),
|
||||
}
|
||||
} catch (error) {
|
||||
if (error instanceof Error && error.name === "AbortError") {
|
||||
throw IpuaroError.llm("Request was aborted")
|
||||
}
|
||||
throw this.handleError(error)
|
||||
} finally {
|
||||
this.abortController = null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Count tokens in text.
|
||||
* Uses estimation since Ollama doesn't provide a tokenizer endpoint.
|
||||
*/
|
||||
async countTokens(text: string): Promise<number> {
|
||||
return Promise.resolve(estimateTokens(text))
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if LLM service is available.
|
||||
*/
|
||||
async isAvailable(): Promise<boolean> {
|
||||
try {
|
||||
await this.client.list()
|
||||
return true
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current model name.
|
||||
*/
|
||||
getModelName(): string {
|
||||
return this.model
|
||||
}
|
||||
|
||||
/**
|
||||
* Get context window size.
|
||||
*/
|
||||
getContextWindowSize(): number {
|
||||
return this.contextWindow
|
||||
}
|
||||
|
||||
/**
|
||||
* Pull/download model if not available locally.
|
||||
*/
|
||||
async pullModel(model: string): Promise<void> {
|
||||
try {
|
||||
await this.client.pull({ model, stream: false })
|
||||
} catch (error) {
|
||||
throw this.handleError(error, `Failed to pull model: ${model}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a specific model is available locally.
|
||||
*/
|
||||
async hasModel(model: string): Promise<boolean> {
|
||||
try {
|
||||
const result = await this.client.list()
|
||||
return result.models.some((m) => m.name === model || m.name.startsWith(`${model}:`))
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List available models.
|
||||
*/
|
||||
async listModels(): Promise<string[]> {
|
||||
try {
|
||||
const result = await this.client.list()
|
||||
return result.models.map((m) => m.name)
|
||||
} catch (error) {
|
||||
throw this.handleError(error, "Failed to list models")
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abort current generation.
|
||||
*/
|
||||
abort(): void {
|
||||
if (this.abortController) {
|
||||
this.abortController.abort()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert ChatMessage array to Ollama Message format.
|
||||
*/
|
||||
private convertMessages(messages: ChatMessage[]): Message[] {
|
||||
return messages.map((msg): Message => {
|
||||
const role = this.convertRole(msg.role)
|
||||
|
||||
if (msg.role === "tool" && msg.toolResults) {
|
||||
return {
|
||||
role: "tool",
|
||||
content: msg.content,
|
||||
}
|
||||
}
|
||||
|
||||
if (msg.role === "assistant" && msg.toolCalls && msg.toolCalls.length > 0) {
|
||||
return {
|
||||
role: "assistant",
|
||||
content: msg.content,
|
||||
tool_calls: msg.toolCalls.map((tc) => ({
|
||||
function: {
|
||||
name: tc.name,
|
||||
arguments: tc.params,
|
||||
},
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
role,
|
||||
content: msg.content,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert message role to Ollama role.
|
||||
*/
|
||||
private convertRole(role: ChatMessage["role"]): "user" | "assistant" | "system" | "tool" {
|
||||
switch (role) {
|
||||
case "user":
|
||||
return "user"
|
||||
case "assistant":
|
||||
return "assistant"
|
||||
case "system":
|
||||
return "system"
|
||||
case "tool":
|
||||
return "tool"
|
||||
default:
|
||||
return "user"
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert ToolDef array to Ollama Tool format.
|
||||
*/
|
||||
private convertTools(tools: ToolDef[]): Tool[] {
|
||||
return tools.map(
|
||||
(tool): Tool => ({
|
||||
type: "function",
|
||||
function: {
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
parameters: {
|
||||
type: "object",
|
||||
properties: this.convertParameters(tool.parameters),
|
||||
required: tool.parameters.filter((p) => p.required).map((p) => p.name),
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert ToolParameter array to JSON Schema properties.
|
||||
*/
|
||||
private convertParameters(
|
||||
params: ToolParameter[],
|
||||
): Record<string, { type: string; description: string; enum?: string[] }> {
|
||||
const properties: Record<string, { type: string; description: string; enum?: string[] }> =
|
||||
{}
|
||||
|
||||
for (const param of params) {
|
||||
properties[param.name] = {
|
||||
type: param.type,
|
||||
description: param.description,
|
||||
...(param.enum && { enum: param.enum }),
|
||||
}
|
||||
}
|
||||
|
||||
return properties
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract tool calls from Ollama response message.
|
||||
*/
|
||||
private extractToolCalls(message: Message): ToolCall[] {
|
||||
if (!message.tool_calls || message.tool_calls.length === 0) {
|
||||
return []
|
||||
}
|
||||
|
||||
return message.tool_calls.map((tc, index) =>
|
||||
createToolCall(
|
||||
`call_${String(Date.now())}_${String(index)}`,
|
||||
tc.function.name,
|
||||
tc.function.arguments,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine stop reason from response.
|
||||
*/
|
||||
private determineStopReason(
|
||||
response: { done_reason?: string },
|
||||
toolCalls: ToolCall[],
|
||||
): "end" | "length" | "tool_use" {
|
||||
if (toolCalls.length > 0) {
|
||||
return "tool_use"
|
||||
}
|
||||
|
||||
if (response.done_reason === "length") {
|
||||
return "length"
|
||||
}
|
||||
|
||||
return "end"
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle and wrap errors.
|
||||
*/
|
||||
private handleError(error: unknown, context?: string): IpuaroError {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
const fullMessage = context ? `${context}: ${message}` : message
|
||||
|
||||
if (message.includes("ECONNREFUSED") || message.includes("fetch failed")) {
|
||||
return IpuaroError.llm(`Cannot connect to Ollama at ${this.host}`)
|
||||
}
|
||||
|
||||
if (message.includes("model") && message.includes("not found")) {
|
||||
return IpuaroError.llm(
|
||||
`Model "${this.model}" not found. Run: ollama pull ${this.model}`,
|
||||
)
|
||||
}
|
||||
|
||||
return IpuaroError.llm(fullMessage)
|
||||
}
|
||||
}
|
||||
220
packages/ipuaro/src/infrastructure/llm/ResponseParser.ts
Normal file
220
packages/ipuaro/src/infrastructure/llm/ResponseParser.ts
Normal file
@@ -0,0 +1,220 @@
|
||||
import { createToolCall, type ToolCall } from "../../domain/value-objects/ToolCall.js"
|
||||
|
||||
/**
|
||||
* Parsed response from LLM.
|
||||
*/
|
||||
export interface ParsedResponse {
|
||||
/** Text content (excluding tool calls) */
|
||||
content: string
|
||||
/** Extracted tool calls */
|
||||
toolCalls: ToolCall[]
|
||||
/** Whether parsing encountered issues */
|
||||
hasParseErrors: boolean
|
||||
/** Parse error messages */
|
||||
parseErrors: string[]
|
||||
}
|
||||
|
||||
/**
|
||||
* XML tool call tag pattern.
|
||||
* Matches: <tool_call name="tool_name">...</tool_call>
|
||||
*/
|
||||
const TOOL_CALL_REGEX = /<tool_call\s+name\s*=\s*"([^"]+)">([\s\S]*?)<\/tool_call>/gi
|
||||
|
||||
/**
|
||||
* XML parameter tag pattern.
|
||||
* Matches: <param name="param_name">value</param> or <param_name>value</param_name>
|
||||
*/
|
||||
const PARAM_REGEX_NAMED = /<param\s+name\s*=\s*"([^"]+)">([\s\S]*?)<\/param>/gi
|
||||
const PARAM_REGEX_ELEMENT = /<([a-z_][a-z0-9_]*)>([\s\S]*?)<\/\1>/gi
|
||||
|
||||
/**
|
||||
* Parse tool calls from LLM response text.
|
||||
* Supports XML format: <tool_call name="get_lines"><path>src/index.ts</path></tool_call>
|
||||
*/
|
||||
export function parseToolCalls(response: string): ParsedResponse {
|
||||
const toolCalls: ToolCall[] = []
|
||||
const parseErrors: string[] = []
|
||||
let content = response
|
||||
|
||||
const matches = [...response.matchAll(TOOL_CALL_REGEX)]
|
||||
|
||||
for (const match of matches) {
|
||||
const [fullMatch, toolName, paramsXml] = match
|
||||
|
||||
try {
|
||||
const params = parseParameters(paramsXml)
|
||||
const toolCall = createToolCall(
|
||||
`xml_${String(Date.now())}_${String(toolCalls.length)}`,
|
||||
toolName,
|
||||
params,
|
||||
)
|
||||
toolCalls.push(toolCall)
|
||||
content = content.replace(fullMatch, "")
|
||||
} catch (error) {
|
||||
const errorMsg = error instanceof Error ? error.message : String(error)
|
||||
parseErrors.push(`Failed to parse tool call "${toolName}": ${errorMsg}`)
|
||||
}
|
||||
}
|
||||
|
||||
content = content.trim()
|
||||
|
||||
return {
|
||||
content,
|
||||
toolCalls,
|
||||
hasParseErrors: parseErrors.length > 0,
|
||||
parseErrors,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse parameters from XML content.
|
||||
*/
|
||||
function parseParameters(xml: string): Record<string, unknown> {
|
||||
const params: Record<string, unknown> = {}
|
||||
|
||||
const namedMatches = [...xml.matchAll(PARAM_REGEX_NAMED)]
|
||||
for (const match of namedMatches) {
|
||||
const [, name, value] = match
|
||||
params[name] = parseValue(value)
|
||||
}
|
||||
|
||||
if (namedMatches.length === 0) {
|
||||
const elementMatches = [...xml.matchAll(PARAM_REGEX_ELEMENT)]
|
||||
for (const match of elementMatches) {
|
||||
const [, name, value] = match
|
||||
params[name] = parseValue(value)
|
||||
}
|
||||
}
|
||||
|
||||
return params
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a value string to appropriate type.
|
||||
*/
|
||||
function parseValue(value: string): unknown {
|
||||
const trimmed = value.trim()
|
||||
|
||||
if (trimmed === "true") {
|
||||
return true
|
||||
}
|
||||
|
||||
if (trimmed === "false") {
|
||||
return false
|
||||
}
|
||||
|
||||
if (trimmed === "null") {
|
||||
return null
|
||||
}
|
||||
|
||||
const num = Number(trimmed)
|
||||
if (!isNaN(num) && trimmed !== "") {
|
||||
return num
|
||||
}
|
||||
|
||||
if (
|
||||
(trimmed.startsWith("[") && trimmed.endsWith("]")) ||
|
||||
(trimmed.startsWith("{") && trimmed.endsWith("}"))
|
||||
) {
|
||||
try {
|
||||
return JSON.parse(trimmed)
|
||||
} catch {
|
||||
return trimmed
|
||||
}
|
||||
}
|
||||
|
||||
return trimmed
|
||||
}
|
||||
|
||||
/**
|
||||
* Format tool calls to XML for prompt injection.
|
||||
* Useful when you need to show the LLM the expected format.
|
||||
*/
|
||||
export function formatToolCallsAsXml(toolCalls: ToolCall[]): string {
|
||||
return toolCalls
|
||||
.map((tc) => {
|
||||
const params = Object.entries(tc.params)
|
||||
.map(([key, value]) => ` <${key}>${formatValueForXml(value)}</${key}>`)
|
||||
.join("\n")
|
||||
return `<tool_call name="${tc.name}">\n${params}\n</tool_call>`
|
||||
})
|
||||
.join("\n\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Format a value for XML output.
|
||||
*/
|
||||
function formatValueForXml(value: unknown): string {
|
||||
if (value === null || value === undefined) {
|
||||
return ""
|
||||
}
|
||||
|
||||
if (typeof value === "object") {
|
||||
return JSON.stringify(value)
|
||||
}
|
||||
|
||||
if (typeof value === "string") {
|
||||
return value
|
||||
}
|
||||
|
||||
if (typeof value === "number" || typeof value === "boolean") {
|
||||
return String(value)
|
||||
}
|
||||
|
||||
return JSON.stringify(value)
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract thinking/reasoning from response.
|
||||
* Matches content between <thinking>...</thinking> tags.
|
||||
*/
|
||||
export function extractThinking(response: string): { thinking: string; content: string } {
|
||||
const thinkingRegex = /<thinking>([\s\S]*?)<\/thinking>/gi
|
||||
const matches = [...response.matchAll(thinkingRegex)]
|
||||
|
||||
if (matches.length === 0) {
|
||||
return { thinking: "", content: response }
|
||||
}
|
||||
|
||||
let content = response
|
||||
const thoughts: string[] = []
|
||||
|
||||
for (const match of matches) {
|
||||
thoughts.push(match[1].trim())
|
||||
content = content.replace(match[0], "")
|
||||
}
|
||||
|
||||
return {
|
||||
thinking: thoughts.join("\n\n"),
|
||||
content: content.trim(),
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if response contains tool calls.
|
||||
*/
|
||||
export function hasToolCalls(response: string): boolean {
|
||||
return TOOL_CALL_REGEX.test(response)
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate tool call parameters against expected schema.
|
||||
*/
|
||||
export function validateToolCallParams(
|
||||
toolName: string,
|
||||
params: Record<string, unknown>,
|
||||
requiredParams: string[],
|
||||
): { valid: boolean; errors: string[] } {
|
||||
const errors: string[] = []
|
||||
|
||||
for (const param of requiredParams) {
|
||||
if (!(param in params) || params[param] === undefined || params[param] === null) {
|
||||
errors.push(`Missing required parameter: ${param}`)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
valid: errors.length === 0,
|
||||
errors,
|
||||
}
|
||||
}
|
||||
48
packages/ipuaro/src/infrastructure/llm/index.ts
Normal file
48
packages/ipuaro/src/infrastructure/llm/index.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
// LLM infrastructure exports
|
||||
export { OllamaClient } from "./OllamaClient.js"
|
||||
export {
|
||||
SYSTEM_PROMPT,
|
||||
buildInitialContext,
|
||||
buildFileContext,
|
||||
truncateContext,
|
||||
type ProjectStructure,
|
||||
} from "./prompts.js"
|
||||
export {
|
||||
ALL_TOOLS,
|
||||
READ_TOOLS,
|
||||
EDIT_TOOLS,
|
||||
SEARCH_TOOLS,
|
||||
ANALYSIS_TOOLS,
|
||||
GIT_TOOLS,
|
||||
RUN_TOOLS,
|
||||
CONFIRMATION_TOOLS,
|
||||
requiresConfirmation,
|
||||
getToolDef,
|
||||
getToolsByCategory,
|
||||
GET_LINES_TOOL,
|
||||
GET_FUNCTION_TOOL,
|
||||
GET_CLASS_TOOL,
|
||||
GET_STRUCTURE_TOOL,
|
||||
EDIT_LINES_TOOL,
|
||||
CREATE_FILE_TOOL,
|
||||
DELETE_FILE_TOOL,
|
||||
FIND_REFERENCES_TOOL,
|
||||
FIND_DEFINITION_TOOL,
|
||||
GET_DEPENDENCIES_TOOL,
|
||||
GET_DEPENDENTS_TOOL,
|
||||
GET_COMPLEXITY_TOOL,
|
||||
GET_TODOS_TOOL,
|
||||
GIT_STATUS_TOOL,
|
||||
GIT_DIFF_TOOL,
|
||||
GIT_COMMIT_TOOL,
|
||||
RUN_COMMAND_TOOL,
|
||||
RUN_TESTS_TOOL,
|
||||
} from "./toolDefs.js"
|
||||
export {
|
||||
parseToolCalls,
|
||||
formatToolCallsAsXml,
|
||||
extractThinking,
|
||||
hasToolCalls,
|
||||
validateToolCallParams,
|
||||
type ParsedResponse,
|
||||
} from "./ResponseParser.js"
|
||||
335
packages/ipuaro/src/infrastructure/llm/prompts.ts
Normal file
335
packages/ipuaro/src/infrastructure/llm/prompts.ts
Normal file
@@ -0,0 +1,335 @@
|
||||
import type { FileAST } from "../../domain/value-objects/FileAST.js"
|
||||
import type { FileMeta } from "../../domain/value-objects/FileMeta.js"
|
||||
|
||||
/**
|
||||
* Project structure for context building.
|
||||
*/
|
||||
export interface ProjectStructure {
|
||||
name: string
|
||||
rootPath: string
|
||||
files: string[]
|
||||
directories: string[]
|
||||
}
|
||||
|
||||
/**
|
||||
* System prompt for the ipuaro AI agent.
|
||||
*/
|
||||
export const SYSTEM_PROMPT = `You are ipuaro, a local AI code assistant specialized in helping developers understand and modify their codebase. You operate within a single project directory and have access to powerful tools for reading, searching, analyzing, and editing code.
|
||||
|
||||
## Core Principles
|
||||
|
||||
1. **Lazy Loading**: You don't have the full code in context. Use tools to fetch exactly what you need.
|
||||
2. **Precision**: Always verify file paths and line numbers before making changes.
|
||||
3. **Safety**: Confirm destructive operations. Never execute dangerous commands.
|
||||
4. **Efficiency**: Minimize context usage. Request only necessary code sections.
|
||||
|
||||
## Available Tools
|
||||
|
||||
### Reading Tools
|
||||
- \`get_lines\`: Get specific lines from a file
|
||||
- \`get_function\`: Get a function by name
|
||||
- \`get_class\`: Get a class by name
|
||||
- \`get_structure\`: Get project directory structure
|
||||
|
||||
### Editing Tools (require confirmation)
|
||||
- \`edit_lines\`: Replace specific lines in a file
|
||||
- \`create_file\`: Create a new file
|
||||
- \`delete_file\`: Delete a file
|
||||
|
||||
### Search Tools
|
||||
- \`find_references\`: Find all usages of a symbol
|
||||
- \`find_definition\`: Find where a symbol is defined
|
||||
|
||||
### Analysis Tools
|
||||
- \`get_dependencies\`: Get files this file imports
|
||||
- \`get_dependents\`: Get files that import this file
|
||||
- \`get_complexity\`: Get complexity metrics
|
||||
- \`get_todos\`: Find TODO/FIXME comments
|
||||
|
||||
### Git Tools
|
||||
- \`git_status\`: Get repository status
|
||||
- \`git_diff\`: Get uncommitted changes
|
||||
- \`git_commit\`: Create a commit (requires confirmation)
|
||||
|
||||
### Run Tools
|
||||
- \`run_command\`: Execute a shell command (security checked)
|
||||
- \`run_tests\`: Run the test suite
|
||||
|
||||
## Response Guidelines
|
||||
|
||||
1. **Be concise**: Don't repeat information already in context.
|
||||
2. **Show your work**: Explain what tools you're using and why.
|
||||
3. **Verify before editing**: Always read the target code before modifying it.
|
||||
4. **Handle errors gracefully**: If a tool fails, explain what went wrong and suggest alternatives.
|
||||
|
||||
## Code Editing Rules
|
||||
|
||||
1. Always use \`get_lines\` or \`get_function\` before \`edit_lines\`.
|
||||
2. Provide exact line numbers for edits.
|
||||
3. For large changes, break into multiple small edits.
|
||||
4. After editing, suggest running tests if available.
|
||||
|
||||
## Safety Rules
|
||||
|
||||
1. Never execute commands that could harm the system.
|
||||
2. Never expose sensitive data (API keys, passwords).
|
||||
3. Always confirm file deletions and destructive git operations.
|
||||
4. Stay within the project directory.
|
||||
|
||||
When you need to perform an action, use the appropriate tool. Think step by step about what information you need and which tools will provide it most efficiently.`
|
||||
|
||||
/**
|
||||
* Build initial context from project structure and AST metadata.
|
||||
* Returns a compact representation without actual code.
|
||||
*/
|
||||
export function buildInitialContext(
|
||||
structure: ProjectStructure,
|
||||
asts: Map<string, FileAST>,
|
||||
metas?: Map<string, FileMeta>,
|
||||
): string {
|
||||
const sections: string[] = []
|
||||
|
||||
sections.push(formatProjectHeader(structure))
|
||||
sections.push(formatDirectoryTree(structure))
|
||||
sections.push(formatFileOverview(asts, metas))
|
||||
|
||||
return sections.join("\n\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Format project header section.
|
||||
*/
|
||||
function formatProjectHeader(structure: ProjectStructure): string {
|
||||
const fileCount = String(structure.files.length)
|
||||
const dirCount = String(structure.directories.length)
|
||||
return `# Project: ${structure.name}
|
||||
Root: ${structure.rootPath}
|
||||
Files: ${fileCount} | Directories: ${dirCount}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Format directory tree.
|
||||
*/
|
||||
function formatDirectoryTree(structure: ProjectStructure): string {
|
||||
const lines: string[] = ["## Structure", ""]
|
||||
|
||||
const sortedDirs = [...structure.directories].sort()
|
||||
for (const dir of sortedDirs) {
|
||||
const depth = dir.split("/").length - 1
|
||||
const indent = " ".repeat(depth)
|
||||
const name = dir.split("/").pop() ?? dir
|
||||
lines.push(`${indent}${name}/`)
|
||||
}
|
||||
|
||||
return lines.join("\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Format file overview with AST summaries.
|
||||
*/
|
||||
function formatFileOverview(asts: Map<string, FileAST>, metas?: Map<string, FileMeta>): string {
|
||||
const lines: string[] = ["## Files", ""]
|
||||
|
||||
const sortedPaths = [...asts.keys()].sort()
|
||||
for (const path of sortedPaths) {
|
||||
const ast = asts.get(path)
|
||||
if (!ast) {
|
||||
continue
|
||||
}
|
||||
|
||||
const meta = metas?.get(path)
|
||||
lines.push(formatFileSummary(path, ast, meta))
|
||||
}
|
||||
|
||||
return lines.join("\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Format a single file's AST summary.
|
||||
*/
|
||||
function formatFileSummary(path: string, ast: FileAST, meta?: FileMeta): string {
|
||||
const parts: string[] = []
|
||||
|
||||
if (ast.functions.length > 0) {
|
||||
const names = ast.functions.map((f) => f.name).join(", ")
|
||||
parts.push(`fn: ${names}`)
|
||||
}
|
||||
|
||||
if (ast.classes.length > 0) {
|
||||
const names = ast.classes.map((c) => c.name).join(", ")
|
||||
parts.push(`class: ${names}`)
|
||||
}
|
||||
|
||||
if (ast.interfaces.length > 0) {
|
||||
const names = ast.interfaces.map((i) => i.name).join(", ")
|
||||
parts.push(`interface: ${names}`)
|
||||
}
|
||||
|
||||
if (ast.typeAliases.length > 0) {
|
||||
const names = ast.typeAliases.map((t) => t.name).join(", ")
|
||||
parts.push(`type: ${names}`)
|
||||
}
|
||||
|
||||
const summary = parts.length > 0 ? ` [${parts.join(" | ")}]` : ""
|
||||
const flags = formatFileFlags(meta)
|
||||
|
||||
return `- ${path}${summary}${flags}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Format file metadata flags.
|
||||
*/
|
||||
function formatFileFlags(meta?: FileMeta): string {
|
||||
if (!meta) {
|
||||
return ""
|
||||
}
|
||||
|
||||
const flags: string[] = []
|
||||
|
||||
if (meta.isHub) {
|
||||
flags.push("hub")
|
||||
}
|
||||
|
||||
if (meta.isEntryPoint) {
|
||||
flags.push("entry")
|
||||
}
|
||||
|
||||
if (meta.complexity.score > 70) {
|
||||
flags.push("complex")
|
||||
}
|
||||
|
||||
return flags.length > 0 ? ` (${flags.join(", ")})` : ""
|
||||
}
|
||||
|
||||
/**
|
||||
* Format line range for display.
|
||||
*/
|
||||
function formatLineRange(start: number, end: number): string {
|
||||
return `[${String(start)}-${String(end)}]`
|
||||
}
|
||||
|
||||
/**
|
||||
* Format imports section.
|
||||
*/
|
||||
function formatImportsSection(ast: FileAST): string[] {
|
||||
if (ast.imports.length === 0) {
|
||||
return []
|
||||
}
|
||||
const lines = ["### Imports"]
|
||||
for (const imp of ast.imports) {
|
||||
lines.push(`- ${imp.name} from "${imp.from}" (${imp.type})`)
|
||||
}
|
||||
lines.push("")
|
||||
return lines
|
||||
}
|
||||
|
||||
/**
|
||||
* Format exports section.
|
||||
*/
|
||||
function formatExportsSection(ast: FileAST): string[] {
|
||||
if (ast.exports.length === 0) {
|
||||
return []
|
||||
}
|
||||
const lines = ["### Exports"]
|
||||
for (const exp of ast.exports) {
|
||||
const defaultMark = exp.isDefault ? " (default)" : ""
|
||||
lines.push(`- ${exp.kind} ${exp.name}${defaultMark}`)
|
||||
}
|
||||
lines.push("")
|
||||
return lines
|
||||
}
|
||||
|
||||
/**
|
||||
* Format functions section.
|
||||
*/
|
||||
function formatFunctionsSection(ast: FileAST): string[] {
|
||||
if (ast.functions.length === 0) {
|
||||
return []
|
||||
}
|
||||
const lines = ["### Functions"]
|
||||
for (const fn of ast.functions) {
|
||||
const params = fn.params.map((p) => p.name).join(", ")
|
||||
const asyncMark = fn.isAsync ? "async " : ""
|
||||
const range = formatLineRange(fn.lineStart, fn.lineEnd)
|
||||
lines.push(`- ${asyncMark}${fn.name}(${params}) ${range}`)
|
||||
}
|
||||
lines.push("")
|
||||
return lines
|
||||
}
|
||||
|
||||
/**
|
||||
* Format classes section.
|
||||
*/
|
||||
function formatClassesSection(ast: FileAST): string[] {
|
||||
if (ast.classes.length === 0) {
|
||||
return []
|
||||
}
|
||||
const lines = ["### Classes"]
|
||||
for (const cls of ast.classes) {
|
||||
const ext = cls.extends ? ` extends ${cls.extends}` : ""
|
||||
const impl = cls.implements.length > 0 ? ` implements ${cls.implements.join(", ")}` : ""
|
||||
const range = formatLineRange(cls.lineStart, cls.lineEnd)
|
||||
lines.push(`- ${cls.name}${ext}${impl} ${range}`)
|
||||
|
||||
for (const method of cls.methods) {
|
||||
const vis = method.visibility === "public" ? "" : `${method.visibility} `
|
||||
const methodRange = formatLineRange(method.lineStart, method.lineEnd)
|
||||
lines.push(` - ${vis}${method.name}() ${methodRange}`)
|
||||
}
|
||||
}
|
||||
lines.push("")
|
||||
return lines
|
||||
}
|
||||
|
||||
/**
|
||||
* Format metadata section.
|
||||
*/
|
||||
function formatMetadataSection(meta: FileMeta): string[] {
|
||||
const loc = String(meta.complexity.loc)
|
||||
const score = String(meta.complexity.score)
|
||||
const deps = String(meta.dependencies.length)
|
||||
const dependents = String(meta.dependents.length)
|
||||
return [
|
||||
"### Metadata",
|
||||
`- LOC: ${loc}`,
|
||||
`- Complexity: ${score}/100`,
|
||||
`- Dependencies: ${deps}`,
|
||||
`- Dependents: ${dependents}`,
|
||||
]
|
||||
}
|
||||
|
||||
/**
|
||||
* Build context for a specific file request.
|
||||
*/
|
||||
export function buildFileContext(path: string, ast: FileAST, meta?: FileMeta): string {
|
||||
const lines: string[] = [`## ${path}`, ""]
|
||||
|
||||
lines.push(...formatImportsSection(ast))
|
||||
lines.push(...formatExportsSection(ast))
|
||||
lines.push(...formatFunctionsSection(ast))
|
||||
lines.push(...formatClassesSection(ast))
|
||||
|
||||
if (meta) {
|
||||
lines.push(...formatMetadataSection(meta))
|
||||
}
|
||||
|
||||
return lines.join("\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Truncate context to fit within token budget.
|
||||
*/
|
||||
export function truncateContext(context: string, maxTokens: number): string {
|
||||
const charsPerToken = 4
|
||||
const maxChars = maxTokens * charsPerToken
|
||||
|
||||
if (context.length <= maxChars) {
|
||||
return context
|
||||
}
|
||||
|
||||
const truncated = context.slice(0, maxChars - 100)
|
||||
const lastNewline = truncated.lastIndexOf("\n")
|
||||
const remaining = String(context.length - lastNewline)
|
||||
|
||||
return `${truncated.slice(0, lastNewline)}\n\n... (truncated, ${remaining} chars remaining)`
|
||||
}
|
||||
511
packages/ipuaro/src/infrastructure/llm/toolDefs.ts
Normal file
511
packages/ipuaro/src/infrastructure/llm/toolDefs.ts
Normal file
@@ -0,0 +1,511 @@
|
||||
import type { ToolDef } from "../../domain/services/ILLMClient.js"
|
||||
|
||||
/**
|
||||
* Tool definitions for ipuaro LLM.
|
||||
* 18 tools across 6 categories: read, edit, search, analysis, git, run.
|
||||
*/
|
||||
|
||||
/*
|
||||
* =============================================================================
|
||||
* Read Tools (4)
|
||||
* =============================================================================
|
||||
*/
|
||||
|
||||
export const GET_LINES_TOOL: ToolDef = {
|
||||
name: "get_lines",
|
||||
description:
|
||||
"Get specific lines from a file. Returns the content with line numbers. " +
|
||||
"If no range is specified, returns the entire file.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "start",
|
||||
type: "number",
|
||||
description: "Start line number (1-based, inclusive)",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "end",
|
||||
type: "number",
|
||||
description: "End line number (1-based, inclusive)",
|
||||
required: false,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const GET_FUNCTION_TOOL: ToolDef = {
|
||||
name: "get_function",
|
||||
description:
|
||||
"Get a function's source code by name. Uses AST to find exact line range. " +
|
||||
"Returns the function code with line numbers.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "name",
|
||||
type: "string",
|
||||
description: "Function name to retrieve",
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const GET_CLASS_TOOL: ToolDef = {
|
||||
name: "get_class",
|
||||
description:
|
||||
"Get a class's source code by name. Uses AST to find exact line range. " +
|
||||
"Returns the class code with line numbers.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "name",
|
||||
type: "string",
|
||||
description: "Class name to retrieve",
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const GET_STRUCTURE_TOOL: ToolDef = {
|
||||
name: "get_structure",
|
||||
description:
|
||||
"Get project directory structure as a tree. " +
|
||||
"If path is specified, shows structure of that subdirectory only.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "Subdirectory path relative to project root (optional, defaults to root)",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "depth",
|
||||
type: "number",
|
||||
description: "Maximum depth to traverse (default: unlimited)",
|
||||
required: false,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
/*
|
||||
* =============================================================================
|
||||
* Edit Tools (3) - All require confirmation
|
||||
* =============================================================================
|
||||
*/
|
||||
|
||||
export const EDIT_LINES_TOOL: ToolDef = {
|
||||
name: "edit_lines",
|
||||
description:
|
||||
"Replace lines in a file with new content. Requires reading the file first. " +
|
||||
"Will show diff and ask for confirmation before applying.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "start",
|
||||
type: "number",
|
||||
description: "Start line number (1-based, inclusive) to replace",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "end",
|
||||
type: "number",
|
||||
description: "End line number (1-based, inclusive) to replace",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "content",
|
||||
type: "string",
|
||||
description: "New content to insert (can be multiple lines)",
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const CREATE_FILE_TOOL: ToolDef = {
|
||||
name: "create_file",
|
||||
description:
|
||||
"Create a new file with specified content. " +
|
||||
"Will fail if file already exists. Will ask for confirmation.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "content",
|
||||
type: "string",
|
||||
description: "File content",
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const DELETE_FILE_TOOL: ToolDef = {
|
||||
name: "delete_file",
|
||||
description:
|
||||
"Delete a file from the project. " +
|
||||
"Will ask for confirmation. Previous content is saved to undo stack.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
/*
|
||||
* =============================================================================
|
||||
* Search Tools (2)
|
||||
* =============================================================================
|
||||
*/
|
||||
|
||||
export const FIND_REFERENCES_TOOL: ToolDef = {
|
||||
name: "find_references",
|
||||
description:
|
||||
"Find all usages of a symbol across the codebase. " +
|
||||
"Returns list of file paths, line numbers, and context.",
|
||||
parameters: [
|
||||
{
|
||||
name: "symbol",
|
||||
type: "string",
|
||||
description: "Symbol name to search for (function, class, variable, etc.)",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "Limit search to specific file or directory",
|
||||
required: false,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const FIND_DEFINITION_TOOL: ToolDef = {
|
||||
name: "find_definition",
|
||||
description:
|
||||
"Find where a symbol is defined. " + "Returns file path, line number, and symbol type.",
|
||||
parameters: [
|
||||
{
|
||||
name: "symbol",
|
||||
type: "string",
|
||||
description: "Symbol name to find definition for",
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
/*
|
||||
* =============================================================================
|
||||
* Analysis Tools (4)
|
||||
* =============================================================================
|
||||
*/
|
||||
|
||||
export const GET_DEPENDENCIES_TOOL: ToolDef = {
|
||||
name: "get_dependencies",
|
||||
description:
|
||||
"Get files that this file imports (internal dependencies). " +
|
||||
"Returns list of imported file paths.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const GET_DEPENDENTS_TOOL: ToolDef = {
|
||||
name: "get_dependents",
|
||||
description:
|
||||
"Get files that import this file (reverse dependencies). " +
|
||||
"Returns list of file paths that depend on this file.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const GET_COMPLEXITY_TOOL: ToolDef = {
|
||||
name: "get_complexity",
|
||||
description:
|
||||
"Get complexity metrics for a file or the entire project. " +
|
||||
"Returns LOC, nesting depth, cyclomatic complexity, and overall score.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path (optional, defaults to all files sorted by complexity)",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "limit",
|
||||
type: "number",
|
||||
description: "Max files to return when showing all (default: 10)",
|
||||
required: false,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const GET_TODOS_TOOL: ToolDef = {
|
||||
name: "get_todos",
|
||||
description:
|
||||
"Find TODO, FIXME, HACK, and XXX comments in the codebase. " +
|
||||
"Returns list with file paths, line numbers, and comment text.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "Limit search to specific file or directory",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "type",
|
||||
type: "string",
|
||||
description: "Filter by comment type",
|
||||
required: false,
|
||||
enum: ["TODO", "FIXME", "HACK", "XXX"],
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
/*
|
||||
* =============================================================================
|
||||
* Git Tools (3)
|
||||
* =============================================================================
|
||||
*/
|
||||
|
||||
export const GIT_STATUS_TOOL: ToolDef = {
|
||||
name: "git_status",
|
||||
description:
|
||||
"Get current git repository status. " +
|
||||
"Returns branch name, staged files, modified files, and untracked files.",
|
||||
parameters: [],
|
||||
}
|
||||
|
||||
export const GIT_DIFF_TOOL: ToolDef = {
|
||||
name: "git_diff",
|
||||
description:
|
||||
"Get uncommitted changes (diff). " + "Shows what has changed but not yet committed.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "Limit diff to specific file or directory",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "staged",
|
||||
type: "boolean",
|
||||
description: "Show only staged changes (default: false, shows all)",
|
||||
required: false,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const GIT_COMMIT_TOOL: ToolDef = {
|
||||
name: "git_commit",
|
||||
description:
|
||||
"Create a git commit with the specified message. " +
|
||||
"Will ask for confirmation. Optionally stage specific files first.",
|
||||
parameters: [
|
||||
{
|
||||
name: "message",
|
||||
type: "string",
|
||||
description: "Commit message",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "files",
|
||||
type: "array",
|
||||
description: "Files to stage before commit (optional, defaults to all staged)",
|
||||
required: false,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
/*
|
||||
* =============================================================================
|
||||
* Run Tools (2)
|
||||
* =============================================================================
|
||||
*/
|
||||
|
||||
export const RUN_COMMAND_TOOL: ToolDef = {
|
||||
name: "run_command",
|
||||
description:
|
||||
"Execute a shell command in the project directory. " +
|
||||
"Commands are checked against blacklist/whitelist for security. " +
|
||||
"Unknown commands require user confirmation.",
|
||||
parameters: [
|
||||
{
|
||||
name: "command",
|
||||
type: "string",
|
||||
description: "Shell command to execute",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "timeout",
|
||||
type: "number",
|
||||
description: "Timeout in milliseconds (default: 30000)",
|
||||
required: false,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
export const RUN_TESTS_TOOL: ToolDef = {
|
||||
name: "run_tests",
|
||||
description:
|
||||
"Run the project's test suite. Auto-detects test runner (vitest, jest, npm test). " +
|
||||
"Returns test results summary.",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "Run tests for specific file or directory",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "filter",
|
||||
type: "string",
|
||||
description: "Filter tests by name pattern",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "watch",
|
||||
type: "boolean",
|
||||
description: "Run in watch mode (default: false)",
|
||||
required: false,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
/*
|
||||
* =============================================================================
|
||||
* Tool Collection
|
||||
* =============================================================================
|
||||
*/
|
||||
|
||||
/**
|
||||
* All read tools (no confirmation required).
|
||||
*/
|
||||
export const READ_TOOLS: ToolDef[] = [
|
||||
GET_LINES_TOOL,
|
||||
GET_FUNCTION_TOOL,
|
||||
GET_CLASS_TOOL,
|
||||
GET_STRUCTURE_TOOL,
|
||||
]
|
||||
|
||||
/**
|
||||
* All edit tools (require confirmation).
|
||||
*/
|
||||
export const EDIT_TOOLS: ToolDef[] = [EDIT_LINES_TOOL, CREATE_FILE_TOOL, DELETE_FILE_TOOL]
|
||||
|
||||
/**
|
||||
* All search tools (no confirmation required).
|
||||
*/
|
||||
export const SEARCH_TOOLS: ToolDef[] = [FIND_REFERENCES_TOOL, FIND_DEFINITION_TOOL]
|
||||
|
||||
/**
|
||||
* All analysis tools (no confirmation required).
|
||||
*/
|
||||
export const ANALYSIS_TOOLS: ToolDef[] = [
|
||||
GET_DEPENDENCIES_TOOL,
|
||||
GET_DEPENDENTS_TOOL,
|
||||
GET_COMPLEXITY_TOOL,
|
||||
GET_TODOS_TOOL,
|
||||
]
|
||||
|
||||
/**
|
||||
* All git tools (git_commit requires confirmation).
|
||||
*/
|
||||
export const GIT_TOOLS: ToolDef[] = [GIT_STATUS_TOOL, GIT_DIFF_TOOL, GIT_COMMIT_TOOL]
|
||||
|
||||
/**
|
||||
* All run tools (run_command may require confirmation).
|
||||
*/
|
||||
export const RUN_TOOLS: ToolDef[] = [RUN_COMMAND_TOOL, RUN_TESTS_TOOL]
|
||||
|
||||
/**
|
||||
* All 18 tool definitions.
|
||||
*/
|
||||
export const ALL_TOOLS: ToolDef[] = [
|
||||
...READ_TOOLS,
|
||||
...EDIT_TOOLS,
|
||||
...SEARCH_TOOLS,
|
||||
...ANALYSIS_TOOLS,
|
||||
...GIT_TOOLS,
|
||||
...RUN_TOOLS,
|
||||
]
|
||||
|
||||
/**
|
||||
* Tools that require user confirmation before execution.
|
||||
*/
|
||||
export const CONFIRMATION_TOOLS = new Set([
|
||||
"edit_lines",
|
||||
"create_file",
|
||||
"delete_file",
|
||||
"git_commit",
|
||||
])
|
||||
|
||||
/**
|
||||
* Check if a tool requires confirmation.
|
||||
*/
|
||||
export function requiresConfirmation(toolName: string): boolean {
|
||||
return CONFIRMATION_TOOLS.has(toolName)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tool definition by name.
|
||||
*/
|
||||
export function getToolDef(name: string): ToolDef | undefined {
|
||||
return ALL_TOOLS.find((t) => t.name === name)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tool definitions by category.
|
||||
*/
|
||||
export function getToolsByCategory(category: string): ToolDef[] {
|
||||
switch (category) {
|
||||
case "read":
|
||||
return READ_TOOLS
|
||||
case "edit":
|
||||
return EDIT_TOOLS
|
||||
case "search":
|
||||
return SEARCH_TOOLS
|
||||
case "analysis":
|
||||
return ANALYSIS_TOOLS
|
||||
case "git":
|
||||
return GIT_TOOLS
|
||||
case "run":
|
||||
return RUN_TOOLS
|
||||
default:
|
||||
return []
|
||||
}
|
||||
}
|
||||
293
packages/ipuaro/src/infrastructure/security/PathValidator.ts
Normal file
293
packages/ipuaro/src/infrastructure/security/PathValidator.ts
Normal file
@@ -0,0 +1,293 @@
|
||||
import * as path from "node:path"
|
||||
import { promises as fs } from "node:fs"
|
||||
|
||||
/**
|
||||
* Path validation result classification.
|
||||
*/
|
||||
export type PathValidationStatus = "valid" | "invalid" | "outside_project"
|
||||
|
||||
/**
|
||||
* Result of path validation.
|
||||
*/
|
||||
export interface PathValidationResult {
|
||||
/** Validation status */
|
||||
status: PathValidationStatus
|
||||
/** Reason for the status */
|
||||
reason: string
|
||||
/** Normalized absolute path (only if valid) */
|
||||
absolutePath?: string
|
||||
/** Normalized relative path (only if valid) */
|
||||
relativePath?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Options for path validation.
|
||||
*/
|
||||
export interface PathValidatorOptions {
|
||||
/** Allow paths that don't exist yet (for create operations) */
|
||||
allowNonExistent?: boolean
|
||||
/** Check if path is a directory */
|
||||
requireDirectory?: boolean
|
||||
/** Check if path is a file */
|
||||
requireFile?: boolean
|
||||
/** Follow symlinks when checking existence */
|
||||
followSymlinks?: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Path validator for ensuring file operations stay within project boundaries.
|
||||
* Prevents path traversal attacks and unauthorized file access.
|
||||
*/
|
||||
export class PathValidator {
|
||||
private readonly projectRoot: string
|
||||
|
||||
constructor(projectRoot: string) {
|
||||
this.projectRoot = path.resolve(projectRoot)
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate a path and return detailed result.
|
||||
* @param inputPath - Path to validate (relative or absolute)
|
||||
* @param options - Validation options
|
||||
*/
|
||||
async validate(
|
||||
inputPath: string,
|
||||
options: PathValidatorOptions = {},
|
||||
): Promise<PathValidationResult> {
|
||||
if (!inputPath || inputPath.trim() === "") {
|
||||
return {
|
||||
status: "invalid",
|
||||
reason: "Path is empty",
|
||||
}
|
||||
}
|
||||
|
||||
const normalizedInput = inputPath.trim()
|
||||
|
||||
if (this.containsTraversalPatterns(normalizedInput)) {
|
||||
return {
|
||||
status: "invalid",
|
||||
reason: "Path contains traversal patterns",
|
||||
}
|
||||
}
|
||||
|
||||
const absolutePath = path.resolve(this.projectRoot, normalizedInput)
|
||||
|
||||
if (!this.isWithinProject(absolutePath)) {
|
||||
return {
|
||||
status: "outside_project",
|
||||
reason: "Path is outside project root",
|
||||
}
|
||||
}
|
||||
|
||||
const relativePath = path.relative(this.projectRoot, absolutePath)
|
||||
|
||||
if (!options.allowNonExistent) {
|
||||
const existsResult = await this.checkExists(absolutePath, options)
|
||||
if (existsResult) {
|
||||
return existsResult
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
status: "valid",
|
||||
reason: "Path is valid",
|
||||
absolutePath,
|
||||
relativePath,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Synchronous validation for simple checks.
|
||||
* Does not check file existence or type.
|
||||
* @param inputPath - Path to validate (relative or absolute)
|
||||
*/
|
||||
validateSync(inputPath: string): PathValidationResult {
|
||||
if (!inputPath || inputPath.trim() === "") {
|
||||
return {
|
||||
status: "invalid",
|
||||
reason: "Path is empty",
|
||||
}
|
||||
}
|
||||
|
||||
const normalizedInput = inputPath.trim()
|
||||
|
||||
if (this.containsTraversalPatterns(normalizedInput)) {
|
||||
return {
|
||||
status: "invalid",
|
||||
reason: "Path contains traversal patterns",
|
||||
}
|
||||
}
|
||||
|
||||
const absolutePath = path.resolve(this.projectRoot, normalizedInput)
|
||||
|
||||
if (!this.isWithinProject(absolutePath)) {
|
||||
return {
|
||||
status: "outside_project",
|
||||
reason: "Path is outside project root",
|
||||
}
|
||||
}
|
||||
|
||||
const relativePath = path.relative(this.projectRoot, absolutePath)
|
||||
|
||||
return {
|
||||
status: "valid",
|
||||
reason: "Path is valid",
|
||||
absolutePath,
|
||||
relativePath,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Quick check if path is within project.
|
||||
* @param inputPath - Path to check (relative or absolute)
|
||||
*/
|
||||
isWithin(inputPath: string): boolean {
|
||||
if (!inputPath || inputPath.trim() === "") {
|
||||
return false
|
||||
}
|
||||
|
||||
const normalizedInput = inputPath.trim()
|
||||
|
||||
if (this.containsTraversalPatterns(normalizedInput)) {
|
||||
return false
|
||||
}
|
||||
|
||||
const absolutePath = path.resolve(this.projectRoot, normalizedInput)
|
||||
return this.isWithinProject(absolutePath)
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a path relative to project root.
|
||||
* Returns null if path would be outside project.
|
||||
* @param inputPath - Path to resolve
|
||||
*/
|
||||
resolve(inputPath: string): string | null {
|
||||
const result = this.validateSync(inputPath)
|
||||
return result.status === "valid" ? (result.absolutePath ?? null) : null
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a path or throw an error if invalid.
|
||||
* @param inputPath - Path to resolve
|
||||
* @returns Tuple of [absolutePath, relativePath]
|
||||
* @throws Error if path is invalid
|
||||
*/
|
||||
resolveOrThrow(inputPath: string): [absolutePath: string, relativePath: string] {
|
||||
const result = this.validateSync(inputPath)
|
||||
if (result.status !== "valid" || result.absolutePath === undefined) {
|
||||
throw new Error(result.reason)
|
||||
}
|
||||
return [result.absolutePath, result.relativePath ?? ""]
|
||||
}
|
||||
|
||||
/**
|
||||
* Get relative path from project root.
|
||||
* Returns null if path would be outside project.
|
||||
* @param inputPath - Path to make relative
|
||||
*/
|
||||
relativize(inputPath: string): string | null {
|
||||
const result = this.validateSync(inputPath)
|
||||
return result.status === "valid" ? (result.relativePath ?? null) : null
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the project root path.
|
||||
*/
|
||||
getProjectRoot(): string {
|
||||
return this.projectRoot
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if path contains directory traversal patterns.
|
||||
*/
|
||||
private containsTraversalPatterns(inputPath: string): boolean {
|
||||
const normalized = inputPath.replace(/\\/g, "/")
|
||||
|
||||
if (normalized.includes("..")) {
|
||||
return true
|
||||
}
|
||||
|
||||
if (normalized.startsWith("~")) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if absolute path is within project root.
|
||||
*/
|
||||
private isWithinProject(absolutePath: string): boolean {
|
||||
const normalizedProject = this.projectRoot.replace(/\\/g, "/")
|
||||
const normalizedPath = absolutePath.replace(/\\/g, "/")
|
||||
|
||||
if (normalizedPath === normalizedProject) {
|
||||
return true
|
||||
}
|
||||
|
||||
const projectWithSep = normalizedProject.endsWith("/")
|
||||
? normalizedProject
|
||||
: `${normalizedProject}/`
|
||||
|
||||
return normalizedPath.startsWith(projectWithSep)
|
||||
}
|
||||
|
||||
/**
|
||||
* Check file existence and type.
|
||||
*/
|
||||
private async checkExists(
|
||||
absolutePath: string,
|
||||
options: PathValidatorOptions,
|
||||
): Promise<PathValidationResult | null> {
|
||||
try {
|
||||
const statFn = options.followSymlinks ? fs.stat : fs.lstat
|
||||
const stats = await statFn(absolutePath)
|
||||
|
||||
if (options.requireDirectory && !stats.isDirectory()) {
|
||||
return {
|
||||
status: "invalid",
|
||||
reason: "Path is not a directory",
|
||||
}
|
||||
}
|
||||
|
||||
if (options.requireFile && !stats.isFile()) {
|
||||
return {
|
||||
status: "invalid",
|
||||
reason: "Path is not a file",
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
} catch (error) {
|
||||
if ((error as NodeJS.ErrnoException).code === "ENOENT") {
|
||||
return {
|
||||
status: "invalid",
|
||||
reason: "Path does not exist",
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
status: "invalid",
|
||||
reason: `Cannot access path: ${(error as Error).message}`,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a path validator for a project.
|
||||
* @param projectRoot - Root directory of the project
|
||||
*/
|
||||
export function createPathValidator(projectRoot: string): PathValidator {
|
||||
return new PathValidator(projectRoot)
|
||||
}
|
||||
|
||||
/**
|
||||
* Standalone function for quick path validation.
|
||||
* @param inputPath - Path to validate
|
||||
* @param projectRoot - Project root directory
|
||||
*/
|
||||
export function validatePath(inputPath: string, projectRoot: string): boolean {
|
||||
const validator = new PathValidator(projectRoot)
|
||||
return validator.isWithin(inputPath)
|
||||
}
|
||||
9
packages/ipuaro/src/infrastructure/security/index.ts
Normal file
9
packages/ipuaro/src/infrastructure/security/index.ts
Normal file
@@ -0,0 +1,9 @@
|
||||
// Security module exports
|
||||
export {
|
||||
PathValidator,
|
||||
createPathValidator,
|
||||
validatePath,
|
||||
type PathValidationResult,
|
||||
type PathValidationStatus,
|
||||
type PathValidatorOptions,
|
||||
} from "./PathValidator.js"
|
||||
@@ -0,0 +1,225 @@
|
||||
import type { ISessionStorage, SessionListItem } from "../../domain/services/ISessionStorage.js"
|
||||
import { type ContextState, Session, type SessionStats } from "../../domain/entities/Session.js"
|
||||
import type { ChatMessage } from "../../domain/value-objects/ChatMessage.js"
|
||||
import type { UndoEntry } from "../../domain/value-objects/UndoEntry.js"
|
||||
import { MAX_UNDO_STACK_SIZE } from "../../domain/constants/index.js"
|
||||
import { IpuaroError } from "../../shared/errors/IpuaroError.js"
|
||||
import { RedisClient } from "./RedisClient.js"
|
||||
import { SessionFields, SessionKeys } from "./schema.js"
|
||||
|
||||
/**
|
||||
* Redis implementation of ISessionStorage.
|
||||
* Stores session data in Redis hashes and lists.
|
||||
*/
|
||||
export class RedisSessionStorage implements ISessionStorage {
|
||||
private readonly client: RedisClient
|
||||
|
||||
constructor(client: RedisClient) {
|
||||
this.client = client
|
||||
}
|
||||
|
||||
async saveSession(session: Session): Promise<void> {
|
||||
const redis = this.getRedis()
|
||||
const dataKey = SessionKeys.data(session.id)
|
||||
|
||||
const pipeline = redis.pipeline()
|
||||
|
||||
pipeline.hset(dataKey, SessionFields.projectName, session.projectName)
|
||||
pipeline.hset(dataKey, SessionFields.createdAt, String(session.createdAt))
|
||||
pipeline.hset(dataKey, SessionFields.lastActivityAt, String(session.lastActivityAt))
|
||||
pipeline.hset(dataKey, SessionFields.history, JSON.stringify(session.history))
|
||||
pipeline.hset(dataKey, SessionFields.context, JSON.stringify(session.context))
|
||||
pipeline.hset(dataKey, SessionFields.stats, JSON.stringify(session.stats))
|
||||
pipeline.hset(dataKey, SessionFields.inputHistory, JSON.stringify(session.inputHistory))
|
||||
|
||||
await this.addToSessionsList(session.id)
|
||||
|
||||
await pipeline.exec()
|
||||
}
|
||||
|
||||
async loadSession(sessionId: string): Promise<Session | null> {
|
||||
const redis = this.getRedis()
|
||||
const dataKey = SessionKeys.data(sessionId)
|
||||
|
||||
const data = await redis.hgetall(dataKey)
|
||||
if (!data || Object.keys(data).length === 0) {
|
||||
return null
|
||||
}
|
||||
|
||||
const session = new Session(
|
||||
sessionId,
|
||||
data[SessionFields.projectName],
|
||||
Number(data[SessionFields.createdAt]),
|
||||
)
|
||||
|
||||
session.lastActivityAt = Number(data[SessionFields.lastActivityAt])
|
||||
session.history = this.parseJSON(data[SessionFields.history], "history") as ChatMessage[]
|
||||
session.context = this.parseJSON(data[SessionFields.context], "context") as ContextState
|
||||
session.stats = this.parseJSON(data[SessionFields.stats], "stats") as SessionStats
|
||||
session.inputHistory = this.parseJSON(
|
||||
data[SessionFields.inputHistory],
|
||||
"inputHistory",
|
||||
) as string[]
|
||||
|
||||
const undoStack = await this.getUndoStack(sessionId)
|
||||
for (const entry of undoStack) {
|
||||
session.undoStack.push(entry)
|
||||
}
|
||||
|
||||
return session
|
||||
}
|
||||
|
||||
async deleteSession(sessionId: string): Promise<void> {
|
||||
const redis = this.getRedis()
|
||||
|
||||
await Promise.all([
|
||||
redis.del(SessionKeys.data(sessionId)),
|
||||
redis.del(SessionKeys.undo(sessionId)),
|
||||
redis.lrem(SessionKeys.list, 0, sessionId),
|
||||
])
|
||||
}
|
||||
|
||||
async listSessions(projectName?: string): Promise<SessionListItem[]> {
|
||||
const redis = this.getRedis()
|
||||
const sessionIds = await redis.lrange(SessionKeys.list, 0, -1)
|
||||
|
||||
const sessions: SessionListItem[] = []
|
||||
|
||||
for (const id of sessionIds) {
|
||||
const data = await redis.hgetall(SessionKeys.data(id))
|
||||
if (!data || Object.keys(data).length === 0) {
|
||||
continue
|
||||
}
|
||||
|
||||
const sessionProjectName = data[SessionFields.projectName]
|
||||
if (projectName && sessionProjectName !== projectName) {
|
||||
continue
|
||||
}
|
||||
|
||||
const history = this.parseJSON(data[SessionFields.history], "history") as ChatMessage[]
|
||||
|
||||
sessions.push({
|
||||
id,
|
||||
projectName: sessionProjectName,
|
||||
createdAt: Number(data[SessionFields.createdAt]),
|
||||
lastActivityAt: Number(data[SessionFields.lastActivityAt]),
|
||||
messageCount: history.length,
|
||||
})
|
||||
}
|
||||
|
||||
sessions.sort((a, b) => b.lastActivityAt - a.lastActivityAt)
|
||||
|
||||
return sessions
|
||||
}
|
||||
|
||||
async getLatestSession(projectName: string): Promise<Session | null> {
|
||||
const sessions = await this.listSessions(projectName)
|
||||
if (sessions.length === 0) {
|
||||
return null
|
||||
}
|
||||
|
||||
return this.loadSession(sessions[0].id)
|
||||
}
|
||||
|
||||
async sessionExists(sessionId: string): Promise<boolean> {
|
||||
const redis = this.getRedis()
|
||||
const exists = await redis.exists(SessionKeys.data(sessionId))
|
||||
return exists === 1
|
||||
}
|
||||
|
||||
async pushUndoEntry(sessionId: string, entry: UndoEntry): Promise<void> {
|
||||
const redis = this.getRedis()
|
||||
const undoKey = SessionKeys.undo(sessionId)
|
||||
|
||||
await redis.rpush(undoKey, JSON.stringify(entry))
|
||||
|
||||
const length = await redis.llen(undoKey)
|
||||
if (length > MAX_UNDO_STACK_SIZE) {
|
||||
await redis.lpop(undoKey)
|
||||
}
|
||||
}
|
||||
|
||||
async popUndoEntry(sessionId: string): Promise<UndoEntry | null> {
|
||||
const redis = this.getRedis()
|
||||
const undoKey = SessionKeys.undo(sessionId)
|
||||
|
||||
const data = await redis.rpop(undoKey)
|
||||
if (!data) {
|
||||
return null
|
||||
}
|
||||
|
||||
return this.parseJSON(data, "UndoEntry") as UndoEntry
|
||||
}
|
||||
|
||||
async getUndoStack(sessionId: string): Promise<UndoEntry[]> {
|
||||
const redis = this.getRedis()
|
||||
const undoKey = SessionKeys.undo(sessionId)
|
||||
|
||||
const entries = await redis.lrange(undoKey, 0, -1)
|
||||
return entries.map((entry) => this.parseJSON(entry, "UndoEntry") as UndoEntry)
|
||||
}
|
||||
|
||||
async touchSession(sessionId: string): Promise<void> {
|
||||
const redis = this.getRedis()
|
||||
await redis.hset(
|
||||
SessionKeys.data(sessionId),
|
||||
SessionFields.lastActivityAt,
|
||||
String(Date.now()),
|
||||
)
|
||||
}
|
||||
|
||||
async clearAllSessions(): Promise<void> {
|
||||
const redis = this.getRedis()
|
||||
const sessionIds = await redis.lrange(SessionKeys.list, 0, -1)
|
||||
|
||||
const pipeline = redis.pipeline()
|
||||
for (const id of sessionIds) {
|
||||
pipeline.del(SessionKeys.data(id))
|
||||
pipeline.del(SessionKeys.undo(id))
|
||||
}
|
||||
pipeline.del(SessionKeys.list)
|
||||
|
||||
await pipeline.exec()
|
||||
}
|
||||
|
||||
private async addToSessionsList(sessionId: string): Promise<void> {
|
||||
const redis = this.getRedis()
|
||||
|
||||
const exists = await redis.lpos(SessionKeys.list, sessionId)
|
||||
if (exists === null) {
|
||||
await redis.lpush(SessionKeys.list, sessionId)
|
||||
}
|
||||
}
|
||||
|
||||
private getRedis(): ReturnType<RedisClient["getClient"]> {
|
||||
return this.client.getClient()
|
||||
}
|
||||
|
||||
private parseJSON(data: string | undefined, type: string): unknown {
|
||||
if (!data) {
|
||||
if (type === "history" || type === "inputHistory") {
|
||||
return []
|
||||
}
|
||||
if (type === "context") {
|
||||
return { filesInContext: [], tokenUsage: 0, needsCompression: false }
|
||||
}
|
||||
if (type === "stats") {
|
||||
return {
|
||||
totalTokens: 0,
|
||||
totalTimeMs: 0,
|
||||
toolCalls: 0,
|
||||
editsApplied: 0,
|
||||
editsRejected: 0,
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
try {
|
||||
return JSON.parse(data) as unknown
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Unknown error"
|
||||
throw IpuaroError.parse(`Failed to parse ${type}: ${message}`)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -25,7 +25,7 @@ export class RedisStorage implements IStorage {
|
||||
if (!data) {
|
||||
return null
|
||||
}
|
||||
return this.parseJSON<FileData>(data, "FileData")
|
||||
return this.parseJSON(data, "FileData") as FileData
|
||||
}
|
||||
|
||||
async setFile(path: string, data: FileData): Promise<void> {
|
||||
@@ -44,7 +44,7 @@ export class RedisStorage implements IStorage {
|
||||
const result = new Map<string, FileData>()
|
||||
|
||||
for (const [path, value] of Object.entries(data)) {
|
||||
const parsed = this.parseJSON<FileData>(value, "FileData")
|
||||
const parsed = this.parseJSON(value, "FileData") as FileData | null
|
||||
if (parsed) {
|
||||
result.set(path, parsed)
|
||||
}
|
||||
@@ -64,7 +64,7 @@ export class RedisStorage implements IStorage {
|
||||
if (!data) {
|
||||
return null
|
||||
}
|
||||
return this.parseJSON<FileAST>(data, "FileAST")
|
||||
return this.parseJSON(data, "FileAST") as FileAST
|
||||
}
|
||||
|
||||
async setAST(path: string, ast: FileAST): Promise<void> {
|
||||
@@ -83,7 +83,7 @@ export class RedisStorage implements IStorage {
|
||||
const result = new Map<string, FileAST>()
|
||||
|
||||
for (const [path, value] of Object.entries(data)) {
|
||||
const parsed = this.parseJSON<FileAST>(value, "FileAST")
|
||||
const parsed = this.parseJSON(value, "FileAST") as FileAST | null
|
||||
if (parsed) {
|
||||
result.set(path, parsed)
|
||||
}
|
||||
@@ -98,7 +98,7 @@ export class RedisStorage implements IStorage {
|
||||
if (!data) {
|
||||
return null
|
||||
}
|
||||
return this.parseJSON<FileMeta>(data, "FileMeta")
|
||||
return this.parseJSON(data, "FileMeta") as FileMeta
|
||||
}
|
||||
|
||||
async setMeta(path: string, meta: FileMeta): Promise<void> {
|
||||
@@ -117,7 +117,7 @@ export class RedisStorage implements IStorage {
|
||||
const result = new Map<string, FileMeta>()
|
||||
|
||||
for (const [path, value] of Object.entries(data)) {
|
||||
const parsed = this.parseJSON<FileMeta>(value, "FileMeta")
|
||||
const parsed = this.parseJSON(value, "FileMeta") as FileMeta | null
|
||||
if (parsed) {
|
||||
result.set(path, parsed)
|
||||
}
|
||||
@@ -133,7 +133,7 @@ export class RedisStorage implements IStorage {
|
||||
return new Map()
|
||||
}
|
||||
|
||||
const parsed = this.parseJSON<[string, unknown[]][]>(data, "SymbolIndex")
|
||||
const parsed = this.parseJSON(data, "SymbolIndex") as [string, unknown[]][] | null
|
||||
if (!parsed) {
|
||||
return new Map()
|
||||
}
|
||||
@@ -157,10 +157,10 @@ export class RedisStorage implements IStorage {
|
||||
}
|
||||
}
|
||||
|
||||
const parsed = this.parseJSON<{
|
||||
const parsed = this.parseJSON(data, "DepsGraph") as {
|
||||
imports: [string, string[]][]
|
||||
importedBy: [string, string[]][]
|
||||
}>(data, "DepsGraph")
|
||||
} | null
|
||||
|
||||
if (!parsed) {
|
||||
return {
|
||||
@@ -190,7 +190,7 @@ export class RedisStorage implements IStorage {
|
||||
if (!data) {
|
||||
return null
|
||||
}
|
||||
return this.parseJSON<unknown>(data, "ProjectConfig")
|
||||
return this.parseJSON(data, "ProjectConfig")
|
||||
}
|
||||
|
||||
async setProjectConfig(key: string, value: unknown): Promise<void> {
|
||||
@@ -225,9 +225,9 @@ export class RedisStorage implements IStorage {
|
||||
return this.client.getClient()
|
||||
}
|
||||
|
||||
private parseJSON<T>(data: string, type: string): T | null {
|
||||
private parseJSON(data: string, type: string): unknown {
|
||||
try {
|
||||
return JSON.parse(data) as T
|
||||
return JSON.parse(data) as unknown
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Unknown error"
|
||||
throw IpuaroError.parse(`Failed to parse ${type}: ${message}`)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
// Storage module exports
|
||||
export { RedisClient } from "./RedisClient.js"
|
||||
export { RedisStorage } from "./RedisStorage.js"
|
||||
export { RedisSessionStorage } from "./RedisSessionStorage.js"
|
||||
export {
|
||||
ProjectKeys,
|
||||
SessionKeys,
|
||||
|
||||
@@ -0,0 +1,232 @@
|
||||
import * as path from "node:path"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import type { ComplexityMetrics, FileMeta } from "../../../domain/value-objects/FileMeta.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
/**
|
||||
* Complexity entry for a single file.
|
||||
*/
|
||||
export interface ComplexityEntry {
|
||||
/** Relative path to the file */
|
||||
path: string
|
||||
/** Complexity metrics */
|
||||
metrics: ComplexityMetrics
|
||||
/** File type classification */
|
||||
fileType: "source" | "test" | "config" | "types" | "unknown"
|
||||
/** Whether the file is a hub */
|
||||
isHub: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from get_complexity tool.
|
||||
*/
|
||||
export interface GetComplexityResult {
|
||||
/** The path that was analyzed (file or directory) */
|
||||
analyzedPath: string | null
|
||||
/** Total files analyzed */
|
||||
totalFiles: number
|
||||
/** Average complexity score */
|
||||
averageScore: number
|
||||
/** Files sorted by complexity score (descending) */
|
||||
files: ComplexityEntry[]
|
||||
/** Summary statistics */
|
||||
summary: {
|
||||
highComplexity: number
|
||||
mediumComplexity: number
|
||||
lowComplexity: number
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Complexity thresholds for classification.
|
||||
*/
|
||||
const COMPLEXITY_THRESHOLDS = {
|
||||
high: 60,
|
||||
medium: 30,
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for getting complexity metrics for files.
|
||||
* Can analyze a single file or all files in the project.
|
||||
*/
|
||||
export class GetComplexityTool implements ITool {
|
||||
readonly name = "get_complexity"
|
||||
readonly description =
|
||||
"Get complexity metrics for files. " +
|
||||
"Returns LOC, nesting depth, cyclomatic complexity, and overall score. " +
|
||||
"Without path, returns all files sorted by complexity."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File or directory path to analyze (optional, defaults to entire project)",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "limit",
|
||||
type: "number",
|
||||
description: "Maximum number of files to return (default: 20)",
|
||||
required: false,
|
||||
default: 20,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "analysis" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (params.path !== undefined && typeof params.path !== "string") {
|
||||
return "Parameter 'path' must be a string"
|
||||
}
|
||||
if (params.limit !== undefined) {
|
||||
if (typeof params.limit !== "number" || !Number.isInteger(params.limit)) {
|
||||
return "Parameter 'limit' must be an integer"
|
||||
}
|
||||
if (params.limit < 1) {
|
||||
return "Parameter 'limit' must be at least 1"
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = params.path as string | undefined
|
||||
const limit = (params.limit as number | undefined) ?? 20
|
||||
|
||||
try {
|
||||
const allMetas = await ctx.storage.getAllMetas()
|
||||
|
||||
if (allMetas.size === 0) {
|
||||
return createSuccessResult(
|
||||
callId,
|
||||
{
|
||||
analyzedPath: inputPath ?? null,
|
||||
totalFiles: 0,
|
||||
averageScore: 0,
|
||||
files: [],
|
||||
summary: { highComplexity: 0, mediumComplexity: 0, lowComplexity: 0 },
|
||||
} satisfies GetComplexityResult,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
let filteredMetas = allMetas
|
||||
let analyzedPath: string | null = null
|
||||
|
||||
if (inputPath) {
|
||||
const relativePath = this.normalizePathToRelative(inputPath, ctx.projectRoot)
|
||||
analyzedPath = relativePath
|
||||
filteredMetas = this.filterByPath(allMetas, relativePath)
|
||||
|
||||
if (filteredMetas.size === 0) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`No files found at path: ${relativePath}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const entries: ComplexityEntry[] = []
|
||||
for (const [filePath, meta] of filteredMetas) {
|
||||
entries.push({
|
||||
path: filePath,
|
||||
metrics: meta.complexity,
|
||||
fileType: meta.fileType,
|
||||
isHub: meta.isHub,
|
||||
})
|
||||
}
|
||||
|
||||
entries.sort((a, b) => b.metrics.score - a.metrics.score)
|
||||
|
||||
const summary = this.calculateSummary(entries)
|
||||
const averageScore = this.calculateAverageScore(entries)
|
||||
|
||||
const limitedEntries = entries.slice(0, limit)
|
||||
|
||||
const result: GetComplexityResult = {
|
||||
analyzedPath,
|
||||
totalFiles: entries.length,
|
||||
averageScore,
|
||||
files: limitedEntries,
|
||||
summary,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize input path to relative path from project root.
|
||||
*/
|
||||
private normalizePathToRelative(inputPath: string, projectRoot: string): string {
|
||||
if (path.isAbsolute(inputPath)) {
|
||||
return path.relative(projectRoot, inputPath)
|
||||
}
|
||||
return inputPath
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter metas by path prefix (file or directory).
|
||||
*/
|
||||
private filterByPath(
|
||||
allMetas: Map<string, FileMeta>,
|
||||
targetPath: string,
|
||||
): Map<string, FileMeta> {
|
||||
const filtered = new Map<string, FileMeta>()
|
||||
|
||||
for (const [filePath, meta] of allMetas) {
|
||||
if (filePath === targetPath || filePath.startsWith(`${targetPath}/`)) {
|
||||
filtered.set(filePath, meta)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate summary statistics for complexity entries.
|
||||
*/
|
||||
private calculateSummary(entries: ComplexityEntry[]): {
|
||||
highComplexity: number
|
||||
mediumComplexity: number
|
||||
lowComplexity: number
|
||||
} {
|
||||
let high = 0
|
||||
let medium = 0
|
||||
let low = 0
|
||||
|
||||
for (const entry of entries) {
|
||||
const score = entry.metrics.score
|
||||
if (score >= COMPLEXITY_THRESHOLDS.high) {
|
||||
high++
|
||||
} else if (score >= COMPLEXITY_THRESHOLDS.medium) {
|
||||
medium++
|
||||
} else {
|
||||
low++
|
||||
}
|
||||
}
|
||||
|
||||
return { highComplexity: high, mediumComplexity: medium, lowComplexity: low }
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate average complexity score.
|
||||
*/
|
||||
private calculateAverageScore(entries: ComplexityEntry[]): number {
|
||||
if (entries.length === 0) {
|
||||
return 0
|
||||
}
|
||||
const total = entries.reduce((sum, entry) => sum + entry.metrics.score, 0)
|
||||
return Math.round((total / entries.length) * 100) / 100
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,121 @@
|
||||
import * as path from "node:path"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
/**
|
||||
* Single dependency entry with metadata.
|
||||
*/
|
||||
export interface DependencyEntry {
|
||||
/** Relative path to the dependency */
|
||||
path: string
|
||||
/** Whether the file exists in the project */
|
||||
exists: boolean
|
||||
/** Whether it's an entry point */
|
||||
isEntryPoint: boolean
|
||||
/** Whether it's a hub file */
|
||||
isHub: boolean
|
||||
/** File type classification */
|
||||
fileType: "source" | "test" | "config" | "types" | "unknown"
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from get_dependencies tool.
|
||||
*/
|
||||
export interface GetDependenciesResult {
|
||||
/** The file being analyzed */
|
||||
file: string
|
||||
/** Total number of dependencies */
|
||||
totalDependencies: number
|
||||
/** List of dependencies with metadata */
|
||||
dependencies: DependencyEntry[]
|
||||
/** File type of the source file */
|
||||
fileType: "source" | "test" | "config" | "types" | "unknown"
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for getting files that a specific file imports.
|
||||
* Returns the list of internal dependencies from FileMeta.
|
||||
*/
|
||||
export class GetDependenciesTool implements ITool {
|
||||
readonly name = "get_dependencies"
|
||||
readonly description =
|
||||
"Get files that a specific file imports. " +
|
||||
"Returns internal dependencies resolved to file paths."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path to analyze (relative to project root or absolute)",
|
||||
required: true,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "analysis" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.path !== "string" || params.path.trim() === "") {
|
||||
return "Parameter 'path' is required and must be a non-empty string"
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = (params.path as string).trim()
|
||||
|
||||
try {
|
||||
const relativePath = this.normalizePathToRelative(inputPath, ctx.projectRoot)
|
||||
|
||||
const meta = await ctx.storage.getMeta(relativePath)
|
||||
if (!meta) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`File not found or not indexed: ${relativePath}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const dependencies: DependencyEntry[] = []
|
||||
for (const depPath of meta.dependencies) {
|
||||
const depMeta = await ctx.storage.getMeta(depPath)
|
||||
dependencies.push({
|
||||
path: depPath,
|
||||
exists: depMeta !== null,
|
||||
isEntryPoint: depMeta?.isEntryPoint ?? false,
|
||||
isHub: depMeta?.isHub ?? false,
|
||||
fileType: depMeta?.fileType ?? "unknown",
|
||||
})
|
||||
}
|
||||
|
||||
dependencies.sort((a, b) => a.path.localeCompare(b.path))
|
||||
|
||||
const result: GetDependenciesResult = {
|
||||
file: relativePath,
|
||||
totalDependencies: dependencies.length,
|
||||
dependencies,
|
||||
fileType: meta.fileType,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize input path to relative path from project root.
|
||||
*/
|
||||
private normalizePathToRelative(inputPath: string, projectRoot: string): string {
|
||||
if (path.isAbsolute(inputPath)) {
|
||||
return path.relative(projectRoot, inputPath)
|
||||
}
|
||||
return inputPath
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,124 @@
|
||||
import * as path from "node:path"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
/**
|
||||
* Single dependent entry with metadata.
|
||||
*/
|
||||
export interface DependentEntry {
|
||||
/** Relative path to the dependent file */
|
||||
path: string
|
||||
/** Whether the file is an entry point */
|
||||
isEntryPoint: boolean
|
||||
/** Whether the file is a hub */
|
||||
isHub: boolean
|
||||
/** File type classification */
|
||||
fileType: "source" | "test" | "config" | "types" | "unknown"
|
||||
/** Complexity score of the dependent */
|
||||
complexityScore: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from get_dependents tool.
|
||||
*/
|
||||
export interface GetDependentsResult {
|
||||
/** The file being analyzed */
|
||||
file: string
|
||||
/** Total number of dependents */
|
||||
totalDependents: number
|
||||
/** Whether this file is a hub (>5 dependents) */
|
||||
isHub: boolean
|
||||
/** List of files that import this file */
|
||||
dependents: DependentEntry[]
|
||||
/** File type of the source file */
|
||||
fileType: "source" | "test" | "config" | "types" | "unknown"
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for getting files that import a specific file.
|
||||
* Returns the list of files that depend on the target file.
|
||||
*/
|
||||
export class GetDependentsTool implements ITool {
|
||||
readonly name = "get_dependents"
|
||||
readonly description =
|
||||
"Get files that import a specific file. " +
|
||||
"Returns list of files that depend on the target."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path to analyze (relative to project root or absolute)",
|
||||
required: true,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "analysis" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.path !== "string" || params.path.trim() === "") {
|
||||
return "Parameter 'path' is required and must be a non-empty string"
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = (params.path as string).trim()
|
||||
|
||||
try {
|
||||
const relativePath = this.normalizePathToRelative(inputPath, ctx.projectRoot)
|
||||
|
||||
const meta = await ctx.storage.getMeta(relativePath)
|
||||
if (!meta) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`File not found or not indexed: ${relativePath}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const dependents: DependentEntry[] = []
|
||||
for (const depPath of meta.dependents) {
|
||||
const depMeta = await ctx.storage.getMeta(depPath)
|
||||
dependents.push({
|
||||
path: depPath,
|
||||
isEntryPoint: depMeta?.isEntryPoint ?? false,
|
||||
isHub: depMeta?.isHub ?? false,
|
||||
fileType: depMeta?.fileType ?? "unknown",
|
||||
complexityScore: depMeta?.complexity.score ?? 0,
|
||||
})
|
||||
}
|
||||
|
||||
dependents.sort((a, b) => a.path.localeCompare(b.path))
|
||||
|
||||
const result: GetDependentsResult = {
|
||||
file: relativePath,
|
||||
totalDependents: dependents.length,
|
||||
isHub: meta.isHub,
|
||||
dependents,
|
||||
fileType: meta.fileType,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize input path to relative path from project root.
|
||||
*/
|
||||
private normalizePathToRelative(inputPath: string, projectRoot: string): string {
|
||||
if (path.isAbsolute(inputPath)) {
|
||||
return path.relative(projectRoot, inputPath)
|
||||
}
|
||||
return inputPath
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,276 @@
|
||||
import * as path from "node:path"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import type { FileData } from "../../../domain/value-objects/FileData.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
/**
|
||||
* Types of TODO markers to search for.
|
||||
*/
|
||||
export type TodoType = "TODO" | "FIXME" | "HACK" | "XXX" | "BUG" | "NOTE"
|
||||
|
||||
/**
|
||||
* A single TODO entry found in the codebase.
|
||||
*/
|
||||
export interface TodoEntry {
|
||||
/** Relative path to the file */
|
||||
path: string
|
||||
/** Line number where the TODO is found */
|
||||
line: number
|
||||
/** Type of TODO marker (TODO, FIXME, etc.) */
|
||||
type: TodoType
|
||||
/** The TODO text content */
|
||||
text: string
|
||||
/** Full line content for context */
|
||||
context: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from get_todos tool.
|
||||
*/
|
||||
export interface GetTodosResult {
|
||||
/** The path that was searched (file or directory) */
|
||||
searchedPath: string | null
|
||||
/** Total number of TODOs found */
|
||||
totalTodos: number
|
||||
/** Number of files with TODOs */
|
||||
filesWithTodos: number
|
||||
/** TODOs grouped by type */
|
||||
byType: Record<TodoType, number>
|
||||
/** List of TODO entries */
|
||||
todos: TodoEntry[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Supported TODO marker patterns.
|
||||
*/
|
||||
const TODO_MARKERS: TodoType[] = ["TODO", "FIXME", "HACK", "XXX", "BUG", "NOTE"]
|
||||
|
||||
/**
|
||||
* Regex pattern for matching TODO markers in comments.
|
||||
*/
|
||||
const TODO_PATTERN = new RegExp(
|
||||
`(?://|/\\*|\\*|#)\\s*(${TODO_MARKERS.join("|")})(?:\\([^)]*\\))?:?\\s*(.*)`,
|
||||
"i",
|
||||
)
|
||||
|
||||
/**
|
||||
* Tool for finding TODO/FIXME/HACK comments in the codebase.
|
||||
* Searches through indexed files for common task markers.
|
||||
*/
|
||||
export class GetTodosTool implements ITool {
|
||||
readonly name = "get_todos"
|
||||
readonly description =
|
||||
"Find TODO, FIXME, HACK, XXX, BUG, and NOTE comments in the codebase. " +
|
||||
"Returns list of locations with context."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File or directory to search (optional, defaults to entire project)",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "type",
|
||||
type: "string",
|
||||
description:
|
||||
"Filter by TODO type: TODO, FIXME, HACK, XXX, BUG, NOTE (optional, defaults to all)",
|
||||
required: false,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "analysis" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (params.path !== undefined && typeof params.path !== "string") {
|
||||
return "Parameter 'path' must be a string"
|
||||
}
|
||||
if (params.type !== undefined) {
|
||||
if (typeof params.type !== "string") {
|
||||
return "Parameter 'type' must be a string"
|
||||
}
|
||||
const upperType = params.type.toUpperCase()
|
||||
if (!TODO_MARKERS.includes(upperType as TodoType)) {
|
||||
return `Parameter 'type' must be one of: ${TODO_MARKERS.join(", ")}`
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = params.path as string | undefined
|
||||
const filterType = params.type ? ((params.type as string).toUpperCase() as TodoType) : null
|
||||
|
||||
try {
|
||||
const allFiles = await ctx.storage.getAllFiles()
|
||||
|
||||
if (allFiles.size === 0) {
|
||||
return createSuccessResult(
|
||||
callId,
|
||||
this.createEmptyResult(inputPath ?? null),
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
let filesToSearch = allFiles
|
||||
let searchedPath: string | null = null
|
||||
|
||||
if (inputPath) {
|
||||
const relativePath = this.normalizePathToRelative(inputPath, ctx.projectRoot)
|
||||
searchedPath = relativePath
|
||||
filesToSearch = this.filterByPath(allFiles, relativePath)
|
||||
|
||||
if (filesToSearch.size === 0) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`No files found at path: ${relativePath}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const todos: TodoEntry[] = []
|
||||
const filesWithTodos = new Set<string>()
|
||||
|
||||
for (const [filePath, fileData] of filesToSearch) {
|
||||
const fileTodos = this.findTodosInFile(filePath, fileData.lines, filterType)
|
||||
if (fileTodos.length > 0) {
|
||||
filesWithTodos.add(filePath)
|
||||
todos.push(...fileTodos)
|
||||
}
|
||||
}
|
||||
|
||||
todos.sort((a, b) => {
|
||||
const pathCompare = a.path.localeCompare(b.path)
|
||||
if (pathCompare !== 0) {
|
||||
return pathCompare
|
||||
}
|
||||
return a.line - b.line
|
||||
})
|
||||
|
||||
const byType = this.countByType(todos)
|
||||
|
||||
const result: GetTodosResult = {
|
||||
searchedPath,
|
||||
totalTodos: todos.length,
|
||||
filesWithTodos: filesWithTodos.size,
|
||||
byType,
|
||||
todos,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize input path to relative path from project root.
|
||||
*/
|
||||
private normalizePathToRelative(inputPath: string, projectRoot: string): string {
|
||||
if (path.isAbsolute(inputPath)) {
|
||||
return path.relative(projectRoot, inputPath)
|
||||
}
|
||||
return inputPath
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter files by path prefix.
|
||||
*/
|
||||
private filterByPath(
|
||||
allFiles: Map<string, FileData>,
|
||||
targetPath: string,
|
||||
): Map<string, FileData> {
|
||||
const filtered = new Map<string, FileData>()
|
||||
|
||||
for (const [filePath, fileData] of allFiles) {
|
||||
if (filePath === targetPath || filePath.startsWith(`${targetPath}/`)) {
|
||||
filtered.set(filePath, fileData)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
|
||||
/**
|
||||
* Find all TODOs in a file.
|
||||
*/
|
||||
private findTodosInFile(
|
||||
filePath: string,
|
||||
lines: string[],
|
||||
filterType: TodoType | null,
|
||||
): TodoEntry[] {
|
||||
const todos: TodoEntry[] = []
|
||||
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const line = lines[i]
|
||||
const match = TODO_PATTERN.exec(line)
|
||||
|
||||
if (match) {
|
||||
const type = match[1].toUpperCase() as TodoType
|
||||
const text = match[2].trim()
|
||||
|
||||
if (filterType && type !== filterType) {
|
||||
continue
|
||||
}
|
||||
|
||||
todos.push({
|
||||
path: filePath,
|
||||
line: i + 1,
|
||||
type,
|
||||
text: text || "(no description)",
|
||||
context: line.trim(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return todos
|
||||
}
|
||||
|
||||
/**
|
||||
* Count TODOs by type.
|
||||
*/
|
||||
private countByType(todos: TodoEntry[]): Record<TodoType, number> {
|
||||
const counts: Record<TodoType, number> = {
|
||||
TODO: 0,
|
||||
FIXME: 0,
|
||||
HACK: 0,
|
||||
XXX: 0,
|
||||
BUG: 0,
|
||||
NOTE: 0,
|
||||
}
|
||||
|
||||
for (const todo of todos) {
|
||||
counts[todo.type]++
|
||||
}
|
||||
|
||||
return counts
|
||||
}
|
||||
|
||||
/**
|
||||
* Create empty result structure.
|
||||
*/
|
||||
private createEmptyResult(searchedPath: string | null): GetTodosResult {
|
||||
return {
|
||||
searchedPath,
|
||||
totalTodos: 0,
|
||||
filesWithTodos: 0,
|
||||
byType: {
|
||||
TODO: 0,
|
||||
FIXME: 0,
|
||||
HACK: 0,
|
||||
XXX: 0,
|
||||
BUG: 0,
|
||||
NOTE: 0,
|
||||
},
|
||||
todos: [],
|
||||
}
|
||||
}
|
||||
}
|
||||
20
packages/ipuaro/src/infrastructure/tools/analysis/index.ts
Normal file
20
packages/ipuaro/src/infrastructure/tools/analysis/index.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
// Analysis tools module exports
|
||||
export {
|
||||
GetDependenciesTool,
|
||||
type GetDependenciesResult,
|
||||
type DependencyEntry,
|
||||
} from "./GetDependenciesTool.js"
|
||||
|
||||
export {
|
||||
GetDependentsTool,
|
||||
type GetDependentsResult,
|
||||
type DependentEntry,
|
||||
} from "./GetDependentsTool.js"
|
||||
|
||||
export {
|
||||
GetComplexityTool,
|
||||
type GetComplexityResult,
|
||||
type ComplexityEntry,
|
||||
} from "./GetComplexityTool.js"
|
||||
|
||||
export { GetTodosTool, type GetTodosResult, type TodoEntry, type TodoType } from "./GetTodosTool.js"
|
||||
142
packages/ipuaro/src/infrastructure/tools/edit/CreateFileTool.ts
Normal file
142
packages/ipuaro/src/infrastructure/tools/edit/CreateFileTool.ts
Normal file
@@ -0,0 +1,142 @@
|
||||
import { promises as fs } from "node:fs"
|
||||
import * as path from "node:path"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import { createFileData } from "../../../domain/value-objects/FileData.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
import { hashLines } from "../../../shared/utils/hash.js"
|
||||
import { PathValidator } from "../../security/PathValidator.js"
|
||||
|
||||
/**
|
||||
* Result data from create_file tool.
|
||||
*/
|
||||
export interface CreateFileResult {
|
||||
path: string
|
||||
lines: number
|
||||
size: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for creating new files.
|
||||
* Creates a new file with the specified content.
|
||||
* Requires user confirmation before creating.
|
||||
*/
|
||||
export class CreateFileTool implements ITool {
|
||||
readonly name = "create_file"
|
||||
readonly description =
|
||||
"Create a new file with the specified content. " +
|
||||
"The file path must be within the project root. " +
|
||||
"Requires confirmation before creating."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "content",
|
||||
type: "string",
|
||||
description: "File content",
|
||||
required: true,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = true
|
||||
readonly category = "edit" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.path !== "string" || params.path.trim() === "") {
|
||||
return "Parameter 'path' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
if (typeof params.content !== "string") {
|
||||
return "Parameter 'content' is required and must be a string"
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = params.path as string
|
||||
const content = params.content as string
|
||||
|
||||
const pathValidator = new PathValidator(ctx.projectRoot)
|
||||
|
||||
let absolutePath: string
|
||||
let relativePath: string
|
||||
try {
|
||||
;[absolutePath, relativePath] = pathValidator.resolveOrThrow(inputPath)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
|
||||
try {
|
||||
const exists = await this.fileExists(absolutePath)
|
||||
if (exists) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`File already exists: ${relativePath}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const lines = content.split("\n")
|
||||
|
||||
const confirmed = await ctx.requestConfirmation(
|
||||
`Create new file: ${relativePath} (${String(lines.length)} lines)`,
|
||||
{
|
||||
filePath: relativePath,
|
||||
oldLines: [],
|
||||
newLines: lines,
|
||||
startLine: 1,
|
||||
},
|
||||
)
|
||||
|
||||
if (!confirmed) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
"File creation cancelled by user",
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const dirPath = path.dirname(absolutePath)
|
||||
await fs.mkdir(dirPath, { recursive: true })
|
||||
await fs.writeFile(absolutePath, content, "utf-8")
|
||||
|
||||
const stats = await fs.stat(absolutePath)
|
||||
const fileData = createFileData(lines, hashLines(lines), stats.size, stats.mtimeMs)
|
||||
await ctx.storage.setFile(relativePath, fileData)
|
||||
|
||||
const result: CreateFileResult = {
|
||||
path: relativePath,
|
||||
lines: lines.length,
|
||||
size: stats.size,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if file exists.
|
||||
*/
|
||||
private async fileExists(filePath: string): Promise<boolean> {
|
||||
try {
|
||||
await fs.access(filePath)
|
||||
return true
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
137
packages/ipuaro/src/infrastructure/tools/edit/DeleteFileTool.ts
Normal file
137
packages/ipuaro/src/infrastructure/tools/edit/DeleteFileTool.ts
Normal file
@@ -0,0 +1,137 @@
|
||||
import { promises as fs } from "node:fs"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
import { PathValidator } from "../../security/PathValidator.js"
|
||||
|
||||
/**
|
||||
* Result data from delete_file tool.
|
||||
*/
|
||||
export interface DeleteFileResult {
|
||||
path: string
|
||||
deleted: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for deleting files.
|
||||
* Deletes a file from the filesystem and storage.
|
||||
* Requires user confirmation before deleting.
|
||||
*/
|
||||
export class DeleteFileTool implements ITool {
|
||||
readonly name = "delete_file"
|
||||
readonly description =
|
||||
"Delete a file from the project. " +
|
||||
"The file path must be within the project root. " +
|
||||
"Requires confirmation before deleting."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = true
|
||||
readonly category = "edit" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.path !== "string" || params.path.trim() === "") {
|
||||
return "Parameter 'path' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = params.path as string
|
||||
const pathValidator = new PathValidator(ctx.projectRoot)
|
||||
|
||||
let absolutePath: string
|
||||
let relativePath: string
|
||||
try {
|
||||
;[absolutePath, relativePath] = pathValidator.resolveOrThrow(inputPath)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
|
||||
try {
|
||||
const exists = await this.fileExists(absolutePath)
|
||||
if (!exists) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`File not found: ${relativePath}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const fileContent = await this.getFileContent(absolutePath, relativePath, ctx)
|
||||
|
||||
const confirmed = await ctx.requestConfirmation(`Delete file: ${relativePath}`, {
|
||||
filePath: relativePath,
|
||||
oldLines: fileContent,
|
||||
newLines: [],
|
||||
startLine: 1,
|
||||
})
|
||||
|
||||
if (!confirmed) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
"File deletion cancelled by user",
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
await fs.unlink(absolutePath)
|
||||
|
||||
await ctx.storage.deleteFile(relativePath)
|
||||
await ctx.storage.deleteAST(relativePath)
|
||||
await ctx.storage.deleteMeta(relativePath)
|
||||
|
||||
const result: DeleteFileResult = {
|
||||
path: relativePath,
|
||||
deleted: true,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if file exists.
|
||||
*/
|
||||
private async fileExists(filePath: string): Promise<boolean> {
|
||||
try {
|
||||
const stats = await fs.stat(filePath)
|
||||
return stats.isFile()
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get file content for diff display.
|
||||
*/
|
||||
private async getFileContent(
|
||||
absolutePath: string,
|
||||
relativePath: string,
|
||||
ctx: ToolContext,
|
||||
): Promise<string[]> {
|
||||
const fileData = await ctx.storage.getFile(relativePath)
|
||||
if (fileData) {
|
||||
return fileData.lines
|
||||
}
|
||||
|
||||
const content = await fs.readFile(absolutePath, "utf-8")
|
||||
return content.split("\n")
|
||||
}
|
||||
}
|
||||
227
packages/ipuaro/src/infrastructure/tools/edit/EditLinesTool.ts
Normal file
227
packages/ipuaro/src/infrastructure/tools/edit/EditLinesTool.ts
Normal file
@@ -0,0 +1,227 @@
|
||||
import { promises as fs } from "node:fs"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import { createFileData } from "../../../domain/value-objects/FileData.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
import { hashLines } from "../../../shared/utils/hash.js"
|
||||
import { PathValidator } from "../../security/PathValidator.js"
|
||||
|
||||
/**
|
||||
* Result data from edit_lines tool.
|
||||
*/
|
||||
export interface EditLinesResult {
|
||||
path: string
|
||||
startLine: number
|
||||
endLine: number
|
||||
linesReplaced: number
|
||||
linesInserted: number
|
||||
totalLines: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for editing specific lines in a file.
|
||||
* Replaces lines from start to end with new content.
|
||||
* Requires user confirmation before applying changes.
|
||||
*/
|
||||
export class EditLinesTool implements ITool {
|
||||
readonly name = "edit_lines"
|
||||
readonly description =
|
||||
"Replace lines in a file. Replaces lines from start to end (inclusive) with new content. " +
|
||||
"Requires confirmation before applying changes."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "start",
|
||||
type: "number",
|
||||
description: "Start line number (1-based, inclusive)",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "end",
|
||||
type: "number",
|
||||
description: "End line number (1-based, inclusive)",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "content",
|
||||
type: "string",
|
||||
description: "New content to insert (can be multi-line)",
|
||||
required: true,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = true
|
||||
readonly category = "edit" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.path !== "string" || params.path.trim() === "") {
|
||||
return "Parameter 'path' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
if (typeof params.start !== "number" || !Number.isInteger(params.start)) {
|
||||
return "Parameter 'start' is required and must be an integer"
|
||||
}
|
||||
if (params.start < 1) {
|
||||
return "Parameter 'start' must be >= 1"
|
||||
}
|
||||
|
||||
if (typeof params.end !== "number" || !Number.isInteger(params.end)) {
|
||||
return "Parameter 'end' is required and must be an integer"
|
||||
}
|
||||
if (params.end < 1) {
|
||||
return "Parameter 'end' must be >= 1"
|
||||
}
|
||||
|
||||
if (params.start > params.end) {
|
||||
return "Parameter 'start' must be <= 'end'"
|
||||
}
|
||||
|
||||
if (typeof params.content !== "string") {
|
||||
return "Parameter 'content' is required and must be a string"
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = params.path as string
|
||||
const startLine = params.start as number
|
||||
const endLine = params.end as number
|
||||
const newContent = params.content as string
|
||||
|
||||
const pathValidator = new PathValidator(ctx.projectRoot)
|
||||
|
||||
let absolutePath: string
|
||||
let relativePath: string
|
||||
try {
|
||||
;[absolutePath, relativePath] = pathValidator.resolveOrThrow(inputPath)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
|
||||
try {
|
||||
const currentLines = await this.getCurrentLines(absolutePath, relativePath, ctx)
|
||||
const totalLines = currentLines.length
|
||||
|
||||
if (startLine > totalLines) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`Start line ${String(startLine)} exceeds file length (${String(totalLines)} lines)`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const adjustedEnd = Math.min(endLine, totalLines)
|
||||
const conflictCheck = await this.checkHashConflict(relativePath, currentLines, ctx)
|
||||
if (conflictCheck) {
|
||||
return createErrorResult(callId, conflictCheck, Date.now() - startTime)
|
||||
}
|
||||
|
||||
const oldLines = currentLines.slice(startLine - 1, adjustedEnd)
|
||||
const newLines = newContent.split("\n")
|
||||
|
||||
const confirmed = await ctx.requestConfirmation(
|
||||
`Replace lines ${String(startLine)}-${String(adjustedEnd)} in ${relativePath}`,
|
||||
{
|
||||
filePath: relativePath,
|
||||
oldLines,
|
||||
newLines,
|
||||
startLine,
|
||||
},
|
||||
)
|
||||
|
||||
if (!confirmed) {
|
||||
return createErrorResult(callId, "Edit cancelled by user", Date.now() - startTime)
|
||||
}
|
||||
|
||||
const updatedLines = [
|
||||
...currentLines.slice(0, startLine - 1),
|
||||
...newLines,
|
||||
...currentLines.slice(adjustedEnd),
|
||||
]
|
||||
|
||||
await this.applyChanges(absolutePath, relativePath, updatedLines, ctx)
|
||||
|
||||
const result: EditLinesResult = {
|
||||
path: relativePath,
|
||||
startLine,
|
||||
endLine: adjustedEnd,
|
||||
linesReplaced: adjustedEnd - startLine + 1,
|
||||
linesInserted: newLines.length,
|
||||
totalLines: updatedLines.length,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current file lines from storage or filesystem.
|
||||
*/
|
||||
private async getCurrentLines(
|
||||
absolutePath: string,
|
||||
relativePath: string,
|
||||
ctx: ToolContext,
|
||||
): Promise<string[]> {
|
||||
const fileData = await ctx.storage.getFile(relativePath)
|
||||
if (fileData) {
|
||||
return fileData.lines
|
||||
}
|
||||
|
||||
const content = await fs.readFile(absolutePath, "utf-8")
|
||||
return content.split("\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if file has changed since it was indexed.
|
||||
* Returns error message if conflict detected, null otherwise.
|
||||
*/
|
||||
private async checkHashConflict(
|
||||
relativePath: string,
|
||||
currentLines: string[],
|
||||
ctx: ToolContext,
|
||||
): Promise<string | null> {
|
||||
const storedFile = await ctx.storage.getFile(relativePath)
|
||||
if (!storedFile) {
|
||||
return null
|
||||
}
|
||||
|
||||
const currentHash = hashLines(currentLines)
|
||||
if (storedFile.hash !== currentHash) {
|
||||
return "File has been modified externally. Please refresh the file before editing."
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply changes to filesystem and storage.
|
||||
*/
|
||||
private async applyChanges(
|
||||
absolutePath: string,
|
||||
relativePath: string,
|
||||
lines: string[],
|
||||
ctx: ToolContext,
|
||||
): Promise<void> {
|
||||
const content = lines.join("\n")
|
||||
await fs.writeFile(absolutePath, content, "utf-8")
|
||||
|
||||
const stats = await fs.stat(absolutePath)
|
||||
const fileData = createFileData(lines, hashLines(lines), stats.size, stats.mtimeMs)
|
||||
await ctx.storage.setFile(relativePath, fileData)
|
||||
}
|
||||
}
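The splice above is 1-based and inclusive on both ends; a tiny standalone example of the same arithmetic:

```typescript
// Worked example of the 1-based, inclusive splice used by EditLinesTool.
// Replacing lines 2-3 of a 4-line file with two new lines:
const currentLines = ["a", "b", "c", "d"]
const startLine = 2
const adjustedEnd = 3
const newLines = ["B1", "B2"]

const updatedLines = [
    ...currentLines.slice(0, startLine - 1), // lines before the range -> ["a"]
    ...newLines,                             // replacement            -> ["B1", "B2"]
    ...currentLines.slice(adjustedEnd),      // lines after the range  -> ["d"]
]
// updatedLines === ["a", "B1", "B2", "d"]; linesReplaced = 2, linesInserted = 2
console.log(updatedLines)
```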
4 packages/ipuaro/src/infrastructure/tools/edit/index.ts Normal file
@@ -0,0 +1,4 @@
// Edit tools exports
export { EditLinesTool, type EditLinesResult } from "./EditLinesTool.js"
export { CreateFileTool, type CreateFileResult } from "./CreateFileTool.js"
export { DeleteFileTool, type DeleteFileResult } from "./DeleteFileTool.js"
155 packages/ipuaro/src/infrastructure/tools/git/GitCommitTool.ts Normal file
@@ -0,0 +1,155 @@
import { type CommitResult, type SimpleGit, simpleGit } from "simple-git"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
/**
|
||||
* Author information.
|
||||
*/
|
||||
export interface CommitAuthor {
|
||||
name: string
|
||||
email: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from git_commit tool.
|
||||
*/
|
||||
export interface GitCommitResult {
|
||||
/** Commit hash */
|
||||
hash: string
|
||||
/** Current branch */
|
||||
branch: string
|
||||
/** Commit message */
|
||||
message: string
|
||||
/** Number of files changed */
|
||||
filesChanged: number
|
||||
/** Number of insertions */
|
||||
insertions: number
|
||||
/** Number of deletions */
|
||||
deletions: number
|
||||
/** Author information */
|
||||
author: CommitAuthor | null
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for creating git commits.
|
||||
* Requires confirmation before execution.
|
||||
*/
|
||||
export class GitCommitTool implements ITool {
|
||||
readonly name = "git_commit"
|
||||
readonly description =
|
||||
"Create a git commit with the specified message. " +
|
||||
"Will ask for confirmation. Optionally stage specific files first."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "message",
|
||||
type: "string",
|
||||
description: "Commit message",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "files",
|
||||
type: "array",
|
||||
description: "Files to stage before commit (optional, defaults to all staged)",
|
||||
required: false,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = true
|
||||
readonly category = "git" as const
|
||||
|
||||
private readonly gitFactory: (basePath: string) => SimpleGit
|
||||
|
||||
constructor(gitFactory?: (basePath: string) => SimpleGit) {
|
||||
this.gitFactory = gitFactory ?? ((basePath: string) => simpleGit(basePath))
|
||||
}
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (params.message === undefined) {
|
||||
return "Parameter 'message' is required"
|
||||
}
|
||||
if (typeof params.message !== "string") {
|
||||
return "Parameter 'message' must be a string"
|
||||
}
|
||||
if (params.message.trim() === "") {
|
||||
return "Parameter 'message' cannot be empty"
|
||||
}
|
||||
if (params.files !== undefined) {
|
||||
if (!Array.isArray(params.files)) {
|
||||
return "Parameter 'files' must be an array"
|
||||
}
|
||||
for (const file of params.files) {
|
||||
if (typeof file !== "string") {
|
||||
return "Parameter 'files' must be an array of strings"
|
||||
}
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const message = params.message as string
|
||||
const files = params.files as string[] | undefined
|
||||
|
||||
try {
|
||||
const git = this.gitFactory(ctx.projectRoot)
|
||||
|
||||
const isRepo = await git.checkIsRepo()
|
||||
if (!isRepo) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
"Not a git repository. Initialize with 'git init' first.",
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
if (files && files.length > 0) {
|
||||
await git.add(files)
|
||||
}
|
||||
|
||||
const status = await git.status()
|
||||
if (status.staged.length === 0 && (!files || files.length === 0)) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
"Nothing to commit. Stage files first with 'git add' or provide 'files' parameter.",
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const commitSummary = `Committing ${String(status.staged.length)} file(s): ${message}`
|
||||
const confirmed = await ctx.requestConfirmation(commitSummary)
|
||||
|
||||
if (!confirmed) {
|
||||
return createErrorResult(callId, "Commit cancelled by user", Date.now() - startTime)
|
||||
}
|
||||
|
||||
const commitResult = await git.commit(message)
|
||||
const result = this.formatCommitResult(commitResult, message)
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message_ = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message_, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Format simple-git CommitResult into our result structure.
|
||||
*/
|
||||
private formatCommitResult(commit: CommitResult, message: string): GitCommitResult {
|
||||
return {
|
||||
hash: commit.commit,
|
||||
branch: commit.branch,
|
||||
message,
|
||||
filesChanged: commit.summary.changes,
|
||||
insertions: commit.summary.insertions,
|
||||
deletions: commit.summary.deletions,
|
||||
author: commit.author ?? null,
|
||||
}
|
||||
}
|
||||
}
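The optional gitFactory constructor argument exists so tests can substitute a fake SimpleGit; a hedged sketch of such a double, stubbing only the methods this tool touches (the fake values are illustrative):

```typescript
// Hypothetical fake SimpleGit showing why the gitFactory parameter exists.
import type { SimpleGit } from "simple-git"
import { GitCommitTool } from "./GitCommitTool.js"

const fakeGit = {
    checkIsRepo: async () => true,
    add: async () => undefined,
    status: async () => ({ staged: ["src/a.ts"] }),
    commit: async () => ({
        commit: "abc1234",
        branch: "main",
        summary: { changes: 1, insertions: 10, deletions: 2 },
        author: null,
    }),
} as unknown as SimpleGit // only the calls GitCommitTool makes are stubbed

const tool = new GitCommitTool(() => fakeGit)
// tool.execute({ message: "feat: add tool" }, ctx) now commits against the fake,
// so a unit test never touches a real repository.
```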
155 packages/ipuaro/src/infrastructure/tools/git/GitDiffTool.ts Normal file
@@ -0,0 +1,155 @@
import { simpleGit, type SimpleGit } from "simple-git"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
/**
|
||||
* A single file diff entry.
|
||||
*/
|
||||
export interface DiffEntry {
|
||||
/** File path */
|
||||
file: string
|
||||
/** Number of insertions */
|
||||
insertions: number
|
||||
/** Number of deletions */
|
||||
deletions: number
|
||||
/** Whether the file is binary */
|
||||
binary: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from git_diff tool.
|
||||
*/
|
||||
export interface GitDiffResult {
|
||||
/** Whether showing staged or all changes */
|
||||
staged: boolean
|
||||
/** Path filter applied (null if all files) */
|
||||
pathFilter: string | null
|
||||
/** Whether there are any changes */
|
||||
hasChanges: boolean
|
||||
/** Summary of changes */
|
||||
summary: {
|
||||
/** Number of files changed */
|
||||
filesChanged: number
|
||||
/** Total insertions */
|
||||
insertions: number
|
||||
/** Total deletions */
|
||||
deletions: number
|
||||
}
|
||||
/** List of changed files */
|
||||
files: DiffEntry[]
|
||||
/** Full diff text */
|
||||
diff: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for getting uncommitted git changes (diff).
|
||||
* Shows what has changed but not yet committed.
|
||||
*/
|
||||
export class GitDiffTool implements ITool {
|
||||
readonly name = "git_diff"
|
||||
readonly description =
|
||||
"Get uncommitted changes (diff). " + "Shows what has changed but not yet committed."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "Limit diff to specific file or directory",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "staged",
|
||||
type: "boolean",
|
||||
description: "Show only staged changes (default: false, shows all)",
|
||||
required: false,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "git" as const
|
||||
|
||||
private readonly gitFactory: (basePath: string) => SimpleGit
|
||||
|
||||
constructor(gitFactory?: (basePath: string) => SimpleGit) {
|
||||
this.gitFactory = gitFactory ?? ((basePath: string) => simpleGit(basePath))
|
||||
}
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (params.path !== undefined && typeof params.path !== "string") {
|
||||
return "Parameter 'path' must be a string"
|
||||
}
|
||||
if (params.staged !== undefined && typeof params.staged !== "boolean") {
|
||||
return "Parameter 'staged' must be a boolean"
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const pathFilter = (params.path as string) ?? null
|
||||
const staged = (params.staged as boolean) ?? false
|
||||
|
||||
try {
|
||||
const git = this.gitFactory(ctx.projectRoot)
|
||||
|
||||
const isRepo = await git.checkIsRepo()
|
||||
if (!isRepo) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
"Not a git repository. Initialize with 'git init' first.",
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const diffArgs = this.buildDiffArgs(staged, pathFilter)
|
||||
const diffSummary = await git.diffSummary(diffArgs)
|
||||
const diffText = await git.diff(diffArgs)
|
||||
|
||||
const files: DiffEntry[] = diffSummary.files.map((f) => ({
|
||||
file: f.file,
|
||||
insertions: "insertions" in f ? f.insertions : 0,
|
||||
deletions: "deletions" in f ? f.deletions : 0,
|
||||
binary: f.binary,
|
||||
}))
|
||||
|
||||
const result: GitDiffResult = {
|
||||
staged,
|
||||
pathFilter,
|
||||
hasChanges: diffSummary.files.length > 0,
|
||||
summary: {
|
||||
filesChanged: diffSummary.files.length,
|
||||
insertions: diffSummary.insertions,
|
||||
deletions: diffSummary.deletions,
|
||||
},
|
||||
files,
|
||||
diff: diffText,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build diff arguments array.
|
||||
*/
|
||||
private buildDiffArgs(staged: boolean, pathFilter: string | null): string[] {
|
||||
const args: string[] = []
|
||||
|
||||
if (staged) {
|
||||
args.push("--cached")
|
||||
}
|
||||
|
||||
if (pathFilter) {
|
||||
args.push("--", pathFilter)
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
}
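buildDiffArgs maps the two parameters onto plain git arguments; a small sketch of the resulting vectors plus a validation call (the ctx wiring is omitted here since this tool is read-only):

```typescript
// Argument vectors handed to simple-git by buildDiffArgs:
//   staged=false, no path -> []                         (git diff)
//   staged=true,  no path -> ["--cached"]               (git diff --cached)
//   staged=false, "src"   -> ["--", "src"]              (git diff -- src)
//   staged=true,  "src"   -> ["--cached", "--", "src"]  (git diff --cached -- src)
import { GitDiffTool } from "./GitDiffTool.js"

const tool = new GitDiffTool()
console.log(tool.validateParams({ staged: true, path: "src/domain" })) // null -> valid
```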
129 packages/ipuaro/src/infrastructure/tools/git/GitStatusTool.ts Normal file
@@ -0,0 +1,129 @@
import { simpleGit, type SimpleGit, type StatusResult } from "simple-git"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
/**
|
||||
* File status entry in git status.
|
||||
*/
|
||||
export interface FileStatusEntry {
|
||||
/** Relative file path */
|
||||
path: string
|
||||
/** Working directory status (modified, deleted, etc.) */
|
||||
workingDir: string
|
||||
/** Index/staging status */
|
||||
index: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from git_status tool.
|
||||
*/
|
||||
export interface GitStatusResult {
|
||||
/** Current branch name */
|
||||
branch: string
|
||||
/** Tracking branch (e.g., origin/main) */
|
||||
tracking: string | null
|
||||
/** Number of commits ahead of tracking */
|
||||
ahead: number
|
||||
/** Number of commits behind tracking */
|
||||
behind: number
|
||||
/** Files staged for commit */
|
||||
staged: FileStatusEntry[]
|
||||
/** Modified files not staged */
|
||||
modified: FileStatusEntry[]
|
||||
/** Untracked files */
|
||||
untracked: string[]
|
||||
/** Files with merge conflicts */
|
||||
conflicted: string[]
|
||||
/** Whether working directory is clean */
|
||||
isClean: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for getting git repository status.
|
||||
* Returns branch info, staged/modified/untracked files.
|
||||
*/
|
||||
export class GitStatusTool implements ITool {
|
||||
readonly name = "git_status"
|
||||
readonly description =
|
||||
"Get current git repository status. " +
|
||||
"Returns branch name, staged files, modified files, and untracked files."
|
||||
readonly parameters: ToolParameterSchema[] = []
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "git" as const
|
||||
|
||||
private readonly gitFactory: (basePath: string) => SimpleGit
|
||||
|
||||
constructor(gitFactory?: (basePath: string) => SimpleGit) {
|
||||
this.gitFactory = gitFactory ?? ((basePath: string) => simpleGit(basePath))
|
||||
}
|
||||
|
||||
validateParams(_params: Record<string, unknown>): string | null {
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(_params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
try {
|
||||
const git = this.gitFactory(ctx.projectRoot)
|
||||
|
||||
const isRepo = await git.checkIsRepo()
|
||||
if (!isRepo) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
"Not a git repository. Initialize with 'git init' first.",
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const status = await git.status()
|
||||
const result = this.formatStatus(status)
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Format simple-git StatusResult into our result structure.
|
||||
*/
|
||||
private formatStatus(status: StatusResult): GitStatusResult {
|
||||
const staged: FileStatusEntry[] = []
|
||||
const modified: FileStatusEntry[] = []
|
||||
|
||||
for (const file of status.files) {
|
||||
const entry: FileStatusEntry = {
|
||||
path: file.path,
|
||||
workingDir: file.working_dir,
|
||||
index: file.index,
|
||||
}
|
||||
|
||||
if (file.index !== " " && file.index !== "?") {
|
||||
staged.push(entry)
|
||||
}
|
||||
|
||||
if (file.working_dir !== " " && file.working_dir !== "?") {
|
||||
modified.push(entry)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
branch: status.current ?? "HEAD (detached)",
|
||||
tracking: status.tracking ?? null,
|
||||
ahead: status.ahead,
|
||||
behind: status.behind,
|
||||
staged,
|
||||
modified,
|
||||
untracked: status.not_added,
|
||||
conflicted: status.conflicted,
|
||||
isClean: status.isClean(),
|
||||
}
|
||||
}
|
||||
}
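formatStatus reads simple-git's two-character status codes; the same mapping, restated as a standalone check:

```typescript
// How formatStatus classifies simple-git status codes:
//   index="M", working_dir=" "  -> staged only
//   index=" ", working_dir="M"  -> modified only
//   index="M", working_dir="M"  -> both staged and modified
//   index="?", working_dir="?"  -> neither; surfaced via status.not_added (untracked)
const isStaged = (index: string) => index !== " " && index !== "?"
const isModified = (workingDir: string) => workingDir !== " " && workingDir !== "?"
console.log(isStaged("M"), isModified(" ")) // true false
```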
6 packages/ipuaro/src/infrastructure/tools/git/index.ts Normal file
@@ -0,0 +1,6 @@
// Git tools exports
export { GitStatusTool, type GitStatusResult, type FileStatusEntry } from "./GitStatusTool.js"

export { GitDiffTool, type GitDiffResult, type DiffEntry } from "./GitDiffTool.js"

export { GitCommitTool, type GitCommitResult, type CommitAuthor } from "./GitCommitTool.js"
75 packages/ipuaro/src/infrastructure/tools/index.ts Normal file
@@ -0,0 +1,75 @@
// Tools module exports
|
||||
export { ToolRegistry } from "./registry.js"
|
||||
|
||||
// Read tools
|
||||
export { GetLinesTool, type GetLinesResult } from "./read/GetLinesTool.js"
|
||||
export { GetFunctionTool, type GetFunctionResult } from "./read/GetFunctionTool.js"
|
||||
export { GetClassTool, type GetClassResult } from "./read/GetClassTool.js"
|
||||
export {
|
||||
GetStructureTool,
|
||||
type GetStructureResult,
|
||||
type TreeNode,
|
||||
} from "./read/GetStructureTool.js"
|
||||
|
||||
// Edit tools
|
||||
export { EditLinesTool, type EditLinesResult } from "./edit/EditLinesTool.js"
|
||||
export { CreateFileTool, type CreateFileResult } from "./edit/CreateFileTool.js"
|
||||
export { DeleteFileTool, type DeleteFileResult } from "./edit/DeleteFileTool.js"
|
||||
|
||||
// Search tools
|
||||
export {
|
||||
FindReferencesTool,
|
||||
type FindReferencesResult,
|
||||
type SymbolReference,
|
||||
} from "./search/FindReferencesTool.js"
|
||||
export {
|
||||
FindDefinitionTool,
|
||||
type FindDefinitionResult,
|
||||
type DefinitionLocation,
|
||||
} from "./search/FindDefinitionTool.js"
|
||||
|
||||
// Analysis tools
|
||||
export {
|
||||
GetDependenciesTool,
|
||||
type GetDependenciesResult,
|
||||
type DependencyEntry,
|
||||
} from "./analysis/GetDependenciesTool.js"
|
||||
|
||||
export {
|
||||
GetDependentsTool,
|
||||
type GetDependentsResult,
|
||||
type DependentEntry,
|
||||
} from "./analysis/GetDependentsTool.js"
|
||||
|
||||
export {
|
||||
GetComplexityTool,
|
||||
type GetComplexityResult,
|
||||
type ComplexityEntry,
|
||||
} from "./analysis/GetComplexityTool.js"
|
||||
|
||||
export {
|
||||
GetTodosTool,
|
||||
type GetTodosResult,
|
||||
type TodoEntry,
|
||||
type TodoType,
|
||||
} from "./analysis/GetTodosTool.js"
|
||||
|
||||
// Git tools
|
||||
export { GitStatusTool, type GitStatusResult, type FileStatusEntry } from "./git/GitStatusTool.js"
|
||||
|
||||
export { GitDiffTool, type GitDiffResult, type DiffEntry } from "./git/GitDiffTool.js"
|
||||
|
||||
export { GitCommitTool, type GitCommitResult, type CommitAuthor } from "./git/GitCommitTool.js"
|
||||
|
||||
// Run tools
|
||||
export {
|
||||
CommandSecurity,
|
||||
DEFAULT_BLACKLIST,
|
||||
DEFAULT_WHITELIST,
|
||||
type CommandClassification,
|
||||
type SecurityCheckResult,
|
||||
} from "./run/CommandSecurity.js"
|
||||
|
||||
export { RunCommandTool, type RunCommandResult } from "./run/RunCommandTool.js"
|
||||
|
||||
export { RunTestsTool, type RunTestsResult, type TestRunner } from "./run/RunTestsTool.js"
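A minimal wiring sketch using only names exported from this barrel; the particular tool set registered here is illustrative, and the import path depends on where the snippet lives.

```typescript
// Registering a few tools from the barrel; adjust the import path as needed.
import { EditLinesTool, GetLinesTool, GitStatusTool, ToolRegistry } from "./index.js"

const registry = new ToolRegistry()
registry.register(new GetLinesTool())
registry.register(new EditLinesTool())
registry.register(new GitStatusTool())

console.log(registry.getNames())           // ["get_lines", "edit_lines", "git_status"]
console.log(registry.getSafeTools().length) // tools that skip user confirmation
```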
166 packages/ipuaro/src/infrastructure/tools/read/GetClassTool.ts Normal file
@@ -0,0 +1,166 @@
import { promises as fs } from "node:fs"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import type { ClassInfo } from "../../../domain/value-objects/FileAST.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
import { PathValidator } from "../../security/PathValidator.js"
|
||||
|
||||
/**
|
||||
* Result data from get_class tool.
|
||||
*/
|
||||
export interface GetClassResult {
|
||||
path: string
|
||||
name: string
|
||||
startLine: number
|
||||
endLine: number
|
||||
isExported: boolean
|
||||
isAbstract: boolean
|
||||
extends?: string
|
||||
implements: string[]
|
||||
methods: string[]
|
||||
properties: string[]
|
||||
content: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for retrieving a class's source code by name.
|
||||
* Uses AST to find exact line range.
|
||||
*/
|
||||
export class GetClassTool implements ITool {
|
||||
readonly name = "get_class"
|
||||
readonly description =
|
||||
"Get a class's source code by name. Uses AST to find exact line range. " +
|
||||
"Returns the class code with line numbers."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "name",
|
||||
type: "string",
|
||||
description: "Class name to retrieve",
|
||||
required: true,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "read" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.path !== "string" || params.path.trim() === "") {
|
||||
return "Parameter 'path' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
if (typeof params.name !== "string" || params.name.trim() === "") {
|
||||
return "Parameter 'name' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = params.path as string
|
||||
const className = params.name as string
|
||||
const pathValidator = new PathValidator(ctx.projectRoot)
|
||||
|
||||
let absolutePath: string
|
||||
let relativePath: string
|
||||
try {
|
||||
;[absolutePath, relativePath] = pathValidator.resolveOrThrow(inputPath)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
|
||||
try {
|
||||
const ast = await ctx.storage.getAST(relativePath)
|
||||
if (!ast) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`AST not found for "${relativePath}". File may not be indexed.`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const classInfo = this.findClass(ast.classes, className)
|
||||
if (!classInfo) {
|
||||
const available = ast.classes.map((c) => c.name).join(", ") || "none"
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`Class "${className}" not found in "${relativePath}". Available: ${available}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const lines = await this.getFileLines(absolutePath, relativePath, ctx)
|
||||
const classLines = lines.slice(classInfo.lineStart - 1, classInfo.lineEnd)
|
||||
const content = this.formatLinesWithNumbers(classLines, classInfo.lineStart)
|
||||
|
||||
const result: GetClassResult = {
|
||||
path: relativePath,
|
||||
name: classInfo.name,
|
||||
startLine: classInfo.lineStart,
|
||||
endLine: classInfo.lineEnd,
|
||||
isExported: classInfo.isExported,
|
||||
isAbstract: classInfo.isAbstract,
|
||||
extends: classInfo.extends,
|
||||
implements: classInfo.implements,
|
||||
methods: classInfo.methods.map((m) => m.name),
|
||||
properties: classInfo.properties.map((p) => p.name),
|
||||
content,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Find class by name in AST.
|
||||
*/
|
||||
private findClass(classes: ClassInfo[], name: string): ClassInfo | undefined {
|
||||
return classes.find((c) => c.name === name)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get file lines from storage or filesystem.
|
||||
*/
|
||||
private async getFileLines(
|
||||
absolutePath: string,
|
||||
relativePath: string,
|
||||
ctx: ToolContext,
|
||||
): Promise<string[]> {
|
||||
const fileData = await ctx.storage.getFile(relativePath)
|
||||
if (fileData) {
|
||||
return fileData.lines
|
||||
}
|
||||
|
||||
const content = await fs.readFile(absolutePath, "utf-8")
|
||||
return content.split("\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Format lines with line numbers.
|
||||
*/
|
||||
private formatLinesWithNumbers(lines: string[], startLine: number): string {
|
||||
const maxLineNum = startLine + lines.length - 1
|
||||
const padWidth = String(maxLineNum).length
|
||||
|
||||
return lines
|
||||
.map((line, index) => {
|
||||
const lineNum = String(startLine + index).padStart(padWidth, " ")
|
||||
return `${lineNum}│${line}`
|
||||
})
|
||||
.join("\n")
|
||||
}
|
||||
}
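The AST's lineStart/lineEnd are 1-based and inclusive, which is why the slice subtracts one only from the start; a standalone example:

```typescript
// A class reported at lineStart=3, lineEnd=5 maps onto Array.prototype.slice like this:
const lines = ["import x", "", "class A {", "    method() {}", "}"]
const lineStart = 3
const lineEnd = 5
const classLines = lines.slice(lineStart - 1, lineEnd)
// classLines === ["class A {", "    method() {}", "}"]
console.log(classLines.join("\n"))
```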
162 packages/ipuaro/src/infrastructure/tools/read/GetFunctionTool.ts Normal file
@@ -0,0 +1,162 @@
import { promises as fs } from "node:fs"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import type { FunctionInfo } from "../../../domain/value-objects/FileAST.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
import { PathValidator } from "../../security/PathValidator.js"
|
||||
|
||||
/**
|
||||
* Result data from get_function tool.
|
||||
*/
|
||||
export interface GetFunctionResult {
|
||||
path: string
|
||||
name: string
|
||||
startLine: number
|
||||
endLine: number
|
||||
isAsync: boolean
|
||||
isExported: boolean
|
||||
params: string[]
|
||||
returnType?: string
|
||||
content: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for retrieving a function's source code by name.
|
||||
* Uses AST to find exact line range.
|
||||
*/
|
||||
export class GetFunctionTool implements ITool {
|
||||
readonly name = "get_function"
|
||||
readonly description =
|
||||
"Get a function's source code by name. Uses AST to find exact line range. " +
|
||||
"Returns the function code with line numbers."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "name",
|
||||
type: "string",
|
||||
description: "Function name to retrieve",
|
||||
required: true,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "read" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.path !== "string" || params.path.trim() === "") {
|
||||
return "Parameter 'path' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
if (typeof params.name !== "string" || params.name.trim() === "") {
|
||||
return "Parameter 'name' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = params.path as string
|
||||
const functionName = params.name as string
|
||||
const pathValidator = new PathValidator(ctx.projectRoot)
|
||||
|
||||
let absolutePath: string
|
||||
let relativePath: string
|
||||
try {
|
||||
;[absolutePath, relativePath] = pathValidator.resolveOrThrow(inputPath)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
|
||||
try {
|
||||
const ast = await ctx.storage.getAST(relativePath)
|
||||
if (!ast) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`AST not found for "${relativePath}". File may not be indexed.`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const functionInfo = this.findFunction(ast.functions, functionName)
|
||||
if (!functionInfo) {
|
||||
const available = ast.functions.map((f) => f.name).join(", ") || "none"
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`Function "${functionName}" not found in "${relativePath}". Available: ${available}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const lines = await this.getFileLines(absolutePath, relativePath, ctx)
|
||||
const functionLines = lines.slice(functionInfo.lineStart - 1, functionInfo.lineEnd)
|
||||
const content = this.formatLinesWithNumbers(functionLines, functionInfo.lineStart)
|
||||
|
||||
const result: GetFunctionResult = {
|
||||
path: relativePath,
|
||||
name: functionInfo.name,
|
||||
startLine: functionInfo.lineStart,
|
||||
endLine: functionInfo.lineEnd,
|
||||
isAsync: functionInfo.isAsync,
|
||||
isExported: functionInfo.isExported,
|
||||
params: functionInfo.params.map((p) => p.name),
|
||||
returnType: functionInfo.returnType,
|
||||
content,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Find function by name in AST.
|
||||
*/
|
||||
private findFunction(functions: FunctionInfo[], name: string): FunctionInfo | undefined {
|
||||
return functions.find((f) => f.name === name)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get file lines from storage or filesystem.
|
||||
*/
|
||||
private async getFileLines(
|
||||
absolutePath: string,
|
||||
relativePath: string,
|
||||
ctx: ToolContext,
|
||||
): Promise<string[]> {
|
||||
const fileData = await ctx.storage.getFile(relativePath)
|
||||
if (fileData) {
|
||||
return fileData.lines
|
||||
}
|
||||
|
||||
const content = await fs.readFile(absolutePath, "utf-8")
|
||||
return content.split("\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Format lines with line numbers.
|
||||
*/
|
||||
private formatLinesWithNumbers(lines: string[], startLine: number): string {
|
||||
const maxLineNum = startLine + lines.length - 1
|
||||
const padWidth = String(maxLineNum).length
|
||||
|
||||
return lines
|
||||
.map((line, index) => {
|
||||
const lineNum = String(startLine + index).padStart(padWidth, " ")
|
||||
return `${lineNum}│${line}`
|
||||
})
|
||||
.join("\n")
|
||||
}
|
||||
}
159 packages/ipuaro/src/infrastructure/tools/read/GetLinesTool.ts Normal file
@@ -0,0 +1,159 @@
import { promises as fs } from "node:fs"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
import { PathValidator } from "../../security/PathValidator.js"
|
||||
|
||||
/**
|
||||
* Result data from get_lines tool.
|
||||
*/
|
||||
export interface GetLinesResult {
|
||||
path: string
|
||||
startLine: number
|
||||
endLine: number
|
||||
totalLines: number
|
||||
content: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for reading specific lines from a file.
|
||||
* Returns content with line numbers.
|
||||
*/
|
||||
export class GetLinesTool implements ITool {
|
||||
readonly name = "get_lines"
|
||||
readonly description =
|
||||
"Get specific lines from a file. Returns the content with line numbers. " +
|
||||
"If no range is specified, returns the entire file."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "File path relative to project root",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "start",
|
||||
type: "number",
|
||||
description: "Start line number (1-based, inclusive)",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "end",
|
||||
type: "number",
|
||||
description: "End line number (1-based, inclusive)",
|
||||
required: false,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "read" as const
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.path !== "string" || params.path.trim() === "") {
|
||||
return "Parameter 'path' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
if (params.start !== undefined) {
|
||||
if (typeof params.start !== "number" || !Number.isInteger(params.start)) {
|
||||
return "Parameter 'start' must be an integer"
|
||||
}
|
||||
if (params.start < 1) {
|
||||
return "Parameter 'start' must be >= 1"
|
||||
}
|
||||
}
|
||||
|
||||
if (params.end !== undefined) {
|
||||
if (typeof params.end !== "number" || !Number.isInteger(params.end)) {
|
||||
return "Parameter 'end' must be an integer"
|
||||
}
|
||||
if (params.end < 1) {
|
||||
return "Parameter 'end' must be >= 1"
|
||||
}
|
||||
}
|
||||
|
||||
if (params.start !== undefined && params.end !== undefined && params.start > params.end) {
|
||||
return "Parameter 'start' must be <= 'end'"
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = params.path as string
|
||||
const pathValidator = new PathValidator(ctx.projectRoot)
|
||||
|
||||
let absolutePath: string
|
||||
let relativePath: string
|
||||
try {
|
||||
;[absolutePath, relativePath] = pathValidator.resolveOrThrow(inputPath)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
|
||||
try {
|
||||
const lines = await this.getFileLines(absolutePath, relativePath, ctx)
|
||||
const totalLines = lines.length
|
||||
|
||||
let startLine = (params.start as number | undefined) ?? 1
|
||||
let endLine = (params.end as number | undefined) ?? totalLines
|
||||
|
||||
startLine = Math.max(1, Math.min(startLine, totalLines))
|
||||
endLine = Math.max(startLine, Math.min(endLine, totalLines))
|
||||
|
||||
const selectedLines = lines.slice(startLine - 1, endLine)
|
||||
const content = this.formatLinesWithNumbers(selectedLines, startLine)
|
||||
|
||||
const result: GetLinesResult = {
|
||||
path: relativePath,
|
||||
startLine,
|
||||
endLine,
|
||||
totalLines,
|
||||
content,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get file lines from storage or filesystem.
|
||||
*/
|
||||
private async getFileLines(
|
||||
absolutePath: string,
|
||||
relativePath: string,
|
||||
ctx: ToolContext,
|
||||
): Promise<string[]> {
|
||||
const fileData = await ctx.storage.getFile(relativePath)
|
||||
if (fileData) {
|
||||
return fileData.lines
|
||||
}
|
||||
|
||||
const content = await fs.readFile(absolutePath, "utf-8")
|
||||
return content.split("\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Format lines with line numbers.
|
||||
* Example: " 1│const x = 1"
|
||||
*/
|
||||
private formatLinesWithNumbers(lines: string[], startLine: number): string {
|
||||
const maxLineNum = startLine + lines.length - 1
|
||||
const padWidth = String(maxLineNum).length
|
||||
|
||||
return lines
|
||||
.map((line, index) => {
|
||||
const lineNum = String(startLine + index).padStart(padWidth, " ")
|
||||
return `${lineNum}│${line}`
|
||||
})
|
||||
.join("\n")
|
||||
}
|
||||
}
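A worked example of the line-number formatting shared by the read tools, using a slice that crosses a digit boundary:

```typescript
// What formatLinesWithNumbers produces for a 3-line slice starting at line 99;
// the pad width comes from the largest line number (101 -> 3 characters).
const lines = ["const a = 1", "const b = 2", "const c = 3"]
const startLine = 99
const padWidth = String(startLine + lines.length - 1).length // 3
const content = lines
    .map((line, index) => `${String(startLine + index).padStart(padWidth, " ")}│${line}`)
    .join("\n")
// " 99│const a = 1\n100│const b = 2\n101│const c = 3"
console.log(content)
```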
207 packages/ipuaro/src/infrastructure/tools/read/GetStructureTool.ts Normal file
@@ -0,0 +1,207 @@
import { promises as fs } from "node:fs"
|
||||
import * as path from "node:path"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
import { DEFAULT_IGNORE_PATTERNS } from "../../../domain/constants/index.js"
|
||||
import { PathValidator } from "../../security/PathValidator.js"
|
||||
|
||||
/**
|
||||
* Tree node representing a file or directory.
|
||||
*/
|
||||
export interface TreeNode {
|
||||
name: string
|
||||
type: "file" | "directory"
|
||||
children?: TreeNode[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from get_structure tool.
|
||||
*/
|
||||
export interface GetStructureResult {
|
||||
path: string
|
||||
tree: TreeNode
|
||||
content: string
|
||||
stats: {
|
||||
directories: number
|
||||
files: number
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for getting project directory structure as a tree.
|
||||
*/
|
||||
export class GetStructureTool implements ITool {
|
||||
readonly name = "get_structure"
|
||||
readonly description =
|
||||
"Get project directory structure as a tree. " +
|
||||
"If path is specified, shows structure of that subdirectory only."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "Subdirectory path relative to project root (optional, defaults to root)",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "depth",
|
||||
type: "number",
|
||||
description: "Maximum depth to traverse (default: unlimited)",
|
||||
required: false,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "read" as const
|
||||
|
||||
private readonly defaultIgnorePatterns = new Set([
|
||||
...DEFAULT_IGNORE_PATTERNS,
|
||||
".git",
|
||||
".idea",
|
||||
".vscode",
|
||||
"__pycache__",
|
||||
".pytest_cache",
|
||||
".nyc_output",
|
||||
"coverage",
|
||||
])
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (params.path !== undefined) {
|
||||
if (typeof params.path !== "string") {
|
||||
return "Parameter 'path' must be a string"
|
||||
}
|
||||
}
|
||||
|
||||
if (params.depth !== undefined) {
|
||||
if (typeof params.depth !== "number" || !Number.isInteger(params.depth)) {
|
||||
return "Parameter 'depth' must be an integer"
|
||||
}
|
||||
if (params.depth < 1) {
|
||||
return "Parameter 'depth' must be >= 1"
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const inputPath = (params.path as string | undefined) ?? "."
|
||||
const maxDepth = params.depth as number | undefined
|
||||
const pathValidator = new PathValidator(ctx.projectRoot)
|
||||
|
||||
let absolutePath: string
|
||||
let relativePath: string
|
||||
try {
|
||||
;[absolutePath, relativePath] = pathValidator.resolveOrThrow(inputPath)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
|
||||
try {
|
||||
const stat = await fs.stat(absolutePath)
|
||||
if (!stat.isDirectory()) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`Path "${relativePath}" is not a directory`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const stats = { directories: 0, files: 0 }
|
||||
const tree = await this.buildTree(absolutePath, maxDepth, 0, stats)
|
||||
const content = this.formatTree(tree)
|
||||
|
||||
const result: GetStructureResult = {
|
||||
path: relativePath || ".",
|
||||
tree,
|
||||
content,
|
||||
stats,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build tree structure recursively.
|
||||
*/
|
||||
private async buildTree(
|
||||
dirPath: string,
|
||||
maxDepth: number | undefined,
|
||||
currentDepth: number,
|
||||
stats: { directories: number; files: number },
|
||||
): Promise<TreeNode> {
|
||||
const name = path.basename(dirPath) || dirPath
|
||||
const node: TreeNode = { name, type: "directory", children: [] }
|
||||
stats.directories++
|
||||
|
||||
if (maxDepth !== undefined && currentDepth >= maxDepth) {
|
||||
return node
|
||||
}
|
||||
|
||||
const entries = await fs.readdir(dirPath, { withFileTypes: true })
|
||||
const sortedEntries = entries
|
||||
.filter((e) => !this.shouldIgnore(e.name))
|
||||
.sort((a, b) => {
|
||||
if (a.isDirectory() && !b.isDirectory()) {
|
||||
return -1
|
||||
}
|
||||
if (!a.isDirectory() && b.isDirectory()) {
|
||||
return 1
|
||||
}
|
||||
return a.name.localeCompare(b.name)
|
||||
})
|
||||
|
||||
for (const entry of sortedEntries) {
|
||||
const entryPath = path.join(dirPath, entry.name)
|
||||
|
||||
if (entry.isDirectory()) {
|
||||
const childNode = await this.buildTree(entryPath, maxDepth, currentDepth + 1, stats)
|
||||
node.children?.push(childNode)
|
||||
} else if (entry.isFile()) {
|
||||
node.children?.push({ name: entry.name, type: "file" })
|
||||
stats.files++
|
||||
}
|
||||
}
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if entry should be ignored.
|
||||
*/
|
||||
private shouldIgnore(name: string): boolean {
|
||||
return this.defaultIgnorePatterns.has(name)
|
||||
}
|
||||
|
||||
/**
|
||||
* Format tree as ASCII art.
|
||||
*/
|
||||
private formatTree(node: TreeNode, prefix = "", isLast = true): string {
|
||||
const lines: string[] = []
|
||||
const connector = isLast ? "└── " : "├── "
|
||||
const icon = node.type === "directory" ? "📁 " : "📄 "
|
||||
|
||||
lines.push(`${prefix}${connector}${icon}${node.name}`)
|
||||
|
||||
if (node.children) {
|
||||
const childPrefix = prefix + (isLast ? " " : "│ ")
|
||||
const childCount = node.children.length
|
||||
node.children.forEach((child, index) => {
|
||||
const childIsLast = index === childCount - 1
|
||||
lines.push(this.formatTree(child, childPrefix, childIsLast))
|
||||
})
|
||||
}
|
||||
|
||||
return lines.join("\n")
|
||||
}
|
||||
}
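A read-only usage sketch; the rendered tree in the trailing comment follows formatTree's connectors and icons for a small hypothetical directory.

```typescript
// get_structure needs no confirmation; parameters are validated up front.
import { GetStructureTool } from "./GetStructureTool.js"

const tool = new GetStructureTool()
console.log(tool.validateParams({ path: "src", depth: 2 })) // null -> valid
// For a tiny project, result.content would look like:
// └── 📁 src
//     ├── 📁 domain
//     │   └── 📄 ITool.ts
//     └── 📄 index.ts
// Directories sort before files, and ignore-listed names (.git, coverage, ...) never appear.
```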
190 packages/ipuaro/src/infrastructure/tools/registry.ts Normal file
@@ -0,0 +1,190 @@
import type { IToolRegistry } from "../../application/interfaces/IToolRegistry.js"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../domain/services/ITool.js"
|
||||
import { createErrorResult, type ToolResult } from "../../domain/value-objects/ToolResult.js"
|
||||
import { IpuaroError } from "../../shared/errors/IpuaroError.js"
|
||||
|
||||
/**
|
||||
* Tool registry implementation.
|
||||
* Manages registration and execution of tools.
|
||||
*/
|
||||
export class ToolRegistry implements IToolRegistry {
|
||||
private readonly tools = new Map<string, ITool>()
|
||||
|
||||
/**
|
||||
* Register a tool.
|
||||
* @throws IpuaroError if tool with same name already registered
|
||||
*/
|
||||
register(tool: ITool): void {
|
||||
if (this.tools.has(tool.name)) {
|
||||
throw new IpuaroError(
|
||||
"validation",
|
||||
`Tool "${tool.name}" is already registered`,
|
||||
true,
|
||||
"Use a different tool name or unregister the existing tool first",
|
||||
)
|
||||
}
|
||||
this.tools.set(tool.name, tool)
|
||||
}
|
||||
|
||||
/**
|
||||
* Unregister a tool by name.
|
||||
* @returns true if tool was removed, false if not found
|
||||
*/
|
||||
unregister(name: string): boolean {
|
||||
return this.tools.delete(name)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tool by name.
|
||||
*/
|
||||
get(name: string): ITool | undefined {
|
||||
return this.tools.get(name)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered tools.
|
||||
*/
|
||||
getAll(): ITool[] {
|
||||
return Array.from(this.tools.values())
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tools by category.
|
||||
*/
|
||||
getByCategory(category: ITool["category"]): ITool[] {
|
||||
return this.getAll().filter((tool) => tool.category === category)
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if tool exists.
|
||||
*/
|
||||
has(name: string): boolean {
|
||||
return this.tools.has(name)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get number of registered tools.
|
||||
*/
|
||||
get size(): number {
|
||||
return this.tools.size
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute tool by name.
|
||||
* @throws IpuaroError if tool not found
|
||||
*/
|
||||
async execute(
|
||||
name: string,
|
||||
params: Record<string, unknown>,
|
||||
ctx: ToolContext,
|
||||
): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${name}-${String(startTime)}`
|
||||
|
||||
const tool = this.tools.get(name)
|
||||
if (!tool) {
|
||||
return createErrorResult(callId, `Tool "${name}" not found`, Date.now() - startTime)
|
||||
}
|
||||
|
||||
const validationError = tool.validateParams(params)
|
||||
if (validationError) {
|
||||
return createErrorResult(callId, validationError, Date.now() - startTime)
|
||||
}
|
||||
|
||||
if (tool.requiresConfirmation) {
|
||||
const confirmed = await ctx.requestConfirmation(
|
||||
`Execute "${name}" with params: ${JSON.stringify(params)}`,
|
||||
)
|
||||
if (!confirmed) {
|
||||
return createErrorResult(callId, "User cancelled operation", Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await tool.execute(params, ctx)
|
||||
return {
|
||||
...result,
|
||||
callId,
|
||||
}
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tool definitions for LLM.
|
||||
* Converts ITool[] to LLM-compatible format.
|
||||
*/
|
||||
getToolDefinitions(): {
|
||||
name: string
|
||||
description: string
|
||||
parameters: {
|
||||
type: "object"
|
||||
properties: Record<string, { type: string; description: string }>
|
||||
required: string[]
|
||||
}
|
||||
}[] {
|
||||
return this.getAll().map((tool) => ({
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
parameters: this.convertParametersToSchema(tool.parameters),
|
||||
}))
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert tool parameters to JSON Schema format.
|
||||
*/
|
||||
private convertParametersToSchema(params: ToolParameterSchema[]): {
|
||||
type: "object"
|
||||
properties: Record<string, { type: string; description: string }>
|
||||
required: string[]
|
||||
} {
|
||||
const properties: Record<string, { type: string; description: string }> = {}
|
||||
const required: string[] = []
|
||||
|
||||
for (const param of params) {
|
||||
properties[param.name] = {
|
||||
type: param.type,
|
||||
description: param.description,
|
||||
}
|
||||
if (param.required) {
|
||||
required.push(param.name)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
type: "object",
|
||||
properties,
|
||||
required,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all registered tools.
|
||||
*/
|
||||
clear(): void {
|
||||
this.tools.clear()
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tool names.
|
||||
*/
|
||||
getNames(): string[] {
|
||||
return Array.from(this.tools.keys())
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tools that require confirmation.
|
||||
*/
|
||||
getConfirmationTools(): ITool[] {
|
||||
return this.getAll().filter((tool) => tool.requiresConfirmation)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get tools that don't require confirmation.
|
||||
*/
|
||||
getSafeTools(): ITool[] {
|
||||
return this.getAll().filter((tool) => !tool.requiresConfirmation)
|
||||
}
|
||||
}
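For reference, the definition getToolDefinitions would emit for get_lines, built from the parameter schemas shown earlier in this diff (description abbreviated):

```typescript
// JSON-schema-style definition produced by convertParametersToSchema for get_lines.
const definition = {
    name: "get_lines",
    description: "Get specific lines from a file. ...", // abbreviated
    parameters: {
        type: "object",
        properties: {
            path: { type: "string", description: "File path relative to project root" },
            start: { type: "number", description: "Start line number (1-based, inclusive)" },
            end: { type: "number", description: "End line number (1-based, inclusive)" },
        },
        required: ["path"], // only parameters flagged required:true end up here
    },
}
console.log(JSON.stringify(definition, null, 2))
```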
257 packages/ipuaro/src/infrastructure/tools/run/CommandSecurity.ts Normal file
@@ -0,0 +1,257 @@
/**
|
||||
* Command security classification.
|
||||
*/
|
||||
export type CommandClassification = "allowed" | "blocked" | "requires_confirmation"
|
||||
|
||||
/**
|
||||
* Result of command security check.
|
||||
*/
|
||||
export interface SecurityCheckResult {
|
||||
/** Classification of the command */
|
||||
classification: CommandClassification
|
||||
/** Reason for the classification */
|
||||
reason: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Dangerous commands that are always blocked.
|
||||
* These commands can cause data loss or security issues.
|
||||
*/
|
||||
export const DEFAULT_BLACKLIST: string[] = [
|
||||
// Destructive file operations
|
||||
"rm -rf",
|
||||
"rm -r",
|
||||
"rm -fr",
|
||||
"rmdir",
|
||||
// Dangerous git operations
|
||||
"git push --force",
|
||||
"git push -f",
|
||||
"git reset --hard",
|
||||
"git clean -fd",
|
||||
"git clean -f",
|
||||
// Publishing/deployment
|
||||
"npm publish",
|
||||
"yarn publish",
|
||||
"pnpm publish",
|
||||
// System commands
|
||||
"sudo",
|
||||
"su ",
|
||||
"chmod",
|
||||
"chown",
|
||||
// Network/download commands that could be dangerous
|
||||
"| sh",
|
||||
"| bash",
|
||||
// Environment manipulation
|
||||
"export ",
|
||||
"unset ",
|
||||
// Process control
|
||||
"kill -9",
|
||||
"killall",
|
||||
"pkill",
|
||||
// Disk operations (require exact command start)
|
||||
"mkfs",
|
||||
"fdisk",
|
||||
// Other dangerous
|
||||
":(){ :|:& };:",
|
||||
"eval ",
|
||||
]
|
||||
|
||||
/**
|
||||
* Safe commands that don't require confirmation.
|
||||
* Matched by first word (command name).
|
||||
*/
|
||||
export const DEFAULT_WHITELIST: string[] = [
|
||||
// Package managers
|
||||
"npm",
|
||||
"pnpm",
|
||||
"yarn",
|
||||
"npx",
|
||||
"bun",
|
||||
// Node.js
|
||||
"node",
|
||||
"tsx",
|
||||
"ts-node",
|
||||
// Git (read operations)
|
||||
"git",
|
||||
// Build tools
|
||||
"tsc",
|
||||
"tsup",
|
||||
"esbuild",
|
||||
"vite",
|
||||
"webpack",
|
||||
"rollup",
|
||||
// Testing
|
||||
"vitest",
|
||||
"jest",
|
||||
"mocha",
|
||||
"playwright",
|
||||
"cypress",
|
||||
// Linting/formatting
|
||||
"eslint",
|
||||
"prettier",
|
||||
"biome",
|
||||
// Utilities
|
||||
"echo",
|
||||
"cat",
|
||||
"ls",
|
||||
"pwd",
|
||||
"which",
|
||||
"head",
|
||||
"tail",
|
||||
"grep",
|
||||
"find",
|
||||
"wc",
|
||||
"sort",
|
||||
"diff",
|
||||
]
|
||||
|
||||
/**
|
||||
* Git subcommands that are safe and don't need confirmation.
|
||||
*/
|
||||
const SAFE_GIT_SUBCOMMANDS: string[] = [
|
||||
"status",
|
||||
"log",
|
||||
"diff",
|
||||
"show",
|
||||
"branch",
|
||||
"remote",
|
||||
"fetch",
|
||||
"pull",
|
||||
"stash",
|
||||
"tag",
|
||||
"blame",
|
||||
"ls-files",
|
||||
"ls-tree",
|
||||
"rev-parse",
|
||||
"describe",
|
||||
]
|
||||
|
||||
/**
|
||||
* Command security checker.
|
||||
* Determines if a command is safe to execute, blocked, or requires confirmation.
|
||||
*/
|
||||
export class CommandSecurity {
|
||||
private readonly blacklist: string[]
|
||||
private readonly whitelist: string[]
|
||||
|
||||
constructor(blacklist: string[] = DEFAULT_BLACKLIST, whitelist: string[] = DEFAULT_WHITELIST) {
|
||||
this.blacklist = blacklist.map((cmd) => cmd.toLowerCase())
|
||||
this.whitelist = whitelist.map((cmd) => cmd.toLowerCase())
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a command is safe to execute.
|
||||
*/
|
||||
check(command: string): SecurityCheckResult {
|
||||
const normalized = command.trim().toLowerCase()
|
||||
|
||||
const blacklistMatch = this.isBlacklisted(normalized)
|
||||
if (blacklistMatch) {
|
||||
return {
|
||||
classification: "blocked",
|
||||
reason: `Command contains blocked pattern: '${blacklistMatch}'`,
|
||||
}
|
||||
}
|
||||
|
||||
if (this.isWhitelisted(normalized)) {
|
||||
return {
|
||||
classification: "allowed",
|
||||
reason: "Command is in the whitelist",
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
classification: "requires_confirmation",
|
||||
reason: "Command is not in the whitelist and requires user confirmation",
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if command matches any blacklist pattern.
|
||||
* Returns the matched pattern or null.
|
||||
*/
|
||||
private isBlacklisted(command: string): string | null {
|
||||
for (const pattern of this.blacklist) {
|
||||
if (command.includes(pattern)) {
|
||||
return pattern
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if command's first word is in the whitelist.
|
||||
*/
|
||||
private isWhitelisted(command: string): boolean {
|
||||
const firstWord = this.getFirstWord(command)
|
||||
|
||||
if (!this.whitelist.includes(firstWord)) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (firstWord === "git") {
|
||||
return this.isGitCommandSafe(command)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if git command is safe (read-only operations).
|
||||
*/
|
||||
private isGitCommandSafe(command: string): boolean {
|
||||
const parts = command.split(/\s+/)
|
||||
if (parts.length < 2) {
|
||||
return false
|
||||
}
|
||||
|
||||
const subcommand = parts[1]
|
||||
return SAFE_GIT_SUBCOMMANDS.includes(subcommand)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get first word from command.
|
||||
*/
|
||||
private getFirstWord(command: string): string {
|
||||
const match = /^(\S+)/.exec(command)
|
||||
return match ? match[1] : ""
|
||||
}
|
||||
|
||||
/**
|
||||
* Add patterns to the blacklist.
|
||||
*/
|
||||
addToBlacklist(patterns: string[]): void {
|
||||
for (const pattern of patterns) {
|
||||
const normalized = pattern.toLowerCase()
|
||||
if (!this.blacklist.includes(normalized)) {
|
||||
this.blacklist.push(normalized)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add commands to the whitelist.
|
||||
*/
|
||||
addToWhitelist(commands: string[]): void {
|
||||
for (const cmd of commands) {
|
||||
const normalized = cmd.toLowerCase()
|
||||
if (!this.whitelist.includes(normalized)) {
|
||||
this.whitelist.push(normalized)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current blacklist.
|
||||
*/
|
||||
getBlacklist(): string[] {
|
||||
return [...this.blacklist]
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current whitelist.
|
||||
*/
|
||||
getWhitelist(): string[] {
|
||||
return [...this.whitelist]
|
||||
}
|
||||
}
|
||||
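A minimal usage sketch for the `CommandSecurity` class above; the relative import path and the sample commands are illustrative assumptions, not part of the diff:

```typescript
import { CommandSecurity } from "./CommandSecurity.js"

const security = new CommandSecurity()

// Blocked: the normalized command contains the "rm -rf" blacklist pattern.
console.log(security.check("rm -rf node_modules"))
// -> { classification: "blocked", reason: "Command contains blocked pattern: 'rm -rf'" }

// Allowed: "git" is whitelisted and "status" is a safe read-only subcommand.
console.log(security.check("git status"))

// Requires confirmation: "curl" is neither blacklisted nor whitelisted by default.
console.log(security.check("curl https://example.com"))
```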
227
packages/ipuaro/src/infrastructure/tools/run/RunCommandTool.ts
Normal file
@@ -0,0 +1,227 @@
|
||||
import { exec } from "node:child_process"
|
||||
import { promisify } from "node:util"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
import { CommandSecurity } from "./CommandSecurity.js"
|
||||
|
||||
const execAsync = promisify(exec)
|
||||
|
||||
/**
|
||||
* Result data from run_command tool.
|
||||
*/
|
||||
export interface RunCommandResult {
|
||||
/** The command that was executed */
|
||||
command: string
|
||||
/** Exit code (0 = success) */
|
||||
exitCode: number
|
||||
/** Standard output */
|
||||
stdout: string
|
||||
/** Standard error output */
|
||||
stderr: string
|
||||
/** Whether command was successful (exit code 0) */
|
||||
success: boolean
|
||||
/** Execution time in milliseconds */
|
||||
durationMs: number
|
||||
/** Whether user confirmation was required */
|
||||
requiredConfirmation: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Default command timeout in milliseconds.
|
||||
*/
|
||||
const DEFAULT_TIMEOUT = 30000
|
||||
|
||||
/**
|
||||
* Maximum output size in characters.
|
||||
*/
|
||||
const MAX_OUTPUT_SIZE = 100000
|
||||
|
||||
/**
|
||||
* Tool for executing shell commands.
|
||||
* Commands are checked against blacklist/whitelist for security.
|
||||
*/
|
||||
export class RunCommandTool implements ITool {
|
||||
readonly name = "run_command"
|
||||
readonly description =
|
||||
"Execute a shell command in the project directory. " +
|
||||
"Commands are checked against blacklist/whitelist for security. " +
|
||||
"Unknown commands require user confirmation."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "command",
|
||||
type: "string",
|
||||
description: "Shell command to execute",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "timeout",
|
||||
type: "number",
|
||||
description: "Timeout in milliseconds (default: 30000)",
|
||||
required: false,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "run" as const
|
||||
|
||||
private readonly security: CommandSecurity
|
||||
private readonly execFn: typeof execAsync
|
||||
|
||||
constructor(security?: CommandSecurity, execFn?: typeof execAsync) {
|
||||
this.security = security ?? new CommandSecurity()
|
||||
this.execFn = execFn ?? execAsync
|
||||
}
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (params.command === undefined) {
|
||||
return "Parameter 'command' is required"
|
||||
}
|
||||
if (typeof params.command !== "string") {
|
||||
return "Parameter 'command' must be a string"
|
||||
}
|
||||
if (params.command.trim() === "") {
|
||||
return "Parameter 'command' cannot be empty"
|
||||
}
|
||||
if (params.timeout !== undefined) {
|
||||
if (typeof params.timeout !== "number") {
|
||||
return "Parameter 'timeout' must be a number"
|
||||
}
|
||||
if (params.timeout <= 0) {
|
||||
return "Parameter 'timeout' must be positive"
|
||||
}
|
||||
if (params.timeout > 600000) {
|
||||
return "Parameter 'timeout' cannot exceed 600000ms (10 minutes)"
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const command = params.command as string
|
||||
const timeout = (params.timeout as number) ?? DEFAULT_TIMEOUT
|
||||
|
||||
const securityCheck = this.security.check(command)
|
||||
|
||||
if (securityCheck.classification === "blocked") {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`Command blocked for security: ${securityCheck.reason}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
let requiredConfirmation = false
|
||||
|
||||
if (securityCheck.classification === "requires_confirmation") {
|
||||
requiredConfirmation = true
|
||||
const confirmed = await ctx.requestConfirmation(
|
||||
`Execute command: ${command}\n\nReason: ${securityCheck.reason}`,
|
||||
)
|
||||
|
||||
if (!confirmed) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
"Command execution cancelled by user",
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const execStartTime = Date.now()
|
||||
|
||||
const { stdout, stderr } = await this.execFn(command, {
|
||||
cwd: ctx.projectRoot,
|
||||
timeout,
|
||||
maxBuffer: MAX_OUTPUT_SIZE,
|
||||
env: { ...process.env, FORCE_COLOR: "0" },
|
||||
})
|
||||
|
||||
const durationMs = Date.now() - execStartTime
|
||||
|
||||
const result: RunCommandResult = {
|
||||
command,
|
||||
exitCode: 0,
|
||||
stdout: this.truncateOutput(stdout),
|
||||
stderr: this.truncateOutput(stderr),
|
||||
success: true,
|
||||
durationMs,
|
||||
requiredConfirmation,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
return this.handleExecError(callId, command, error, requiredConfirmation, startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle exec errors and return appropriate result.
|
||||
*/
|
||||
private handleExecError(
|
||||
callId: string,
|
||||
command: string,
|
||||
error: unknown,
|
||||
requiredConfirmation: boolean,
|
||||
startTime: number,
|
||||
): ToolResult {
|
||||
if (this.isExecError(error)) {
|
||||
const result: RunCommandResult = {
|
||||
command,
|
||||
exitCode: error.code ?? 1,
|
||||
stdout: this.truncateOutput(error.stdout ?? ""),
|
||||
stderr: this.truncateOutput(error.stderr ?? error.message),
|
||||
success: false,
|
||||
durationMs: Date.now() - startTime,
|
||||
requiredConfirmation,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
}
|
||||
|
||||
if (error instanceof Error) {
|
||||
if (error.message.includes("ETIMEDOUT") || error.message.includes("timed out")) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`Command timed out: ${command}`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
return createErrorResult(callId, error.message, Date.now() - startTime)
|
||||
}
|
||||
|
||||
return createErrorResult(callId, String(error), Date.now() - startTime)
|
||||
}
|
||||
|
||||
/**
|
||||
* Type guard for exec error.
|
||||
*/
|
||||
private isExecError(
|
||||
error: unknown,
|
||||
): error is Error & { code?: number; stdout?: string; stderr?: string } {
|
||||
return error instanceof Error && "code" in error
|
||||
}
|
||||
|
||||
/**
|
||||
* Truncate output if too large.
|
||||
*/
|
||||
private truncateOutput(output: string): string {
|
||||
if (output.length <= MAX_OUTPUT_SIZE) {
|
||||
return output
|
||||
}
|
||||
return `${output.slice(0, MAX_OUTPUT_SIZE)}\n... (output truncated)`
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the security checker instance.
|
||||
*/
|
||||
getSecurity(): CommandSecurity {
|
||||
return this.security
|
||||
}
|
||||
}
|
||||
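A rough sketch of driving `RunCommandTool` directly. The full `ToolContext` interface is not visible in this diff, so the context below stubs only the members this tool touches and casts the rest; treat every name here as an assumption:

```typescript
import { RunCommandTool } from "./RunCommandTool.js"

const tool = new RunCommandTool()

// Stub context: only projectRoot and requestConfirmation are exercised by this tool.
const ctx = {
    projectRoot: process.cwd(),
    requestConfirmation: async (message: string): Promise<boolean> => {
        console.log(`confirm? ${message}`)
        return true
    },
} as unknown as Parameters<RunCommandTool["execute"]>[1]

const validationError = tool.validateParams({ command: "echo hello" })
if (validationError === null) {
    // "echo" is whitelisted, so no confirmation round-trip is needed here.
    const result = await tool.execute({ command: "echo hello" }, ctx)
    console.log(result)
}
```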
365
packages/ipuaro/src/infrastructure/tools/run/RunTestsTool.ts
Normal file
@@ -0,0 +1,365 @@
|
||||
import { exec } from "node:child_process"
|
||||
import { promisify } from "node:util"
|
||||
import * as path from "node:path"
|
||||
import * as fs from "node:fs/promises"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
const execAsync = promisify(exec)
|
||||
|
||||
/**
|
||||
* Supported test runners.
|
||||
*/
|
||||
export type TestRunner = "vitest" | "jest" | "mocha" | "npm"
|
||||
|
||||
/**
|
||||
* Result data from run_tests tool.
|
||||
*/
|
||||
export interface RunTestsResult {
|
||||
/** Test runner that was used */
|
||||
runner: TestRunner
|
||||
/** Command that was executed */
|
||||
command: string
|
||||
/** Whether all tests passed */
|
||||
passed: boolean
|
||||
/** Exit code */
|
||||
exitCode: number
|
||||
/** Standard output */
|
||||
stdout: string
|
||||
/** Standard error output */
|
||||
stderr: string
|
||||
/** Execution time in milliseconds */
|
||||
durationMs: number
|
||||
}
|
||||
|
||||
/**
|
||||
* Default test timeout in milliseconds (5 minutes).
|
||||
*/
|
||||
const DEFAULT_TIMEOUT = 300000
|
||||
|
||||
/**
|
||||
* Maximum output size in characters.
|
||||
*/
|
||||
const MAX_OUTPUT_SIZE = 200000
|
||||
|
||||
/**
|
||||
* Tool for running project tests.
|
||||
* Auto-detects test runner (vitest, jest, mocha, npm test).
|
||||
*/
|
||||
export class RunTestsTool implements ITool {
|
||||
readonly name = "run_tests"
|
||||
readonly description =
|
||||
"Run the project's test suite. Auto-detects test runner (vitest, jest, npm test). " +
|
||||
"Returns test results summary."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "Run tests for specific file or directory",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "filter",
|
||||
type: "string",
|
||||
description: "Filter tests by name pattern",
|
||||
required: false,
|
||||
},
|
||||
{
|
||||
name: "watch",
|
||||
type: "boolean",
|
||||
description: "Run in watch mode (default: false)",
|
||||
required: false,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "run" as const
|
||||
|
||||
private readonly execFn: typeof execAsync
|
||||
private readonly fsAccess: typeof fs.access
|
||||
private readonly fsReadFile: typeof fs.readFile
|
||||
|
||||
constructor(
|
||||
execFn?: typeof execAsync,
|
||||
fsAccess?: typeof fs.access,
|
||||
fsReadFile?: typeof fs.readFile,
|
||||
) {
|
||||
this.execFn = execFn ?? execAsync
|
||||
this.fsAccess = fsAccess ?? fs.access
|
||||
this.fsReadFile = fsReadFile ?? fs.readFile
|
||||
}
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (params.path !== undefined && typeof params.path !== "string") {
|
||||
return "Parameter 'path' must be a string"
|
||||
}
|
||||
if (params.filter !== undefined && typeof params.filter !== "string") {
|
||||
return "Parameter 'filter' must be a string"
|
||||
}
|
||||
if (params.watch !== undefined && typeof params.watch !== "boolean") {
|
||||
return "Parameter 'watch' must be a boolean"
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const testPath = params.path as string | undefined
|
||||
const filter = params.filter as string | undefined
|
||||
const watch = (params.watch as boolean) ?? false
|
||||
|
||||
try {
|
||||
const runner = await this.detectTestRunner(ctx.projectRoot)
|
||||
|
||||
if (!runner) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
"No test runner detected. Ensure vitest, jest, or mocha is installed, or 'test' script exists in package.json.",
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const command = this.buildCommand(runner, testPath, filter, watch)
|
||||
const execStartTime = Date.now()
|
||||
|
||||
try {
|
||||
const { stdout, stderr } = await this.execFn(command, {
|
||||
cwd: ctx.projectRoot,
|
||||
timeout: DEFAULT_TIMEOUT,
|
||||
maxBuffer: MAX_OUTPUT_SIZE,
|
||||
env: { ...process.env, FORCE_COLOR: "0", CI: "true" },
|
||||
})
|
||||
|
||||
const durationMs = Date.now() - execStartTime
|
||||
|
||||
const result: RunTestsResult = {
|
||||
runner,
|
||||
command,
|
||||
passed: true,
|
||||
exitCode: 0,
|
||||
stdout: this.truncateOutput(stdout),
|
||||
stderr: this.truncateOutput(stderr),
|
||||
durationMs,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
return this.handleExecError(
|
||||
{ callId, runner, command, startTime },
|
||||
error,
|
||||
execStartTime,
|
||||
)
|
||||
}
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect which test runner is available in the project.
|
||||
*/
|
||||
async detectTestRunner(projectRoot: string): Promise<TestRunner | null> {
|
||||
const configRunner = await this.detectByConfigFile(projectRoot)
|
||||
if (configRunner) {
|
||||
return configRunner
|
||||
}
|
||||
|
||||
return this.detectByPackageJson(projectRoot)
|
||||
}
|
||||
|
||||
private async detectByConfigFile(projectRoot: string): Promise<TestRunner | null> {
|
||||
const configFiles: { files: string[]; runner: TestRunner }[] = [
|
||||
{
|
||||
files: ["vitest.config.ts", "vitest.config.js", "vitest.config.mts"],
|
||||
runner: "vitest",
|
||||
},
|
||||
{
|
||||
files: ["jest.config.js", "jest.config.ts", "jest.config.json"],
|
||||
runner: "jest",
|
||||
},
|
||||
]
|
||||
|
||||
for (const { files, runner } of configFiles) {
|
||||
for (const file of files) {
|
||||
if (await this.hasFile(projectRoot, file)) {
|
||||
return runner
|
||||
}
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
private async detectByPackageJson(projectRoot: string): Promise<TestRunner | null> {
|
||||
const packageJsonPath = path.join(projectRoot, "package.json")
|
||||
try {
|
||||
const content = await this.fsReadFile(packageJsonPath, "utf-8")
|
||||
const pkg = JSON.parse(content) as {
|
||||
scripts?: Record<string, string>
|
||||
devDependencies?: Record<string, string>
|
||||
dependencies?: Record<string, string>
|
||||
}
|
||||
|
||||
const deps = { ...pkg.devDependencies, ...pkg.dependencies }
|
||||
if (deps.vitest) {
|
||||
return "vitest"
|
||||
}
|
||||
if (deps.jest) {
|
||||
return "jest"
|
||||
}
|
||||
if (deps.mocha) {
|
||||
return "mocha"
|
||||
}
|
||||
if (pkg.scripts?.test) {
|
||||
return "npm"
|
||||
}
|
||||
} catch {
|
||||
// package.json doesn't exist or is invalid
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the test command based on runner and options.
|
||||
*/
|
||||
buildCommand(runner: TestRunner, testPath?: string, filter?: string, watch?: boolean): string {
|
||||
const builders: Record<TestRunner, () => string[]> = {
|
||||
vitest: () => this.buildVitestCommand(testPath, filter, watch),
|
||||
jest: () => this.buildJestCommand(testPath, filter, watch),
|
||||
mocha: () => this.buildMochaCommand(testPath, filter, watch),
|
||||
npm: () => this.buildNpmCommand(testPath, filter),
|
||||
}
|
||||
return builders[runner]().join(" ")
|
||||
}
|
||||
|
||||
private buildVitestCommand(testPath?: string, filter?: string, watch?: boolean): string[] {
|
||||
const parts = ["npx vitest"]
|
||||
if (!watch) {
|
||||
parts.push("run")
|
||||
}
|
||||
if (testPath) {
|
||||
parts.push(testPath)
|
||||
}
|
||||
if (filter) {
|
||||
parts.push("-t", `"${filter}"`)
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
private buildJestCommand(testPath?: string, filter?: string, watch?: boolean): string[] {
|
||||
const parts = ["npx jest"]
|
||||
if (testPath) {
|
||||
parts.push(testPath)
|
||||
}
|
||||
if (filter) {
|
||||
parts.push("-t", `"${filter}"`)
|
||||
}
|
||||
if (watch) {
|
||||
parts.push("--watch")
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
private buildMochaCommand(testPath?: string, filter?: string, watch?: boolean): string[] {
|
||||
const parts = ["npx mocha"]
|
||||
if (testPath) {
|
||||
parts.push(testPath)
|
||||
}
|
||||
if (filter) {
|
||||
parts.push("--grep", `"${filter}"`)
|
||||
}
|
||||
if (watch) {
|
||||
parts.push("--watch")
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
private buildNpmCommand(testPath?: string, filter?: string): string[] {
|
||||
const parts = ["npm test"]
|
||||
if (testPath || filter) {
|
||||
parts.push("--")
|
||||
if (testPath) {
|
||||
parts.push(testPath)
|
||||
}
|
||||
if (filter) {
|
||||
parts.push(`"${filter}"`)
|
||||
}
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a file exists.
|
||||
*/
|
||||
private async hasFile(projectRoot: string, filename: string): Promise<boolean> {
|
||||
try {
|
||||
await this.fsAccess(path.join(projectRoot, filename))
|
||||
return true
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle exec errors and return appropriate result.
|
||||
*/
|
||||
private handleExecError(
|
||||
ctx: { callId: string; runner: TestRunner; command: string; startTime: number },
|
||||
error: unknown,
|
||||
execStartTime: number,
|
||||
): ToolResult {
|
||||
const { callId, runner, command, startTime } = ctx
|
||||
const durationMs = Date.now() - execStartTime
|
||||
|
||||
if (this.isExecError(error)) {
|
||||
const result: RunTestsResult = {
|
||||
runner,
|
||||
command,
|
||||
passed: false,
|
||||
exitCode: error.code ?? 1,
|
||||
stdout: this.truncateOutput(error.stdout ?? ""),
|
||||
stderr: this.truncateOutput(error.stderr ?? error.message),
|
||||
durationMs,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
}
|
||||
|
||||
if (error instanceof Error) {
|
||||
if (error.message.includes("ETIMEDOUT") || error.message.includes("timed out")) {
|
||||
return createErrorResult(
|
||||
callId,
|
||||
`Tests timed out after ${String(DEFAULT_TIMEOUT / 1000)} seconds`,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
return createErrorResult(callId, error.message, Date.now() - startTime)
|
||||
}
|
||||
|
||||
return createErrorResult(callId, String(error), Date.now() - startTime)
|
||||
}
|
||||
|
||||
/**
|
||||
* Type guard for exec error.
|
||||
*/
|
||||
private isExecError(
|
||||
error: unknown,
|
||||
): error is Error & { code?: number; stdout?: string; stderr?: string } {
|
||||
return error instanceof Error && "code" in error
|
||||
}
|
||||
|
||||
/**
|
||||
* Truncate output if too large.
|
||||
*/
|
||||
private truncateOutput(output: string): string {
|
||||
if (output.length <= MAX_OUTPUT_SIZE) {
|
||||
return output
|
||||
}
|
||||
return `${output.slice(0, MAX_OUTPUT_SIZE)}\n... (output truncated)`
|
||||
}
|
||||
}
|
||||
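Since `buildCommand` is public, the command assembly above can be sanity-checked in isolation; the paths and filter strings below are made up for illustration:

```typescript
import { RunTestsTool } from "./RunTestsTool.js"

const tool = new RunTestsTool()

console.log(tool.buildCommand("vitest", "src/domain", "parses config", false))
// -> npx vitest run src/domain -t "parses config"

console.log(tool.buildCommand("jest", undefined, "parses config", true))
// -> npx jest -t "parses config" --watch

console.log(tool.buildCommand("npm", "src/domain"))
// -> npm test -- src/domain
```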
12
packages/ipuaro/src/infrastructure/tools/run/index.ts
Normal file
@@ -0,0 +1,12 @@
// Run tools exports
export {
    CommandSecurity,
    DEFAULT_BLACKLIST,
    DEFAULT_WHITELIST,
    type CommandClassification,
    type SecurityCheckResult,
} from "./CommandSecurity.js"

export { RunCommandTool, type RunCommandResult } from "./RunCommandTool.js"

export { RunTestsTool, type RunTestsResult, type TestRunner } from "./RunTestsTool.js"
@@ -0,0 +1,221 @@
|
||||
import { promises as fs } from "node:fs"
|
||||
import * as path from "node:path"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import type { SymbolLocation } from "../../../domain/services/IStorage.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
/**
|
||||
* A single definition location with context.
|
||||
*/
|
||||
export interface DefinitionLocation {
|
||||
path: string
|
||||
line: number
|
||||
type: SymbolLocation["type"]
|
||||
context: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from find_definition tool.
|
||||
*/
|
||||
export interface FindDefinitionResult {
|
||||
symbol: string
|
||||
found: boolean
|
||||
definitions: DefinitionLocation[]
|
||||
suggestions?: string[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for finding where a symbol is defined.
|
||||
* Uses the SymbolIndex to locate definitions.
|
||||
*/
|
||||
export class FindDefinitionTool implements ITool {
|
||||
readonly name = "find_definition"
|
||||
readonly description =
|
||||
"Find where a symbol is defined. " + "Returns file path, line number, and symbol type."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "symbol",
|
||||
type: "string",
|
||||
description: "Symbol name to find definition for",
|
||||
required: true,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "search" as const
|
||||
|
||||
private readonly contextLines = 2
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.symbol !== "string" || params.symbol.trim() === "") {
|
||||
return "Parameter 'symbol' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const symbol = (params.symbol as string).trim()
|
||||
|
||||
try {
|
||||
const symbolIndex = await ctx.storage.getSymbolIndex()
|
||||
const locations = symbolIndex.get(symbol)
|
||||
|
||||
if (!locations || locations.length === 0) {
|
||||
const suggestions = this.findSimilarSymbols(symbol, symbolIndex)
|
||||
return createSuccessResult(
|
||||
callId,
|
||||
{
|
||||
symbol,
|
||||
found: false,
|
||||
definitions: [],
|
||||
suggestions: suggestions.length > 0 ? suggestions : undefined,
|
||||
} satisfies FindDefinitionResult,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const definitions: DefinitionLocation[] = []
|
||||
for (const loc of locations) {
|
||||
const context = await this.getContext(loc, ctx)
|
||||
definitions.push({
|
||||
path: loc.path,
|
||||
line: loc.line,
|
||||
type: loc.type,
|
||||
context,
|
||||
})
|
||||
}
|
||||
|
||||
definitions.sort((a, b) => {
|
||||
const pathCompare = a.path.localeCompare(b.path)
|
||||
if (pathCompare !== 0) {
|
||||
return pathCompare
|
||||
}
|
||||
return a.line - b.line
|
||||
})
|
||||
|
||||
const result: FindDefinitionResult = {
|
||||
symbol,
|
||||
found: true,
|
||||
definitions,
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get context lines around the definition.
|
||||
*/
|
||||
private async getContext(loc: SymbolLocation, ctx: ToolContext): Promise<string> {
|
||||
try {
|
||||
const lines = await this.getFileLines(loc.path, ctx)
|
||||
if (lines.length === 0) {
|
||||
return ""
|
||||
}
|
||||
|
||||
const lineIndex = loc.line - 1
|
||||
const startIndex = Math.max(0, lineIndex - this.contextLines)
|
||||
const endIndex = Math.min(lines.length - 1, lineIndex + this.contextLines)
|
||||
|
||||
const contextLines: string[] = []
|
||||
for (let i = startIndex; i <= endIndex; i++) {
|
||||
const lineNum = i + 1
|
||||
const prefix = i === lineIndex ? ">" : " "
|
||||
contextLines.push(`${prefix}${String(lineNum).padStart(4)}│${lines[i]}`)
|
||||
}
|
||||
|
||||
return contextLines.join("\n")
|
||||
} catch {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get file lines from storage or filesystem.
|
||||
*/
|
||||
private async getFileLines(relativePath: string, ctx: ToolContext): Promise<string[]> {
|
||||
const fileData = await ctx.storage.getFile(relativePath)
|
||||
if (fileData) {
|
||||
return fileData.lines
|
||||
}
|
||||
|
||||
const absolutePath = path.resolve(ctx.projectRoot, relativePath)
|
||||
try {
|
||||
const content = await fs.readFile(absolutePath, "utf-8")
|
||||
return content.split("\n")
|
||||
} catch {
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Find similar symbol names for suggestions.
|
||||
*/
|
||||
private findSimilarSymbols(symbol: string, symbolIndex: Map<string, unknown>): string[] {
|
||||
const suggestions: string[] = []
|
||||
const lowerSymbol = symbol.toLowerCase()
|
||||
const maxSuggestions = 5
|
||||
|
||||
for (const name of symbolIndex.keys()) {
|
||||
if (suggestions.length >= maxSuggestions) {
|
||||
break
|
||||
}
|
||||
|
||||
const lowerName = name.toLowerCase()
|
||||
if (lowerName.includes(lowerSymbol) || lowerSymbol.includes(lowerName)) {
|
||||
suggestions.push(name)
|
||||
} else if (this.levenshteinDistance(lowerSymbol, lowerName) <= 2) {
|
||||
suggestions.push(name)
|
||||
}
|
||||
}
|
||||
|
||||
return suggestions.sort()
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate Levenshtein distance between two strings.
|
||||
*/
|
||||
private levenshteinDistance(a: string, b: string): number {
|
||||
if (a.length === 0) {
|
||||
return b.length
|
||||
}
|
||||
if (b.length === 0) {
|
||||
return a.length
|
||||
}
|
||||
|
||||
const matrix: number[][] = []
|
||||
|
||||
for (let i = 0; i <= b.length; i++) {
|
||||
matrix[i] = [i]
|
||||
}
|
||||
for (let j = 0; j <= a.length; j++) {
|
||||
matrix[0][j] = j
|
||||
}
|
||||
|
||||
for (let i = 1; i <= b.length; i++) {
|
||||
for (let j = 1; j <= a.length; j++) {
|
||||
if (b.charAt(i - 1) === a.charAt(j - 1)) {
|
||||
matrix[i][j] = matrix[i - 1][j - 1]
|
||||
} else {
|
||||
matrix[i][j] = Math.min(
|
||||
matrix[i - 1][j - 1] + 1,
|
||||
matrix[i][j - 1] + 1,
|
||||
matrix[i - 1][j] + 1,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return matrix[b.length][a.length]
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,260 @@
|
||||
import * as path from "node:path"
|
||||
import type { ITool, ToolContext, ToolParameterSchema } from "../../../domain/services/ITool.js"
|
||||
import {
|
||||
createErrorResult,
|
||||
createSuccessResult,
|
||||
type ToolResult,
|
||||
} from "../../../domain/value-objects/ToolResult.js"
|
||||
|
||||
/**
|
||||
* A single reference to a symbol.
|
||||
*/
|
||||
export interface SymbolReference {
|
||||
path: string
|
||||
line: number
|
||||
column: number
|
||||
context: string
|
||||
isDefinition: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
* Result data from find_references tool.
|
||||
*/
|
||||
export interface FindReferencesResult {
|
||||
symbol: string
|
||||
totalReferences: number
|
||||
files: number
|
||||
references: SymbolReference[]
|
||||
definitionLocations: {
|
||||
path: string
|
||||
line: number
|
||||
type: string
|
||||
}[]
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool for finding all usages of a symbol across the codebase.
|
||||
* Searches through indexed files for symbol references.
|
||||
*/
|
||||
export class FindReferencesTool implements ITool {
|
||||
readonly name = "find_references"
|
||||
readonly description =
|
||||
"Find all usages of a symbol across the codebase. " +
|
||||
"Returns list of file paths, line numbers, and context."
|
||||
readonly parameters: ToolParameterSchema[] = [
|
||||
{
|
||||
name: "symbol",
|
||||
type: "string",
|
||||
description: "Symbol name to search for (function, class, variable, etc.)",
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: "path",
|
||||
type: "string",
|
||||
description: "Limit search to specific file or directory",
|
||||
required: false,
|
||||
},
|
||||
]
|
||||
readonly requiresConfirmation = false
|
||||
readonly category = "search" as const
|
||||
|
||||
private readonly contextLines = 1
|
||||
|
||||
validateParams(params: Record<string, unknown>): string | null {
|
||||
if (typeof params.symbol !== "string" || params.symbol.trim() === "") {
|
||||
return "Parameter 'symbol' is required and must be a non-empty string"
|
||||
}
|
||||
|
||||
if (params.path !== undefined && typeof params.path !== "string") {
|
||||
return "Parameter 'path' must be a string"
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
async execute(params: Record<string, unknown>, ctx: ToolContext): Promise<ToolResult> {
|
||||
const startTime = Date.now()
|
||||
const callId = `${this.name}-${String(startTime)}`
|
||||
|
||||
const symbol = (params.symbol as string).trim()
|
||||
const filterPath = params.path as string | undefined
|
||||
|
||||
try {
|
||||
const symbolIndex = await ctx.storage.getSymbolIndex()
|
||||
const definitionLocations = symbolIndex.get(symbol) ?? []
|
||||
|
||||
const allFiles = await ctx.storage.getAllFiles()
|
||||
const filesToSearch = this.filterFiles(allFiles, filterPath, ctx.projectRoot)
|
||||
|
||||
if (filesToSearch.size === 0) {
|
||||
return createSuccessResult(
|
||||
callId,
|
||||
{
|
||||
symbol,
|
||||
totalReferences: 0,
|
||||
files: 0,
|
||||
references: [],
|
||||
definitionLocations: definitionLocations.map((loc) => ({
|
||||
path: loc.path,
|
||||
line: loc.line,
|
||||
type: loc.type,
|
||||
})),
|
||||
} satisfies FindReferencesResult,
|
||||
Date.now() - startTime,
|
||||
)
|
||||
}
|
||||
|
||||
const references: SymbolReference[] = []
|
||||
const filesWithReferences = new Set<string>()
|
||||
|
||||
for (const [filePath, fileData] of filesToSearch) {
|
||||
const fileRefs = this.findReferencesInFile(
|
||||
filePath,
|
||||
fileData.lines,
|
||||
symbol,
|
||||
definitionLocations,
|
||||
)
|
||||
|
||||
if (fileRefs.length > 0) {
|
||||
filesWithReferences.add(filePath)
|
||||
references.push(...fileRefs)
|
||||
}
|
||||
}
|
||||
|
||||
references.sort((a, b) => {
|
||||
const pathCompare = a.path.localeCompare(b.path)
|
||||
if (pathCompare !== 0) {
|
||||
return pathCompare
|
||||
}
|
||||
return a.line - b.line
|
||||
})
|
||||
|
||||
const result: FindReferencesResult = {
|
||||
symbol,
|
||||
totalReferences: references.length,
|
||||
files: filesWithReferences.size,
|
||||
references,
|
||||
definitionLocations: definitionLocations.map((loc) => ({
|
||||
path: loc.path,
|
||||
line: loc.line,
|
||||
type: loc.type,
|
||||
})),
|
||||
}
|
||||
|
||||
return createSuccessResult(callId, result, Date.now() - startTime)
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error)
|
||||
return createErrorResult(callId, message, Date.now() - startTime)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter files by path prefix if specified.
|
||||
*/
|
||||
private filterFiles(
|
||||
allFiles: Map<string, { lines: string[] }>,
|
||||
filterPath: string | undefined,
|
||||
projectRoot: string,
|
||||
): Map<string, { lines: string[] }> {
|
||||
if (!filterPath) {
|
||||
return allFiles
|
||||
}
|
||||
|
||||
const normalizedFilter = filterPath.startsWith("/")
|
||||
? path.relative(projectRoot, filterPath)
|
||||
: filterPath
|
||||
|
||||
const filtered = new Map<string, { lines: string[] }>()
|
||||
for (const [filePath, fileData] of allFiles) {
|
||||
if (filePath === normalizedFilter || filePath.startsWith(`${normalizedFilter}/`)) {
|
||||
filtered.set(filePath, fileData)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
|
||||
/**
|
||||
* Find all references to the symbol in a file.
|
||||
*/
|
||||
private findReferencesInFile(
|
||||
filePath: string,
|
||||
lines: string[],
|
||||
symbol: string,
|
||||
definitionLocations: { path: string; line: number }[],
|
||||
): SymbolReference[] {
|
||||
const references: SymbolReference[] = []
|
||||
const symbolRegex = this.createSymbolRegex(symbol)
|
||||
|
||||
for (let lineIndex = 0; lineIndex < lines.length; lineIndex++) {
|
||||
const line = lines[lineIndex]
|
||||
const lineNumber = lineIndex + 1
|
||||
let match: RegExpExecArray | null
|
||||
|
||||
symbolRegex.lastIndex = 0
|
||||
while ((match = symbolRegex.exec(line)) !== null) {
|
||||
const column = match.index + 1
|
||||
const context = this.buildContext(lines, lineIndex)
|
||||
const isDefinition = this.isDefinitionLine(
|
||||
filePath,
|
||||
lineNumber,
|
||||
definitionLocations,
|
||||
)
|
||||
|
||||
references.push({
|
||||
path: filePath,
|
||||
line: lineNumber,
|
||||
column,
|
||||
context,
|
||||
isDefinition,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return references
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a regex for matching the symbol with appropriate boundaries.
|
||||
* Handles symbols that start or end with non-word characters (like $value).
|
||||
*/
|
||||
private createSymbolRegex(symbol: string): RegExp {
|
||||
const escaped = symbol.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")
|
||||
|
||||
const startsWithWordChar = /^\w/.test(symbol)
|
||||
const endsWithWordChar = /\w$/.test(symbol)
|
||||
|
||||
const prefix = startsWithWordChar ? "\\b" : "(?<![\\w$])"
|
||||
const suffix = endsWithWordChar ? "\\b" : "(?![\\w$])"
|
||||
|
||||
return new RegExp(`${prefix}${escaped}${suffix}`, "g")
|
||||
}
|
||||
|
||||
/**
|
||||
* Build context string with surrounding lines.
|
||||
*/
|
||||
private buildContext(lines: string[], currentIndex: number): string {
|
||||
const startIndex = Math.max(0, currentIndex - this.contextLines)
|
||||
const endIndex = Math.min(lines.length - 1, currentIndex + this.contextLines)
|
||||
|
||||
const contextLines: string[] = []
|
||||
for (let i = startIndex; i <= endIndex; i++) {
|
||||
const lineNum = i + 1
|
||||
const prefix = i === currentIndex ? ">" : " "
|
||||
contextLines.push(`${prefix}${String(lineNum).padStart(4)}│${lines[i]}`)
|
||||
}
|
||||
|
||||
return contextLines.join("\n")
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if this line is a definition location.
|
||||
*/
|
||||
private isDefinitionLine(
|
||||
filePath: string,
|
||||
lineNumber: number,
|
||||
definitionLocations: { path: string; line: number }[],
|
||||
): boolean {
|
||||
return definitionLocations.some((loc) => loc.path === filePath && loc.line === lineNumber)
|
||||
}
|
||||
}
|
||||
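The boundary handling in `createSymbolRegex` is the subtle part: identifiers made of word characters get `\b` on both sides, while a symbol such as `$value` gets a negative lookbehind on the `$` side because `\b` does not treat `$` as a word character. A standalone sketch of the two cases (the method itself is private, so these are hand-built equivalents):

```typescript
// Equivalent regexes to what createSymbolRegex would build for these two symbols.
const wordSymbol = /\bgetUser\b/g // word chars at both ends -> \b boundaries
const dollarSymbol = /(?<![\w$])\$value\b/g // leading "$" -> negative lookbehind instead of \b

console.log("const user = getUser(id)".match(wordSymbol)) // [ "getUser" ]
console.log("const x = $value + other$value".match(dollarSymbol)) // [ "$value" ], other$value is skipped
```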
12
packages/ipuaro/src/infrastructure/tools/search/index.ts
Normal file
@@ -0,0 +1,12 @@
// Search tools exports
export {
    FindReferencesTool,
    type FindReferencesResult,
    type SymbolReference,
} from "./FindReferencesTool.js"

export {
    FindDefinitionTool,
    type FindDefinitionResult,
    type DefinitionLocation,
} from "./FindDefinitionTool.js"
167
packages/ipuaro/src/tui/App.tsx
Normal file
@@ -0,0 +1,167 @@
|
||||
/**
|
||||
* Main TUI App component.
|
||||
* Orchestrates the terminal user interface.
|
||||
*/
|
||||
|
||||
import { Box, Text, useApp } from "ink"
|
||||
import React, { useCallback, useEffect, useState } from "react"
|
||||
import type { ILLMClient } from "../domain/services/ILLMClient.js"
|
||||
import type { ISessionStorage } from "../domain/services/ISessionStorage.js"
|
||||
import type { IStorage } from "../domain/services/IStorage.js"
|
||||
import type { DiffInfo } from "../domain/services/ITool.js"
|
||||
import type { ErrorChoice } from "../shared/types/index.js"
|
||||
import type { IToolRegistry } from "../application/interfaces/IToolRegistry.js"
|
||||
import type { ProjectStructure } from "../infrastructure/llm/prompts.js"
|
||||
import { Chat, Input, StatusBar } from "./components/index.js"
|
||||
import { useHotkeys, useSession } from "./hooks/index.js"
|
||||
import type { AppProps, BranchInfo } from "./types.js"
|
||||
|
||||
export interface AppDependencies {
|
||||
storage: IStorage
|
||||
sessionStorage: ISessionStorage
|
||||
llm: ILLMClient
|
||||
tools: IToolRegistry
|
||||
projectStructure?: ProjectStructure
|
||||
}
|
||||
|
||||
export interface ExtendedAppProps extends AppProps {
|
||||
deps: AppDependencies
|
||||
onExit?: () => void
|
||||
}
|
||||
|
||||
function LoadingScreen(): React.JSX.Element {
|
||||
return (
|
||||
<Box flexDirection="column" padding={1}>
|
||||
<Text color="cyan">Loading session...</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
function ErrorScreen({ error }: { error: Error }): React.JSX.Element {
|
||||
return (
|
||||
<Box flexDirection="column" padding={1}>
|
||||
<Text color="red" bold>
|
||||
Error
|
||||
</Text>
|
||||
<Text color="red">{error.message}</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
async function handleConfirmationDefault(_message: string, _diff?: DiffInfo): Promise<boolean> {
|
||||
return Promise.resolve(true)
|
||||
}
|
||||
|
||||
async function handleErrorDefault(_error: Error): Promise<ErrorChoice> {
|
||||
return Promise.resolve("skip")
|
||||
}
|
||||
|
||||
export function App({
|
||||
projectPath,
|
||||
autoApply = false,
|
||||
deps,
|
||||
onExit,
|
||||
}: ExtendedAppProps): React.JSX.Element {
|
||||
const { exit } = useApp()
|
||||
|
||||
const [branch] = useState<BranchInfo>({ name: "main", isDetached: false })
|
||||
const [sessionTime, setSessionTime] = useState("0m")
|
||||
|
||||
const projectName = projectPath.split("/").pop() ?? "unknown"
|
||||
|
||||
const { session, messages, status, isLoading, error, sendMessage, undo, abort } = useSession(
|
||||
{
|
||||
storage: deps.storage,
|
||||
sessionStorage: deps.sessionStorage,
|
||||
llm: deps.llm,
|
||||
tools: deps.tools,
|
||||
projectRoot: projectPath,
|
||||
projectName,
|
||||
projectStructure: deps.projectStructure,
|
||||
},
|
||||
{
|
||||
autoApply,
|
||||
onConfirmation: handleConfirmationDefault,
|
||||
onError: handleErrorDefault,
|
||||
},
|
||||
)
|
||||
|
||||
const handleExit = useCallback((): void => {
|
||||
onExit?.()
|
||||
exit()
|
||||
}, [exit, onExit])
|
||||
|
||||
const handleInterrupt = useCallback((): void => {
|
||||
if (status === "thinking" || status === "tool_call") {
|
||||
abort()
|
||||
}
|
||||
}, [status, abort])
|
||||
|
||||
const handleUndo = useCallback((): void => {
|
||||
void undo()
|
||||
}, [undo])
|
||||
|
||||
useHotkeys(
|
||||
{
|
||||
onInterrupt: handleInterrupt,
|
||||
onExit: handleExit,
|
||||
onUndo: handleUndo,
|
||||
},
|
||||
{ enabled: !isLoading },
|
||||
)
|
||||
|
||||
useEffect(() => {
|
||||
if (!session) {
|
||||
return
|
||||
}
|
||||
|
||||
const interval = setInterval(() => {
|
||||
setSessionTime(session.getSessionDurationFormatted())
|
||||
}, 60_000)
|
||||
|
||||
setSessionTime(session.getSessionDurationFormatted())
|
||||
|
||||
return (): void => {
|
||||
clearInterval(interval)
|
||||
}
|
||||
}, [session])
|
||||
|
||||
const handleSubmit = useCallback(
|
||||
(text: string): void => {
|
||||
if (text.startsWith("/")) {
|
||||
return
|
||||
}
|
||||
void sendMessage(text)
|
||||
},
|
||||
[sendMessage],
|
||||
)
|
||||
|
||||
if (isLoading) {
|
||||
return <LoadingScreen />
|
||||
}
|
||||
|
||||
if (error) {
|
||||
return <ErrorScreen error={error} />
|
||||
}
|
||||
|
||||
const isInputDisabled = status === "thinking" || status === "tool_call"
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" height="100%">
|
||||
<StatusBar
|
||||
contextUsage={session?.context.tokenUsage ?? 0}
|
||||
projectName={projectName}
|
||||
branch={branch}
|
||||
sessionTime={sessionTime}
|
||||
status={status}
|
||||
/>
|
||||
<Chat messages={messages} isThinking={status === "thinking"} />
|
||||
<Input
|
||||
onSubmit={handleSubmit}
|
||||
history={session?.inputHistory ?? []}
|
||||
disabled={isInputDisabled}
|
||||
placeholder={isInputDisabled ? "Processing..." : "Type a message..."}
|
||||
/>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
170
packages/ipuaro/src/tui/components/Chat.tsx
Normal file
@@ -0,0 +1,170 @@
|
||||
/**
|
||||
* Chat component for TUI.
|
||||
* Displays message history with tool calls and stats.
|
||||
*/
|
||||
|
||||
import { Box, Text } from "ink"
|
||||
import type React from "react"
|
||||
import type { ChatMessage } from "../../domain/value-objects/ChatMessage.js"
|
||||
import type { ToolCall } from "../../domain/value-objects/ToolCall.js"
|
||||
|
||||
export interface ChatProps {
|
||||
messages: ChatMessage[]
|
||||
isThinking: boolean
|
||||
}
|
||||
|
||||
function formatTimestamp(timestamp: number): string {
|
||||
const date = new Date(timestamp)
|
||||
const hours = String(date.getHours()).padStart(2, "0")
|
||||
const minutes = String(date.getMinutes()).padStart(2, "0")
|
||||
return `${hours}:${minutes}`
|
||||
}
|
||||
|
||||
function formatStats(stats: ChatMessage["stats"]): string {
|
||||
if (!stats) {
|
||||
return ""
|
||||
}
|
||||
const time = (stats.timeMs / 1000).toFixed(1)
|
||||
const tokens = stats.tokens.toLocaleString()
|
||||
const tools = stats.toolCalls
|
||||
|
||||
const parts = [`${time}s`, `${tokens} tokens`]
|
||||
if (tools > 0) {
|
||||
parts.push(`${String(tools)} tool${tools > 1 ? "s" : ""}`)
|
||||
}
|
||||
return parts.join(" | ")
|
||||
}
|
||||
|
||||
function formatToolCall(call: ToolCall): string {
|
||||
const params = Object.entries(call.params)
|
||||
.map(([k, v]) => `${k}=${JSON.stringify(v)}`)
|
||||
.join(" ")
|
||||
return `[${call.name} ${params}]`
|
||||
}
|
||||
|
||||
function UserMessage({ message }: { message: ChatMessage }): React.JSX.Element {
|
||||
return (
|
||||
<Box flexDirection="column" marginBottom={1}>
|
||||
<Box gap={1}>
|
||||
<Text color="green" bold>
|
||||
You
|
||||
</Text>
|
||||
<Text color="gray" dimColor>
|
||||
{formatTimestamp(message.timestamp)}
|
||||
</Text>
|
||||
</Box>
|
||||
<Box marginLeft={2}>
|
||||
<Text>{message.content}</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
function AssistantMessage({ message }: { message: ChatMessage }): React.JSX.Element {
|
||||
const stats = formatStats(message.stats)
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" marginBottom={1}>
|
||||
<Box gap={1}>
|
||||
<Text color="cyan" bold>
|
||||
Assistant
|
||||
</Text>
|
||||
<Text color="gray" dimColor>
|
||||
{formatTimestamp(message.timestamp)}
|
||||
</Text>
|
||||
</Box>
|
||||
|
||||
{message.toolCalls && message.toolCalls.length > 0 && (
|
||||
<Box flexDirection="column" marginLeft={2} marginBottom={1}>
|
||||
{message.toolCalls.map((call) => (
|
||||
<Text key={call.id} color="yellow">
|
||||
{formatToolCall(call)}
|
||||
</Text>
|
||||
))}
|
||||
</Box>
|
||||
)}
|
||||
|
||||
{message.content && (
|
||||
<Box marginLeft={2}>
|
||||
<Text>{message.content}</Text>
|
||||
</Box>
|
||||
)}
|
||||
|
||||
{stats && (
|
||||
<Box marginLeft={2} marginTop={1}>
|
||||
<Text color="gray" dimColor>
|
||||
{stats}
|
||||
</Text>
|
||||
</Box>
|
||||
)}
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
function ToolMessage({ message }: { message: ChatMessage }): React.JSX.Element {
|
||||
return (
|
||||
<Box flexDirection="column" marginBottom={1} marginLeft={2}>
|
||||
{message.toolResults?.map((result) => (
|
||||
<Box key={result.callId} flexDirection="column">
|
||||
<Text color={result.success ? "green" : "red"}>
|
||||
{result.success ? "+" : "x"} {result.callId.slice(0, 8)}
|
||||
</Text>
|
||||
</Box>
|
||||
))}
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
function SystemMessage({ message }: { message: ChatMessage }): React.JSX.Element {
|
||||
const isError = message.content.toLowerCase().startsWith("error")
|
||||
|
||||
return (
|
||||
<Box marginBottom={1} marginLeft={2}>
|
||||
<Text color={isError ? "red" : "gray"} dimColor={!isError}>
|
||||
{message.content}
|
||||
</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
function MessageComponent({ message }: { message: ChatMessage }): React.JSX.Element {
|
||||
switch (message.role) {
|
||||
case "user": {
|
||||
return <UserMessage message={message} />
|
||||
}
|
||||
case "assistant": {
|
||||
return <AssistantMessage message={message} />
|
||||
}
|
||||
case "tool": {
|
||||
return <ToolMessage message={message} />
|
||||
}
|
||||
case "system": {
|
||||
return <SystemMessage message={message} />
|
||||
}
|
||||
default: {
|
||||
return <></>
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function ThinkingIndicator(): React.JSX.Element {
|
||||
return (
|
||||
<Box marginBottom={1}>
|
||||
<Text color="yellow">Thinking...</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
export function Chat({ messages, isThinking }: ChatProps): React.JSX.Element {
|
||||
return (
|
||||
<Box flexDirection="column" flexGrow={1} paddingX={1}>
|
||||
{messages.map((message, index) => (
|
||||
<MessageComponent
|
||||
key={`${String(message.timestamp)}-${String(index)}`}
|
||||
message={message}
|
||||
/>
|
||||
))}
|
||||
{isThinking && <ThinkingIndicator />}
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
83
packages/ipuaro/src/tui/components/ConfirmDialog.tsx
Normal file
@@ -0,0 +1,83 @@
/**
 * ConfirmDialog component for TUI.
 * Displays a confirmation dialog with [Y] Apply / [N] Cancel / [E] Edit options.
 */

import { Box, Text, useInput } from "ink"
import React, { useState } from "react"
import type { ConfirmChoice } from "../../shared/types/index.js"
import { DiffView, type DiffViewProps } from "./DiffView.js"

export interface ConfirmDialogProps {
    message: string
    diff?: DiffViewProps
    onSelect: (choice: ConfirmChoice) => void
}

function ChoiceButton({
    hotkey,
    label,
    isSelected,
}: {
    hotkey: string
    label: string
    isSelected: boolean
}): React.JSX.Element {
    return (
        <Box>
            <Text color={isSelected ? "cyan" : "gray"}>
                [<Text bold>{hotkey}</Text>] {label}
            </Text>
        </Box>
    )
}

export function ConfirmDialog({ message, diff, onSelect }: ConfirmDialogProps): React.JSX.Element {
    const [selected, setSelected] = useState<ConfirmChoice | null>(null)

    useInput((input, key) => {
        const lowerInput = input.toLowerCase()

        if (lowerInput === "y") {
            setSelected("apply")
            onSelect("apply")
        } else if (lowerInput === "n") {
            setSelected("cancel")
            onSelect("cancel")
        } else if (lowerInput === "e") {
            setSelected("edit")
            onSelect("edit")
        } else if (key.escape) {
            setSelected("cancel")
            onSelect("cancel")
        }
    })

    return (
        <Box
            flexDirection="column"
            borderStyle="round"
            borderColor="yellow"
            paddingX={1}
            paddingY={1}
        >
            <Box marginBottom={1}>
                <Text color="yellow" bold>
                    ⚠ {message}
                </Text>
            </Box>

            {diff && (
                <Box marginBottom={1}>
                    <DiffView {...diff} />
                </Box>
            )}

            <Box gap={2}>
                <ChoiceButton hotkey="Y" label="Apply" isSelected={selected === "apply"} />
                <ChoiceButton hotkey="N" label="Cancel" isSelected={selected === "cancel"} />
                <ChoiceButton hotkey="E" label="Edit" isSelected={selected === "edit"} />
            </Box>
        </Box>
    )
}
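A throwaway render of the dialog with ink, only to show the expected props; the import paths are assumptions:

```tsx
import { render } from "ink"
import React from "react"
import { ConfirmDialog } from "./ConfirmDialog.js"

render(
    <ConfirmDialog
        message="Apply changes to src/index.ts?"
        onSelect={(choice) => {
            console.log(`user chose: ${choice}`)
        }}
    />,
)
```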
193
packages/ipuaro/src/tui/components/DiffView.tsx
Normal file
@@ -0,0 +1,193 @@
|
||||
/**
|
||||
* DiffView component for TUI.
|
||||
* Displays inline diff with green (added) and red (removed) highlighting.
|
||||
*/
|
||||
|
||||
import { Box, Text } from "ink"
|
||||
import type React from "react"
|
||||
|
||||
export interface DiffViewProps {
|
||||
filePath: string
|
||||
oldLines: string[]
|
||||
newLines: string[]
|
||||
startLine: number
|
||||
}
|
||||
|
||||
interface DiffLine {
|
||||
type: "add" | "remove" | "context"
|
||||
content: string
|
||||
lineNumber?: number
|
||||
}
|
||||
|
||||
function computeDiff(oldLines: string[], newLines: string[], startLine: number): DiffLine[] {
|
||||
const result: DiffLine[] = []
|
||||
|
||||
let oldIdx = 0
|
||||
let newIdx = 0
|
||||
|
||||
while (oldIdx < oldLines.length || newIdx < newLines.length) {
|
||||
const oldLine = oldIdx < oldLines.length ? oldLines[oldIdx] : undefined
|
||||
const newLine = newIdx < newLines.length ? newLines[newIdx] : undefined
|
||||
|
||||
if (oldLine === newLine) {
|
||||
result.push({
|
||||
type: "context",
|
||||
content: oldLine ?? "",
|
||||
lineNumber: startLine + newIdx,
|
||||
})
|
||||
oldIdx++
|
||||
newIdx++
|
||||
} else {
|
||||
if (oldLine !== undefined) {
|
||||
result.push({
|
||||
type: "remove",
|
||||
content: oldLine,
|
||||
})
|
||||
oldIdx++
|
||||
}
|
||||
if (newLine !== undefined) {
|
||||
result.push({
|
||||
type: "add",
|
||||
content: newLine,
|
||||
lineNumber: startLine + newIdx,
|
||||
})
|
||||
newIdx++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
function getLinePrefix(line: DiffLine): string {
|
||||
switch (line.type) {
|
||||
case "add": {
|
||||
return "+"
|
||||
}
|
||||
case "remove": {
|
||||
return "-"
|
||||
}
|
||||
case "context": {
|
||||
return " "
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function getLineColor(line: DiffLine): string {
|
||||
switch (line.type) {
|
||||
case "add": {
|
||||
return "green"
|
||||
}
|
||||
case "remove": {
|
||||
return "red"
|
||||
}
|
||||
case "context": {
|
||||
return "gray"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function formatLineNumber(num: number | undefined, width: number): string {
|
||||
if (num === undefined) {
|
||||
return " ".repeat(width)
|
||||
}
|
||||
return String(num).padStart(width, " ")
|
||||
}
|
||||
|
||||
function DiffLine({
|
||||
line,
|
||||
lineNumberWidth,
|
||||
}: {
|
||||
line: DiffLine
|
||||
lineNumberWidth: number
|
||||
}): React.JSX.Element {
|
||||
const prefix = getLinePrefix(line)
|
||||
const color = getLineColor(line)
|
||||
const lineNum = formatLineNumber(line.lineNumber, lineNumberWidth)
|
||||
|
||||
return (
|
||||
<Box>
|
||||
<Text color="gray">{lineNum} </Text>
|
||||
<Text color={color}>
|
||||
{prefix} {line.content}
|
||||
</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
function DiffHeader({
|
||||
filePath,
|
||||
startLine,
|
||||
endLine,
|
||||
}: {
|
||||
filePath: string
|
||||
startLine: number
|
||||
endLine: number
|
||||
}): React.JSX.Element {
|
||||
const lineRange =
|
||||
startLine === endLine
|
||||
? `line ${String(startLine)}`
|
||||
: `lines ${String(startLine)}-${String(endLine)}`
|
||||
|
||||
return (
|
||||
<Box>
|
||||
<Text color="gray">┌─── </Text>
|
||||
<Text color="cyan">{filePath}</Text>
|
||||
<Text color="gray"> ({lineRange}) ───┐</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
function DiffFooter(): React.JSX.Element {
|
||||
return (
|
||||
<Box>
|
||||
<Text color="gray">└───────────────────────────────────────┘</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
function DiffStats({
|
||||
additions,
|
||||
deletions,
|
||||
}: {
|
||||
additions: number
|
||||
deletions: number
|
||||
}): React.JSX.Element {
|
||||
return (
|
||||
<Box gap={1} marginTop={1}>
|
||||
<Text color="green">+{String(additions)}</Text>
|
||||
<Text color="red">-{String(deletions)}</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
export function DiffView({
|
||||
filePath,
|
||||
oldLines,
|
||||
newLines,
|
||||
startLine,
|
||||
}: DiffViewProps): React.JSX.Element {
|
||||
const diffLines = computeDiff(oldLines, newLines, startLine)
|
||||
const endLine = startLine + newLines.length - 1
|
||||
const lineNumberWidth = String(endLine).length
|
||||
|
||||
const additions = diffLines.filter((l) => l.type === "add").length
|
||||
const deletions = diffLines.filter((l) => l.type === "remove").length
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" paddingX={1}>
|
||||
<DiffHeader filePath={filePath} startLine={startLine} endLine={endLine} />
|
||||
<Box flexDirection="column" paddingX={1}>
|
||||
{diffLines.map((line, index) => (
|
||||
<DiffLine
|
||||
key={`${line.type}-${String(index)}`}
|
||||
line={line}
|
||||
lineNumberWidth={lineNumberWidth}
|
||||
/>
|
||||
))}
|
||||
</Box>
|
||||
<DiffFooter />
|
||||
<DiffStats additions={additions} deletions={deletions} />
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
105
packages/ipuaro/src/tui/components/ErrorDialog.tsx
Normal file
105
packages/ipuaro/src/tui/components/ErrorDialog.tsx
Normal file
@@ -0,0 +1,105 @@
|
||||
/**
|
||||
* ErrorDialog component for TUI.
|
||||
* Displays an error with [R] Retry / [S] Skip / [A] Abort options.
|
||||
*/
|
||||
|
||||
import { Box, Text, useInput } from "ink"
|
||||
import React, { useState } from "react"
|
||||
import type { ErrorChoice } from "../../shared/types/index.js"
|
||||
|
||||
export interface ErrorInfo {
|
||||
type: string
|
||||
message: string
|
||||
recoverable: boolean
|
||||
}
|
||||
|
||||
export interface ErrorDialogProps {
|
||||
error: ErrorInfo
|
||||
onChoice: (choice: ErrorChoice) => void
|
||||
}
|
||||
|
||||
function ChoiceButton({
|
||||
hotkey,
|
||||
label,
|
||||
isSelected,
|
||||
disabled,
|
||||
}: {
|
||||
hotkey: string
|
||||
label: string
|
||||
isSelected: boolean
|
||||
disabled?: boolean
|
||||
}): React.JSX.Element {
|
||||
if (disabled) {
|
||||
return (
|
||||
<Box>
|
||||
<Text color="gray" dimColor>
|
||||
[{hotkey}] {label}
|
||||
</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<Box>
|
||||
<Text color={isSelected ? "cyan" : "gray"}>
|
||||
[<Text bold>{hotkey}</Text>] {label}
|
||||
</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
|
||||
export function ErrorDialog({ error, onChoice }: ErrorDialogProps): React.JSX.Element {
|
||||
const [selected, setSelected] = useState<ErrorChoice | null>(null)
|
||||
|
||||
useInput((input, key) => {
|
||||
const lowerInput = input.toLowerCase()
|
||||
|
||||
if (lowerInput === "r" && error.recoverable) {
|
||||
setSelected("retry")
|
||||
onChoice("retry")
|
||||
} else if (lowerInput === "s" && error.recoverable) {
|
||||
setSelected("skip")
|
||||
onChoice("skip")
|
||||
} else if (lowerInput === "a") {
|
||||
setSelected("abort")
|
||||
onChoice("abort")
|
||||
} else if (key.escape) {
|
||||
setSelected("abort")
|
||||
onChoice("abort")
|
||||
}
|
||||
})
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" borderStyle="round" borderColor="red" paddingX={1} paddingY={1}>
|
||||
<Box marginBottom={1}>
|
||||
<Text color="red" bold>
|
||||
x {error.type}: {error.message}
|
||||
</Text>
|
||||
</Box>
|
||||
|
||||
<Box gap={2}>
|
||||
<ChoiceButton
|
||||
hotkey="R"
|
||||
label="Retry"
|
||||
isSelected={selected === "retry"}
|
||||
disabled={!error.recoverable}
|
||||
/>
|
||||
<ChoiceButton
|
||||
hotkey="S"
|
||||
label="Skip"
|
||||
isSelected={selected === "skip"}
|
||||
disabled={!error.recoverable}
|
||||
/>
|
||||
<ChoiceButton hotkey="A" label="Abort" isSelected={selected === "abort"} />
|
||||
</Box>
|
||||
|
||||
{!error.recoverable && (
|
||||
<Box marginTop={1}>
|
||||
<Text color="gray" dimColor>
|
||||
This error is not recoverable. Press [A] to abort.
|
||||
</Text>
|
||||
</Box>
|
||||
)}
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
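A minimal usage sketch for ErrorDialog, assuming ErrorChoice is the "retry" | "skip" | "abort" union handled inside the component; the error values are illustrative:

import { render } from "ink"
import React from "react"
import { ErrorDialog } from "./ErrorDialog.js"

render(
    <ErrorDialog
        error={{ type: "ToolError", message: "edit_lines failed", recoverable: true }}
        onChoice={(choice) => {
            // choice is "retry", "skip" or "abort"; Escape also maps to "abort".
            process.stdout.write(`user chose: ${choice}\n`)
        }}
    />,
)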
packages/ipuaro/src/tui/components/Input.tsx (new file, 99 lines)
@@ -0,0 +1,99 @@
|
||||
/**
|
||||
* Input component for TUI.
|
||||
* Prompt with history navigation (up/down) and path autocomplete (tab).
|
||||
*/
|
||||
|
||||
import { Box, Text, useInput } from "ink"
|
||||
import TextInput from "ink-text-input"
|
||||
import React, { useCallback, useState } from "react"
|
||||
|
||||
export interface InputProps {
|
||||
onSubmit: (text: string) => void
|
||||
history: string[]
|
||||
disabled: boolean
|
||||
placeholder?: string
|
||||
}
|
||||
|
||||
export function Input({
|
||||
onSubmit,
|
||||
history,
|
||||
disabled,
|
||||
placeholder = "Type a message...",
|
||||
}: InputProps): React.JSX.Element {
|
||||
const [value, setValue] = useState("")
|
||||
const [historyIndex, setHistoryIndex] = useState(-1)
|
||||
const [savedInput, setSavedInput] = useState("")
|
||||
|
||||
const handleChange = useCallback((newValue: string) => {
|
||||
setValue(newValue)
|
||||
setHistoryIndex(-1)
|
||||
}, [])
|
||||
|
||||
const handleSubmit = useCallback(
|
||||
(text: string) => {
|
||||
if (disabled || !text.trim()) {
|
||||
return
|
||||
}
|
||||
onSubmit(text)
|
||||
setValue("")
|
||||
setHistoryIndex(-1)
|
||||
setSavedInput("")
|
||||
},
|
||||
[disabled, onSubmit],
|
||||
)
|
||||
|
||||
useInput(
|
||||
(input, key) => {
|
||||
if (disabled) {
|
||||
return
|
||||
}
|
||||
|
||||
if (key.upArrow && history.length > 0) {
|
||||
if (historyIndex === -1) {
|
||||
setSavedInput(value)
|
||||
}
|
||||
|
||||
const newIndex =
|
||||
historyIndex === -1 ? history.length - 1 : Math.max(0, historyIndex - 1)
|
||||
setHistoryIndex(newIndex)
|
||||
setValue(history[newIndex] ?? "")
|
||||
}
|
||||
|
||||
if (key.downArrow) {
|
||||
if (historyIndex === -1) {
|
||||
return
|
||||
}
|
||||
|
||||
if (historyIndex >= history.length - 1) {
|
||||
setHistoryIndex(-1)
|
||||
setValue(savedInput)
|
||||
} else {
|
||||
const newIndex = historyIndex + 1
|
||||
setHistoryIndex(newIndex)
|
||||
setValue(history[newIndex] ?? "")
|
||||
}
|
||||
}
|
||||
},
|
||||
{ isActive: !disabled },
|
||||
)
|
||||
|
||||
return (
|
||||
<Box borderStyle="single" borderColor={disabled ? "gray" : "cyan"} paddingX={1}>
|
||||
<Text color={disabled ? "gray" : "green"} bold>
|
||||
{">"}{" "}
|
||||
</Text>
|
||||
{disabled ? (
|
||||
<Text color="gray" dimColor>
|
||||
{placeholder}
|
||||
</Text>
|
||||
) : (
|
||||
<TextInput
|
||||
value={value}
|
||||
onChange={handleChange}
|
||||
onSubmit={handleSubmit}
|
||||
placeholder={placeholder}
|
||||
/>
|
||||
)}
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
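A minimal usage sketch for Input, assuming the parent tracks the submitted history itself (the component only reads it for up/down navigation; in the real app this is presumably session.inputHistory). The state wiring below is illustrative:

import { Box } from "ink"
import React, { useState } from "react"
import { Input } from "./Input.js"

function PromptExample(): React.JSX.Element {
    const [history, setHistory] = useState<string[]>([])

    return (
        <Box>
            <Input
                history={history}
                disabled={false}
                onSubmit={(text) => {
                    // Append each submitted line so up-arrow can recall it.
                    setHistory((prev) => [...prev, text])
                }}
            />
        </Box>
    )
}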
packages/ipuaro/src/tui/components/Progress.tsx (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
/**
|
||||
* Progress component for TUI.
|
||||
* Displays a progress bar: [=====> ] 45% (120/267 files)
|
||||
*/
|
||||
|
||||
import { Box, Text } from "ink"
|
||||
import type React from "react"
|
||||
|
||||
export interface ProgressProps {
|
||||
current: number
|
||||
total: number
|
||||
label: string
|
||||
width?: number
|
||||
}
|
||||
|
||||
function calculatePercentage(current: number, total: number): number {
|
||||
if (total === 0) {
|
||||
return 0
|
||||
}
|
||||
return Math.min(100, Math.round((current / total) * 100))
|
||||
}
|
||||
|
||||
function createProgressBar(percentage: number, width: number): { filled: string; empty: string } {
|
||||
const filledWidth = Math.round((percentage / 100) * width)
|
||||
const emptyWidth = width - filledWidth
|
||||
|
||||
const filled = "=".repeat(Math.max(0, filledWidth - 1)) + (filledWidth > 0 ? ">" : "")
|
||||
const empty = " ".repeat(Math.max(0, emptyWidth))
|
||||
|
||||
return { filled, empty }
|
||||
}
|
||||
|
||||
function getProgressColor(percentage: number): string {
|
||||
if (percentage >= 100) {
|
||||
return "green"
|
||||
}
|
||||
if (percentage >= 50) {
|
||||
return "yellow"
|
||||
}
|
||||
return "cyan"
|
||||
}
|
||||
|
||||
export function Progress({ current, total, label, width = 30 }: ProgressProps): React.JSX.Element {
|
||||
const percentage = calculatePercentage(current, total)
|
||||
const { filled, empty } = createProgressBar(percentage, width)
|
||||
const color = getProgressColor(percentage)
|
||||
|
||||
return (
|
||||
<Box gap={1}>
|
||||
<Text color="gray">[</Text>
|
||||
<Text color={color}>{filled}</Text>
|
||||
<Text color="gray">{empty}</Text>
|
||||
<Text color="gray">]</Text>
|
||||
<Text color={color} bold>
|
||||
{String(percentage)}%
|
||||
</Text>
|
||||
<Text color="gray">
|
||||
({String(current)}/{String(total)} {label})
|
||||
</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
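A usage sketch for Progress; with the values below the bar renders roughly as [=============>                ] 45% (120/267 files), since 120/267 rounds to 45% and the default width is 30 cells:

import { render } from "ink"
import React from "react"
import { Progress } from "./Progress.js"

// 120 of 267 files processed; uses the default 30-cell bar.
render(<Progress current={120} total={267} label="files" />)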
packages/ipuaro/src/tui/components/StatusBar.tsx (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
/**
|
||||
* StatusBar component for TUI.
|
||||
* Displays: [ipuaro] [ctx: 12%] [project: myapp] [main] [47m] status
|
||||
*/
|
||||
|
||||
import { Box, Text } from "ink"
|
||||
import type React from "react"
|
||||
import type { BranchInfo, TuiStatus } from "../types.js"
|
||||
|
||||
export interface StatusBarProps {
|
||||
contextUsage: number
|
||||
projectName: string
|
||||
branch: BranchInfo
|
||||
sessionTime: string
|
||||
status: TuiStatus
|
||||
}
|
||||
|
||||
function getStatusIndicator(status: TuiStatus): { text: string; color: string } {
|
||||
switch (status) {
|
||||
case "ready": {
|
||||
return { text: "ready", color: "green" }
|
||||
}
|
||||
case "thinking": {
|
||||
return { text: "thinking...", color: "yellow" }
|
||||
}
|
||||
case "tool_call": {
|
||||
return { text: "executing...", color: "cyan" }
|
||||
}
|
||||
case "awaiting_confirmation": {
|
||||
return { text: "confirm?", color: "magenta" }
|
||||
}
|
||||
case "error": {
|
||||
return { text: "error", color: "red" }
|
||||
}
|
||||
default: {
|
||||
return { text: "ready", color: "green" }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function formatContextUsage(usage: number): string {
|
||||
return `${String(Math.round(usage * 100))}%`
|
||||
}
|
||||
|
||||
export function StatusBar({
|
||||
contextUsage,
|
||||
projectName,
|
||||
branch,
|
||||
sessionTime,
|
||||
status,
|
||||
}: StatusBarProps): React.JSX.Element {
|
||||
const statusIndicator = getStatusIndicator(status)
|
||||
const branchDisplay = branch.isDetached ? `HEAD@${branch.name.slice(0, 7)}` : branch.name
|
||||
|
||||
return (
|
||||
<Box borderStyle="single" borderColor="gray" paddingX={1} justifyContent="space-between">
|
||||
<Box gap={1}>
|
||||
<Text color="cyan" bold>
|
||||
[ipuaro]
|
||||
</Text>
|
||||
<Text color="gray">
|
||||
[ctx:{" "}
|
||||
<Text color={contextUsage > 0.8 ? "red" : "white"}>
|
||||
{formatContextUsage(contextUsage)}
|
||||
</Text>
|
||||
]
|
||||
</Text>
|
||||
<Text color="gray">
|
||||
[<Text color="blue">{projectName}</Text>]
|
||||
</Text>
|
||||
<Text color="gray">
|
||||
[<Text color="green">{branchDisplay}</Text>]
|
||||
</Text>
|
||||
<Text color="gray">
|
||||
[<Text color="white">{sessionTime}</Text>]
|
||||
</Text>
|
||||
</Box>
|
||||
<Text color={statusIndicator.color}>{statusIndicator.text}</Text>
|
||||
</Box>
|
||||
)
|
||||
}
|
||||
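A usage sketch for StatusBar; the project name, branch and session time are illustrative, and the status value is one of the TuiStatus cases handled above ("ready", "thinking", "tool_call", "awaiting_confirmation", "error"):

import { render } from "ink"
import React from "react"
import { StatusBar } from "./StatusBar.js"

render(
    <StatusBar
        contextUsage={0.12}
        projectName="myapp"
        branch={{ name: "main", isDetached: false }}
        sessionTime="47m"
        status="ready"
    />,
)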
packages/ipuaro/src/tui/components/index.ts (new file, 11 lines)
@@ -0,0 +1,11 @@
/**
 * TUI components.
 */

export { StatusBar, type StatusBarProps } from "./StatusBar.js"
export { Chat, type ChatProps } from "./Chat.js"
export { Input, type InputProps } from "./Input.js"
export { DiffView, type DiffViewProps } from "./DiffView.js"
export { ConfirmDialog, type ConfirmDialogProps } from "./ConfirmDialog.js"
export { ErrorDialog, type ErrorDialogProps, type ErrorInfo } from "./ErrorDialog.js"
export { Progress, type ProgressProps } from "./Progress.js"
packages/ipuaro/src/tui/hooks/index.ts (new file, 11 lines)
@@ -0,0 +1,11 @@
/**
 * TUI hooks.
 */

export {
    useSession,
    type UseSessionDependencies,
    type UseSessionOptions,
    type UseSessionReturn,
} from "./useSession.js"
export { useHotkeys, type HotkeyHandlers, type UseHotkeysOptions } from "./useHotkeys.js"
packages/ipuaro/src/tui/hooks/useHotkeys.ts (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
/**
|
||||
* useHotkeys hook for TUI.
|
||||
* Handles global keyboard shortcuts.
|
||||
*/
|
||||
|
||||
import { useInput } from "ink"
|
||||
import { useCallback, useRef } from "react"
|
||||
|
||||
export interface HotkeyHandlers {
|
||||
onInterrupt?: () => void
|
||||
onExit?: () => void
|
||||
onUndo?: () => void
|
||||
}
|
||||
|
||||
export interface UseHotkeysOptions {
|
||||
enabled?: boolean
|
||||
}
|
||||
|
||||
export function useHotkeys(handlers: HotkeyHandlers, options: UseHotkeysOptions = {}): void {
|
||||
const { enabled = true } = options
|
||||
const interruptCount = useRef(0)
|
||||
const interruptTimer = useRef<ReturnType<typeof setTimeout> | null>(null)
|
||||
|
||||
const resetInterruptCount = useCallback((): void => {
|
||||
interruptCount.current = 0
|
||||
if (interruptTimer.current) {
|
||||
clearTimeout(interruptTimer.current)
|
||||
interruptTimer.current = null
|
||||
}
|
||||
}, [])
|
||||
|
||||
useInput(
|
||||
(_input, key) => {
|
||||
if (key.ctrl && _input === "c") {
|
||||
interruptCount.current++
|
||||
|
||||
if (interruptCount.current === 1) {
|
||||
handlers.onInterrupt?.()
|
||||
|
||||
interruptTimer.current = setTimeout(() => {
|
||||
resetInterruptCount()
|
||||
}, 1000)
|
||||
} else if (interruptCount.current >= 2) {
|
||||
resetInterruptCount()
|
||||
handlers.onExit?.()
|
||||
}
|
||||
}
|
||||
|
||||
if (key.ctrl && _input === "d") {
|
||||
handlers.onExit?.()
|
||||
}
|
||||
|
||||
if (key.ctrl && _input === "z") {
|
||||
handlers.onUndo?.()
|
||||
}
|
||||
},
|
||||
{ isActive: enabled },
|
||||
)
|
||||
}
|
||||
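A usage sketch for useHotkeys inside an Ink component; as implemented above, a single Ctrl+C fires onInterrupt, a second Ctrl+C within one second fires onExit, Ctrl+D exits and Ctrl+Z undoes. The handler bodies are illustrative:

import { useApp } from "ink"
import React from "react"
import { useHotkeys } from "./useHotkeys.js"

function HotkeyExample(): React.JSX.Element | null {
    const { exit } = useApp()

    useHotkeys(
        {
            onInterrupt: () => {
                // e.g. abort the in-flight LLM request
            },
            onExit: () => {
                exit()
            },
            onUndo: () => {
                // e.g. trigger UndoChange for the last applied edit
            },
        },
        { enabled: true },
    )

    return null
}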
packages/ipuaro/src/tui/hooks/useSession.ts (new file, 205 lines)
@@ -0,0 +1,205 @@
|
||||
/**
|
||||
* useSession hook for TUI.
|
||||
* Manages session state and message handling.
|
||||
*/
|
||||
|
||||
import { useCallback, useEffect, useRef, useState } from "react"
|
||||
import type { Session } from "../../domain/entities/Session.js"
|
||||
import type { ILLMClient } from "../../domain/services/ILLMClient.js"
|
||||
import type { ISessionStorage } from "../../domain/services/ISessionStorage.js"
|
||||
import type { IStorage } from "../../domain/services/IStorage.js"
|
||||
import type { DiffInfo } from "../../domain/services/ITool.js"
|
||||
import type { ChatMessage } from "../../domain/value-objects/ChatMessage.js"
|
||||
import type { ErrorChoice } from "../../shared/types/index.js"
|
||||
import type { IToolRegistry } from "../../application/interfaces/IToolRegistry.js"
|
||||
import {
|
||||
HandleMessage,
|
||||
type HandleMessageStatus,
|
||||
} from "../../application/use-cases/HandleMessage.js"
|
||||
import { StartSession } from "../../application/use-cases/StartSession.js"
|
||||
import { UndoChange } from "../../application/use-cases/UndoChange.js"
|
||||
import type { ProjectStructure } from "../../infrastructure/llm/prompts.js"
|
||||
import type { TuiStatus } from "../types.js"
|
||||
|
||||
export interface UseSessionDependencies {
|
||||
storage: IStorage
|
||||
sessionStorage: ISessionStorage
|
||||
llm: ILLMClient
|
||||
tools: IToolRegistry
|
||||
projectRoot: string
|
||||
projectName: string
|
||||
projectStructure?: ProjectStructure
|
||||
}
|
||||
|
||||
export interface UseSessionOptions {
|
||||
autoApply?: boolean
|
||||
onConfirmation?: (message: string, diff?: DiffInfo) => Promise<boolean>
|
||||
onError?: (error: Error) => Promise<ErrorChoice>
|
||||
}
|
||||
|
||||
export interface UseSessionReturn {
|
||||
session: Session | null
|
||||
messages: ChatMessage[]
|
||||
status: TuiStatus
|
||||
isLoading: boolean
|
||||
error: Error | null
|
||||
sendMessage: (message: string) => Promise<void>
|
||||
undo: () => Promise<boolean>
|
||||
clearHistory: () => void
|
||||
abort: () => void
|
||||
}
|
||||
|
||||
interface SessionRefs {
|
||||
session: Session | null
|
||||
handleMessage: HandleMessage | null
|
||||
undoChange: UndoChange | null
|
||||
}
|
||||
|
||||
type SetStatus = React.Dispatch<React.SetStateAction<TuiStatus>>
|
||||
type SetMessages = React.Dispatch<React.SetStateAction<ChatMessage[]>>
|
||||
|
||||
interface StateSetters {
|
||||
setMessages: SetMessages
|
||||
setStatus: SetStatus
|
||||
forceUpdate: () => void
|
||||
}
|
||||
|
||||
function createEventHandlers(
|
||||
setters: StateSetters,
|
||||
options: UseSessionOptions,
|
||||
): Parameters<HandleMessage["setEvents"]>[0] {
|
||||
return {
|
||||
onMessage: (msg) => {
|
||||
setters.setMessages((prev) => [...prev, msg])
|
||||
},
|
||||
onToolCall: () => {
|
||||
setters.setStatus("tool_call")
|
||||
},
|
||||
onToolResult: () => {
|
||||
setters.setStatus("thinking")
|
||||
},
|
||||
onConfirmation: options.onConfirmation,
|
||||
onError: options.onError,
|
||||
onStatusChange: (s: HandleMessageStatus) => {
|
||||
setters.setStatus(s)
|
||||
},
|
||||
onUndoEntry: () => {
|
||||
setters.forceUpdate()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async function initializeSession(
|
||||
deps: UseSessionDependencies,
|
||||
options: UseSessionOptions,
|
||||
refs: React.MutableRefObject<SessionRefs>,
|
||||
setters: StateSetters,
|
||||
): Promise<void> {
|
||||
const startSession = new StartSession(deps.sessionStorage)
|
||||
const result = await startSession.execute(deps.projectName)
|
||||
refs.current.session = result.session
|
||||
setters.setMessages([...result.session.history])
|
||||
|
||||
const handleMessage = new HandleMessage(
|
||||
deps.storage,
|
||||
deps.sessionStorage,
|
||||
deps.llm,
|
||||
deps.tools,
|
||||
deps.projectRoot,
|
||||
)
|
||||
if (deps.projectStructure) {
|
||||
handleMessage.setProjectStructure(deps.projectStructure)
|
||||
}
|
||||
handleMessage.setOptions({ autoApply: options.autoApply })
|
||||
handleMessage.setEvents(createEventHandlers(setters, options))
|
||||
refs.current.handleMessage = handleMessage
|
||||
refs.current.undoChange = new UndoChange(deps.sessionStorage, deps.storage)
|
||||
setters.forceUpdate()
|
||||
}
|
||||
|
||||
export function useSession(
|
||||
deps: UseSessionDependencies,
|
||||
options: UseSessionOptions = {},
|
||||
): UseSessionReturn {
|
||||
const [messages, setMessages] = useState<ChatMessage[]>([])
|
||||
const [status, setStatus] = useState<TuiStatus>("ready")
|
||||
const [isLoading, setIsLoading] = useState(true)
|
||||
const [error, setError] = useState<Error | null>(null)
|
||||
const [, setTrigger] = useState(0)
|
||||
const refs = useRef<SessionRefs>({ session: null, handleMessage: null, undoChange: null })
|
||||
const forceUpdate = useCallback(() => {
|
||||
setTrigger((v) => v + 1)
|
||||
}, [])
|
||||
|
||||
useEffect(() => {
|
||||
setIsLoading(true)
|
||||
const setters: StateSetters = { setMessages, setStatus, forceUpdate }
|
||||
initializeSession(deps, options, refs, setters)
|
||||
.then(() => {
|
||||
setError(null)
|
||||
})
|
||||
.catch((err: unknown) => {
|
||||
setError(err instanceof Error ? err : new Error(String(err)))
|
||||
})
|
||||
.finally(() => {
|
||||
setIsLoading(false)
|
||||
})
|
||||
}, [deps.projectName, forceUpdate])
|
||||
|
||||
const sendMessage = useCallback(async (message: string): Promise<void> => {
|
||||
const { session, handleMessage } = refs.current
|
||||
if (!session || !handleMessage) {
|
||||
return
|
||||
}
|
||||
try {
|
||||
setStatus("thinking")
|
||||
await handleMessage.execute(session, message)
|
||||
} catch (err) {
|
||||
setError(err instanceof Error ? err : new Error(String(err)))
|
||||
setStatus("error")
|
||||
}
|
||||
}, [])
|
||||
|
||||
const undo = useCallback(async (): Promise<boolean> => {
|
||||
const { session, undoChange } = refs.current
|
||||
if (!session || !undoChange) {
|
||||
return false
|
||||
}
|
||||
try {
|
||||
const result = await undoChange.execute(session)
|
||||
if (result.success) {
|
||||
forceUpdate()
|
||||
return true
|
||||
}
|
||||
return false
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}, [forceUpdate])
|
||||
|
||||
const clearHistory = useCallback(() => {
|
||||
if (!refs.current.session) {
|
||||
return
|
||||
}
|
||||
refs.current.session.clearHistory()
|
||||
setMessages([])
|
||||
forceUpdate()
|
||||
}, [forceUpdate])
|
||||
|
||||
const abort = useCallback(() => {
|
||||
refs.current.handleMessage?.abort()
|
||||
setStatus("ready")
|
||||
}, [])
|
||||
|
||||
return {
|
||||
session: refs.current.session,
|
||||
messages,
|
||||
status,
|
||||
isLoading,
|
||||
error,
|
||||
sendMessage,
|
||||
undo,
|
||||
clearHistory,
|
||||
abort,
|
||||
}
|
||||
}
|
||||
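A sketch of how useSession is consumed, assuming the dependency bundle (storage, session storage, LLM client, tool registry, project paths) is constructed elsewhere and passed in; only the hook call shape is shown, and the onConfirmation stub is illustrative:

import { Text } from "ink"
import React from "react"
import { useSession, type UseSessionDependencies } from "./useSession.js"

function SessionExample({ deps }: { deps: UseSessionDependencies }): React.JSX.Element {
    const { messages, status, isLoading } = useSession(deps, {
        autoApply: false,
        onConfirmation: async () => true, // illustrative: auto-confirm every edit
    })

    if (isLoading) {
        return <Text>Loading session...</Text>
    }

    return (
        <Text>
            {status}: {String(messages.length)} messages
        </Text>
    )
}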
packages/ipuaro/src/tui/index.ts (new file, 8 lines)
@@ -0,0 +1,8 @@
/**
 * TUI module - Terminal User Interface.
 */

export { App, type AppDependencies, type ExtendedAppProps } from "./App.js"
export * from "./components/index.js"
export * from "./hooks/index.js"
export * from "./types.js"
packages/ipuaro/src/tui/types.ts (new file, 38 lines)
@@ -0,0 +1,38 @@
/**
 * TUI types and interfaces.
 */

import type { HandleMessageStatus } from "../application/use-cases/HandleMessage.js"

/**
 * TUI status - maps to HandleMessageStatus.
 */
export type TuiStatus = HandleMessageStatus

/**
 * Git branch information.
 */
export interface BranchInfo {
    name: string
    isDetached: boolean
}

/**
 * Props for the main App component.
 */
export interface AppProps {
    projectPath: string
    autoApply?: boolean
    model?: string
}

/**
 * Status bar display data.
 */
export interface StatusBarData {
    contextUsage: number
    projectName: string
    branch: BranchInfo
    sessionTime: string
    status: TuiStatus
}
@@ -0,0 +1,248 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest"
|
||||
import { ContextManager } from "../../../../src/application/use-cases/ContextManager.js"
|
||||
import { Session } from "../../../../src/domain/entities/Session.js"
|
||||
import type { ILLMClient, LLMResponse } from "../../../../src/domain/services/ILLMClient.js"
|
||||
import {
|
||||
createUserMessage,
|
||||
createAssistantMessage,
|
||||
} from "../../../../src/domain/value-objects/ChatMessage.js"
|
||||
|
||||
describe("ContextManager", () => {
|
||||
let manager: ContextManager
|
||||
const CONTEXT_SIZE = 128_000
|
||||
|
||||
beforeEach(() => {
|
||||
manager = new ContextManager(CONTEXT_SIZE)
|
||||
})
|
||||
|
||||
describe("addToContext", () => {
|
||||
it("should add file to context", () => {
|
||||
manager.addToContext("test.ts", 100)
|
||||
|
||||
expect(manager.getFilesInContext()).toContain("test.ts")
|
||||
expect(manager.getTokenCount()).toBe(100)
|
||||
})
|
||||
|
||||
it("should update token count when same file added", () => {
|
||||
manager.addToContext("test.ts", 100)
|
||||
manager.addToContext("test.ts", 200)
|
||||
|
||||
expect(manager.getFilesInContext()).toHaveLength(1)
|
||||
expect(manager.getTokenCount()).toBe(200)
|
||||
})
|
||||
|
||||
it("should accumulate tokens for different files", () => {
|
||||
manager.addToContext("a.ts", 100)
|
||||
manager.addToContext("b.ts", 200)
|
||||
|
||||
expect(manager.getFilesInContext()).toHaveLength(2)
|
||||
expect(manager.getTokenCount()).toBe(300)
|
||||
})
|
||||
})
|
||||
|
||||
describe("removeFromContext", () => {
|
||||
it("should remove file from context", () => {
|
||||
manager.addToContext("test.ts", 100)
|
||||
manager.removeFromContext("test.ts")
|
||||
|
||||
expect(manager.getFilesInContext()).not.toContain("test.ts")
|
||||
expect(manager.getTokenCount()).toBe(0)
|
||||
})
|
||||
|
||||
it("should handle removing non-existent file", () => {
|
||||
manager.removeFromContext("non-existent.ts")
|
||||
|
||||
expect(manager.getTokenCount()).toBe(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe("getUsage", () => {
|
||||
it("should return 0 for empty context", () => {
|
||||
expect(manager.getUsage()).toBe(0)
|
||||
})
|
||||
|
||||
it("should calculate usage ratio correctly", () => {
|
||||
manager.addToContext("test.ts", CONTEXT_SIZE / 2)
|
||||
|
||||
expect(manager.getUsage()).toBe(0.5)
|
||||
})
|
||||
})
|
||||
|
||||
describe("getAvailableTokens", () => {
|
||||
it("should return full context when empty", () => {
|
||||
expect(manager.getAvailableTokens()).toBe(CONTEXT_SIZE)
|
||||
})
|
||||
|
||||
it("should calculate available tokens correctly", () => {
|
||||
manager.addToContext("test.ts", 1000)
|
||||
|
||||
expect(manager.getAvailableTokens()).toBe(CONTEXT_SIZE - 1000)
|
||||
})
|
||||
})
|
||||
|
||||
describe("needsCompression", () => {
|
||||
it("should return false when under threshold", () => {
|
||||
manager.addToContext("test.ts", CONTEXT_SIZE * 0.5)
|
||||
|
||||
expect(manager.needsCompression()).toBe(false)
|
||||
})
|
||||
|
||||
it("should return true when over threshold", () => {
|
||||
manager.addToContext("test.ts", CONTEXT_SIZE * 0.85)
|
||||
|
||||
expect(manager.needsCompression()).toBe(true)
|
||||
})
|
||||
|
||||
it("should return false at exactly threshold", () => {
|
||||
manager.addToContext("test.ts", CONTEXT_SIZE * 0.8)
|
||||
|
||||
expect(manager.needsCompression()).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("addTokens", () => {
|
||||
it("should add tokens to current count", () => {
|
||||
manager.addTokens(500)
|
||||
|
||||
expect(manager.getTokenCount()).toBe(500)
|
||||
})
|
||||
|
||||
it("should accumulate tokens", () => {
|
||||
manager.addTokens(100)
|
||||
manager.addTokens(200)
|
||||
|
||||
expect(manager.getTokenCount()).toBe(300)
|
||||
})
|
||||
})
|
||||
|
||||
describe("syncFromSession", () => {
|
||||
it("should sync files from session context", () => {
|
||||
const session = new Session("test", "project")
|
||||
session.context.filesInContext = ["a.ts", "b.ts"]
|
||||
session.context.tokenUsage = 0.5
|
||||
|
||||
manager.syncFromSession(session)
|
||||
|
||||
expect(manager.getFilesInContext()).toContain("a.ts")
|
||||
expect(manager.getFilesInContext()).toContain("b.ts")
|
||||
expect(manager.getTokenCount()).toBe(Math.floor(0.5 * CONTEXT_SIZE))
|
||||
})
|
||||
|
||||
it("should clear previous state on sync", () => {
|
||||
manager.addToContext("old.ts", 1000)
|
||||
|
||||
const session = new Session("test", "project")
|
||||
session.context.filesInContext = ["new.ts"]
|
||||
session.context.tokenUsage = 0.1
|
||||
|
||||
manager.syncFromSession(session)
|
||||
|
||||
expect(manager.getFilesInContext()).not.toContain("old.ts")
|
||||
expect(manager.getFilesInContext()).toContain("new.ts")
|
||||
})
|
||||
})
|
||||
|
||||
describe("updateSession", () => {
|
||||
it("should update session with current context state", () => {
|
||||
const session = new Session("test", "project")
|
||||
|
||||
manager.addToContext("test.ts", 1000)
|
||||
manager.updateSession(session)
|
||||
|
||||
expect(session.context.filesInContext).toContain("test.ts")
|
||||
expect(session.context.tokenUsage).toBeCloseTo(1000 / CONTEXT_SIZE)
|
||||
})
|
||||
|
||||
it("should set needsCompression flag", () => {
|
||||
const session = new Session("test", "project")
|
||||
|
||||
manager.addToContext("large.ts", CONTEXT_SIZE * 0.9)
|
||||
manager.updateSession(session)
|
||||
|
||||
expect(session.context.needsCompression).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe("compress", () => {
|
||||
let mockLLM: ILLMClient
|
||||
let session: Session
|
||||
|
||||
beforeEach(() => {
|
||||
mockLLM = {
|
||||
chat: vi.fn().mockResolvedValue({
|
||||
content: "Summary of previous conversation",
|
||||
toolCalls: [],
|
||||
tokens: 50,
|
||||
timeMs: 100,
|
||||
truncated: false,
|
||||
stopReason: "end",
|
||||
} as LLMResponse),
|
||||
countTokens: vi.fn().mockResolvedValue(10),
|
||||
isAvailable: vi.fn().mockResolvedValue(true),
|
||||
getModelName: vi.fn().mockReturnValue("test-model"),
|
||||
getContextWindowSize: vi.fn().mockReturnValue(CONTEXT_SIZE),
|
||||
pullModel: vi.fn().mockResolvedValue(undefined),
|
||||
abort: vi.fn(),
|
||||
}
|
||||
|
||||
session = new Session("test", "project")
|
||||
})
|
||||
|
||||
it("should not compress when history is short", async () => {
|
||||
for (let i = 0; i < 5; i++) {
|
||||
session.addMessage(createUserMessage(`Message ${String(i)}`))
|
||||
}
|
||||
|
||||
const result = await manager.compress(session, mockLLM)
|
||||
|
||||
expect(result.compressed).toBe(false)
|
||||
expect(result.removedMessages).toBe(0)
|
||||
})
|
||||
|
||||
it("should compress when history is long enough", async () => {
|
||||
for (let i = 0; i < 15; i++) {
|
||||
session.addMessage(createUserMessage(`Message ${String(i)}`))
|
||||
session.addMessage(createAssistantMessage(`Response ${String(i)}`))
|
||||
}
|
||||
manager.addToContext("test.ts", 10000)
|
||||
|
||||
const result = await manager.compress(session, mockLLM)
|
||||
|
||||
expect(result.compressed).toBe(true)
|
||||
expect(result.removedMessages).toBeGreaterThan(0)
|
||||
expect(result.summary).toBeDefined()
|
||||
})
|
||||
|
||||
it("should keep recent messages after compression", async () => {
|
||||
for (let i = 0; i < 15; i++) {
|
||||
session.addMessage(createUserMessage(`Message ${String(i)}`))
|
||||
}
|
||||
|
||||
await manager.compress(session, mockLLM)
|
||||
|
||||
expect(session.history.length).toBeLessThan(15)
|
||||
expect(session.history[session.history.length - 1].content).toContain("Message 14")
|
||||
})
|
||||
|
||||
it("should add summary as system message", async () => {
|
||||
for (let i = 0; i < 15; i++) {
|
||||
session.addMessage(createUserMessage(`Message ${String(i)}`))
|
||||
}
|
||||
|
||||
await manager.compress(session, mockLLM)
|
||||
|
||||
expect(session.history[0].role).toBe("system")
|
||||
expect(session.history[0].content).toContain("Summary")
|
||||
})
|
||||
})
|
||||
|
||||
describe("createInitialState", () => {
|
||||
it("should create empty initial state", () => {
|
||||
const state = ContextManager.createInitialState()
|
||||
|
||||
expect(state.filesInContext).toEqual([])
|
||||
expect(state.tokenUsage).toBe(0)
|
||||
expect(state.needsCompression).toBe(false)
|
||||
})
|
||||
})
|
||||
})
|
||||
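The tests above pin down the ContextManager surface; a minimal interface sketch inferred from them follows (the actual class in src/application/use-cases/ContextManager.ts may expose more, so treat this as an assumption, not the definitive API). The import paths are the ones used by the test file itself:

import type { Session } from "../../../../src/domain/entities/Session.js"
import type { ILLMClient } from "../../../../src/domain/services/ILLMClient.js"

interface ContextManagerSurface {
    addToContext(path: string, tokens: number): void
    removeFromContext(path: string): void
    getFilesInContext(): string[]
    getTokenCount(): number
    getUsage(): number // tokens used / context window size
    getAvailableTokens(): number
    needsCompression(): boolean // true strictly above the 80% threshold
    addTokens(tokens: number): void
    syncFromSession(session: Session): void
    updateSession(session: Session): void
    compress(
        session: Session,
        llm: ILLMClient,
    ): Promise<{ compressed: boolean; removedMessages: number; summary?: string }>
}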
@@ -0,0 +1,421 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest"
|
||||
import { HandleMessage } from "../../../../src/application/use-cases/HandleMessage.js"
|
||||
import type { IStorage } from "../../../../src/domain/services/IStorage.js"
|
||||
import type { ISessionStorage } from "../../../../src/domain/services/ISessionStorage.js"
|
||||
import type { ILLMClient, LLMResponse } from "../../../../src/domain/services/ILLMClient.js"
|
||||
import type { IToolRegistry } from "../../../../src/application/interfaces/IToolRegistry.js"
|
||||
import type { ITool, ToolContext } from "../../../../src/domain/services/ITool.js"
|
||||
import { Session } from "../../../../src/domain/entities/Session.js"
|
||||
import { createSuccessResult } from "../../../../src/domain/value-objects/ToolResult.js"
|
||||
|
||||
describe("HandleMessage", () => {
|
||||
let useCase: HandleMessage
|
||||
let mockStorage: IStorage
|
||||
let mockSessionStorage: ISessionStorage
|
||||
let mockLLM: ILLMClient
|
||||
let mockTools: IToolRegistry
|
||||
let session: Session
|
||||
|
||||
const createMockLLMResponse = (content: string, toolCalls = false): LLMResponse => ({
|
||||
content,
|
||||
toolCalls: [],
|
||||
tokens: 100,
|
||||
timeMs: 50,
|
||||
truncated: false,
|
||||
stopReason: toolCalls ? "tool_use" : "end",
|
||||
})
|
||||
|
||||
beforeEach(() => {
|
||||
mockStorage = {
|
||||
getFile: vi.fn().mockResolvedValue(null),
|
||||
setFile: vi.fn().mockResolvedValue(undefined),
|
||||
deleteFile: vi.fn().mockResolvedValue(undefined),
|
||||
getAllFiles: vi.fn().mockResolvedValue(new Map()),
|
||||
getFileCount: vi.fn().mockResolvedValue(0),
|
||||
getAST: vi.fn().mockResolvedValue(null),
|
||||
setAST: vi.fn().mockResolvedValue(undefined),
|
||||
deleteAST: vi.fn().mockResolvedValue(undefined),
|
||||
getAllASTs: vi.fn().mockResolvedValue(new Map()),
|
||||
getMeta: vi.fn().mockResolvedValue(null),
|
||||
setMeta: vi.fn().mockResolvedValue(undefined),
|
||||
deleteMeta: vi.fn().mockResolvedValue(undefined),
|
||||
getAllMetas: vi.fn().mockResolvedValue(new Map()),
|
||||
getSymbolIndex: vi.fn().mockResolvedValue(new Map()),
|
||||
setSymbolIndex: vi.fn().mockResolvedValue(undefined),
|
||||
getDepsGraph: vi.fn().mockResolvedValue({ imports: new Map(), importedBy: new Map() }),
|
||||
setDepsGraph: vi.fn().mockResolvedValue(undefined),
|
||||
getProjectConfig: vi.fn().mockResolvedValue(null),
|
||||
setProjectConfig: vi.fn().mockResolvedValue(undefined),
|
||||
connect: vi.fn().mockResolvedValue(undefined),
|
||||
disconnect: vi.fn().mockResolvedValue(undefined),
|
||||
isConnected: vi.fn().mockReturnValue(true),
|
||||
clear: vi.fn().mockResolvedValue(undefined),
|
||||
}
|
||||
|
||||
mockSessionStorage = {
|
||||
saveSession: vi.fn().mockResolvedValue(undefined),
|
||||
loadSession: vi.fn().mockResolvedValue(null),
|
||||
deleteSession: vi.fn().mockResolvedValue(undefined),
|
||||
listSessions: vi.fn().mockResolvedValue([]),
|
||||
getLatestSession: vi.fn().mockResolvedValue(null),
|
||||
sessionExists: vi.fn().mockResolvedValue(false),
|
||||
pushUndoEntry: vi.fn().mockResolvedValue(undefined),
|
||||
popUndoEntry: vi.fn().mockResolvedValue(null),
|
||||
getUndoStack: vi.fn().mockResolvedValue([]),
|
||||
touchSession: vi.fn().mockResolvedValue(undefined),
|
||||
clearAllSessions: vi.fn().mockResolvedValue(undefined),
|
||||
}
|
||||
|
||||
mockLLM = {
|
||||
chat: vi.fn().mockResolvedValue(createMockLLMResponse("Hello!")),
|
||||
countTokens: vi.fn().mockResolvedValue(10),
|
||||
isAvailable: vi.fn().mockResolvedValue(true),
|
||||
getModelName: vi.fn().mockReturnValue("test-model"),
|
||||
getContextWindowSize: vi.fn().mockReturnValue(128_000),
|
||||
pullModel: vi.fn().mockResolvedValue(undefined),
|
||||
abort: vi.fn(),
|
||||
}
|
||||
|
||||
mockTools = {
|
||||
register: vi.fn(),
|
||||
get: vi.fn().mockReturnValue(undefined),
|
||||
getAll: vi.fn().mockReturnValue([]),
|
||||
getByCategory: vi.fn().mockReturnValue([]),
|
||||
has: vi.fn().mockReturnValue(false),
|
||||
execute: vi.fn(),
|
||||
getToolDefinitions: vi.fn().mockReturnValue([]),
|
||||
}
|
||||
|
||||
session = new Session("test-session", "test-project")
|
||||
useCase = new HandleMessage(mockStorage, mockSessionStorage, mockLLM, mockTools, "/project")
|
||||
})
|
||||
|
||||
describe("execute", () => {
|
||||
it("should add user message to session history", async () => {
|
||||
await useCase.execute(session, "Hello, assistant!")
|
||||
|
||||
expect(session.history.length).toBeGreaterThan(0)
|
||||
expect(session.history[0].role).toBe("user")
|
||||
expect(session.history[0].content).toBe("Hello, assistant!")
|
||||
})
|
||||
|
||||
it("should add user input to input history", async () => {
|
||||
await useCase.execute(session, "Test command")
|
||||
|
||||
expect(session.inputHistory).toContain("Test command")
|
||||
})
|
||||
|
||||
it("should save session after user message", async () => {
|
||||
await useCase.execute(session, "Hello")
|
||||
|
||||
expect(mockSessionStorage.saveSession).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should send messages to LLM", async () => {
|
||||
await useCase.execute(session, "What is 2+2?")
|
||||
|
||||
expect(mockLLM.chat).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should add assistant response to history", async () => {
|
||||
vi.mocked(mockLLM.chat).mockResolvedValue(createMockLLMResponse("The answer is 4!"))
|
||||
|
||||
await useCase.execute(session, "What is 2+2?")
|
||||
|
||||
const assistantMessages = session.history.filter((m) => m.role === "assistant")
|
||||
expect(assistantMessages.length).toBeGreaterThan(0)
|
||||
expect(assistantMessages[0].content).toBe("The answer is 4!")
|
||||
})
|
||||
|
||||
it("should not add empty user messages", async () => {
|
||||
await useCase.execute(session, " ")
|
||||
|
||||
const userMessages = session.history.filter((m) => m.role === "user")
|
||||
expect(userMessages.length).toBe(0)
|
||||
})
|
||||
|
||||
it("should track token usage in message stats", async () => {
|
||||
vi.mocked(mockLLM.chat).mockResolvedValue({
|
||||
content: "Response",
|
||||
toolCalls: [],
|
||||
tokens: 150,
|
||||
timeMs: 200,
|
||||
truncated: false,
|
||||
stopReason: "end",
|
||||
})
|
||||
|
||||
await useCase.execute(session, "Hello")
|
||||
|
||||
const assistantMessage = session.history.find((m) => m.role === "assistant")
|
||||
expect(assistantMessage?.stats?.tokens).toBe(150)
|
||||
expect(assistantMessage?.stats?.timeMs).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe("tool execution", () => {
|
||||
const mockTool: ITool = {
|
||||
name: "get_lines",
|
||||
description: "Get lines from file",
|
||||
parameters: [],
|
||||
requiresConfirmation: false,
|
||||
category: "read",
|
||||
validateParams: vi.fn().mockReturnValue(null),
|
||||
execute: vi.fn().mockResolvedValue(createSuccessResult("test", { lines: [] }, 10)),
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
vi.mocked(mockTools.get).mockReturnValue(mockTool)
|
||||
})
|
||||
|
||||
it("should execute tools when LLM returns tool calls", async () => {
|
||||
vi.mocked(mockLLM.chat)
|
||||
.mockResolvedValueOnce(
|
||||
createMockLLMResponse(
|
||||
'<tool_call name="get_lines"><path>test.ts</path></tool_call>',
|
||||
true,
|
||||
),
|
||||
)
|
||||
.mockResolvedValueOnce(createMockLLMResponse("Done!"))
|
||||
|
||||
await useCase.execute(session, "Show me test.ts")
|
||||
|
||||
expect(mockTool.execute).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should add tool results to session", async () => {
|
||||
vi.mocked(mockLLM.chat)
|
||||
.mockResolvedValueOnce(
|
||||
createMockLLMResponse(
|
||||
'<tool_call name="get_lines"><path>test.ts</path></tool_call>',
|
||||
true,
|
||||
),
|
||||
)
|
||||
.mockResolvedValueOnce(createMockLLMResponse("Done!"))
|
||||
|
||||
await useCase.execute(session, "Show me test.ts")
|
||||
|
||||
const toolMessages = session.history.filter((m) => m.role === "tool")
|
||||
expect(toolMessages.length).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
it("should return error for unknown tools", async () => {
|
||||
vi.mocked(mockTools.get).mockReturnValue(undefined)
|
||||
vi.mocked(mockLLM.chat)
|
||||
.mockResolvedValueOnce(
|
||||
createMockLLMResponse(
|
||||
'<tool_call name="unknown_tool"><param>value</param></tool_call>',
|
||||
true,
|
||||
),
|
||||
)
|
||||
.mockResolvedValueOnce(createMockLLMResponse("Sorry, that didn't work"))
|
||||
|
||||
await useCase.execute(session, "Do something")
|
||||
|
||||
const toolMessages = session.history.filter((m) => m.role === "tool")
|
||||
expect(toolMessages[0].content).toContain("Unknown tool")
|
||||
})
|
||||
|
||||
it("should stop after max tool calls exceeded", async () => {
|
||||
useCase.setOptions({ maxToolCalls: 2 })
|
||||
|
||||
vi.mocked(mockLLM.chat).mockResolvedValue(
|
||||
createMockLLMResponse(
|
||||
'<tool_call name="get_lines"><path>a.ts</path></tool_call>' +
|
||||
'<tool_call name="get_lines"><path>b.ts</path></tool_call>' +
|
||||
'<tool_call name="get_lines"><path>c.ts</path></tool_call>',
|
||||
true,
|
||||
),
|
||||
)
|
||||
|
||||
await useCase.execute(session, "Show many files")
|
||||
|
||||
const systemMessages = session.history.filter((m) => m.role === "system")
|
||||
const maxExceeded = systemMessages.some((m) => m.content.includes("Maximum tool calls"))
|
||||
expect(maxExceeded).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe("events", () => {
|
||||
it("should emit message events", async () => {
|
||||
const onMessage = vi.fn()
|
||||
useCase.setEvents({ onMessage })
|
||||
|
||||
await useCase.execute(session, "Hello")
|
||||
|
||||
expect(onMessage).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should emit status changes", async () => {
|
||||
const onStatusChange = vi.fn()
|
||||
useCase.setEvents({ onStatusChange })
|
||||
|
||||
await useCase.execute(session, "Hello")
|
||||
|
||||
expect(onStatusChange).toHaveBeenCalledWith("thinking")
|
||||
expect(onStatusChange).toHaveBeenCalledWith("ready")
|
||||
})
|
||||
|
||||
it("should emit tool call events", async () => {
|
||||
const onToolCall = vi.fn()
|
||||
useCase.setEvents({ onToolCall })
|
||||
|
||||
const mockTool: ITool = {
|
||||
name: "get_lines",
|
||||
description: "Test",
|
||||
parameters: [],
|
||||
requiresConfirmation: false,
|
||||
category: "read",
|
||||
validateParams: vi.fn().mockReturnValue(null),
|
||||
execute: vi.fn().mockResolvedValue(createSuccessResult("test", {}, 10)),
|
||||
}
|
||||
vi.mocked(mockTools.get).mockReturnValue(mockTool)
|
||||
|
||||
vi.mocked(mockLLM.chat)
|
||||
.mockResolvedValueOnce(
|
||||
createMockLLMResponse(
|
||||
'<tool_call name="get_lines"><path>test.ts</path></tool_call>',
|
||||
),
|
||||
)
|
||||
.mockResolvedValueOnce(createMockLLMResponse("Done"))
|
||||
|
||||
await useCase.execute(session, "Show file")
|
||||
|
||||
expect(onToolCall).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe("confirmation handling", () => {
|
||||
const mockEditTool: ITool = {
|
||||
name: "edit_lines",
|
||||
description: "Edit lines",
|
||||
parameters: [],
|
||||
requiresConfirmation: true,
|
||||
category: "edit",
|
||||
validateParams: vi.fn().mockReturnValue(null),
|
||||
execute: vi
|
||||
.fn()
|
||||
.mockImplementation(async (_params: Record<string, unknown>, ctx: ToolContext) => {
|
||||
const confirmed = await ctx.requestConfirmation("Apply edit?", {
|
||||
filePath: "test.ts",
|
||||
oldLines: ["old"],
|
||||
newLines: ["new"],
|
||||
startLine: 1,
|
||||
})
|
||||
if (!confirmed) {
|
||||
return createSuccessResult("test", { cancelled: true }, 10)
|
||||
}
|
||||
return createSuccessResult("test", { applied: true }, 10)
|
||||
}),
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
vi.mocked(mockTools.get).mockReturnValue(mockEditTool)
|
||||
})
|
||||
|
||||
it("should auto-apply when autoApply option is true", async () => {
|
||||
useCase.setOptions({ autoApply: true })
|
||||
|
||||
vi.mocked(mockLLM.chat)
|
||||
.mockResolvedValueOnce(
|
||||
createMockLLMResponse(
|
||||
'<tool_call name="edit_lines"><path>test.ts</path></tool_call>',
|
||||
),
|
||||
)
|
||||
.mockResolvedValueOnce(createMockLLMResponse("Done"))
|
||||
|
||||
await useCase.execute(session, "Edit file")
|
||||
|
||||
expect(mockEditTool.execute).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should ask for confirmation via callback", async () => {
|
||||
const onConfirmation = vi.fn().mockResolvedValue(true)
|
||||
useCase.setEvents({ onConfirmation })
|
||||
|
||||
vi.mocked(mockLLM.chat)
|
||||
.mockResolvedValueOnce(
|
||||
createMockLLMResponse(
|
||||
'<tool_call name="edit_lines"><path>test.ts</path></tool_call>',
|
||||
),
|
||||
)
|
||||
.mockResolvedValueOnce(createMockLLMResponse("Done"))
|
||||
|
||||
await useCase.execute(session, "Edit file")
|
||||
|
||||
expect(onConfirmation).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should create undo entry on confirmation", async () => {
|
||||
const onUndoEntry = vi.fn()
|
||||
useCase.setEvents({
|
||||
onConfirmation: vi.fn().mockResolvedValue(true),
|
||||
onUndoEntry,
|
||||
})
|
||||
|
||||
vi.mocked(mockLLM.chat)
|
||||
.mockResolvedValueOnce(
|
||||
createMockLLMResponse(
|
||||
'<tool_call name="edit_lines"><path>test.ts</path></tool_call>',
|
||||
),
|
||||
)
|
||||
.mockResolvedValueOnce(createMockLLMResponse("Done"))
|
||||
|
||||
await useCase.execute(session, "Edit file")
|
||||
|
||||
expect(onUndoEntry).toHaveBeenCalled()
|
||||
expect(mockSessionStorage.pushUndoEntry).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe("abort", () => {
|
||||
it("should stop processing when aborted", async () => {
|
||||
vi.mocked(mockLLM.chat).mockImplementation(async () => {
|
||||
await new Promise((resolve) => setTimeout(resolve, 100))
|
||||
return createMockLLMResponse("Response")
|
||||
})
|
||||
|
||||
const promise = useCase.execute(session, "Hello")
|
||||
|
||||
setTimeout(() => useCase.abort(), 10)
|
||||
|
||||
await promise
|
||||
|
||||
expect(mockLLM.abort).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe("error handling", () => {
|
||||
it("should handle LLM errors gracefully", async () => {
|
||||
vi.mocked(mockLLM.chat).mockRejectedValue(new Error("LLM unavailable"))
|
||||
|
||||
await useCase.execute(session, "Hello")
|
||||
|
||||
const systemMessages = session.history.filter((m) => m.role === "system")
|
||||
expect(systemMessages.some((m) => m.content.includes("Error"))).toBe(true)
|
||||
})
|
||||
|
||||
it("should emit error status on LLM failure", async () => {
|
||||
const onStatusChange = vi.fn()
|
||||
useCase.setEvents({ onStatusChange })
|
||||
|
||||
vi.mocked(mockLLM.chat).mockRejectedValue(new Error("LLM error"))
|
||||
|
||||
await useCase.execute(session, "Hello")
|
||||
|
||||
expect(onStatusChange).toHaveBeenCalledWith("error")
|
||||
})
|
||||
|
||||
it("should allow retry on error", async () => {
|
||||
const onError = vi.fn().mockResolvedValue("retry")
|
||||
useCase.setEvents({ onError })
|
||||
|
||||
vi.mocked(mockLLM.chat)
|
||||
.mockRejectedValueOnce(new Error("Temporary error"))
|
||||
.mockResolvedValueOnce(createMockLLMResponse("Success!"))
|
||||
|
||||
await useCase.execute(session, "Hello")
|
||||
|
||||
expect(onError).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,112 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest"
|
||||
import { StartSession } from "../../../../src/application/use-cases/StartSession.js"
|
||||
import type { ISessionStorage } from "../../../../src/domain/services/ISessionStorage.js"
|
||||
import { Session } from "../../../../src/domain/entities/Session.js"
|
||||
|
||||
describe("StartSession", () => {
|
||||
let useCase: StartSession
|
||||
let mockSessionStorage: ISessionStorage
|
||||
|
||||
beforeEach(() => {
|
||||
mockSessionStorage = {
|
||||
saveSession: vi.fn().mockResolvedValue(undefined),
|
||||
loadSession: vi.fn().mockResolvedValue(null),
|
||||
deleteSession: vi.fn().mockResolvedValue(undefined),
|
||||
listSessions: vi.fn().mockResolvedValue([]),
|
||||
getLatestSession: vi.fn().mockResolvedValue(null),
|
||||
sessionExists: vi.fn().mockResolvedValue(false),
|
||||
pushUndoEntry: vi.fn().mockResolvedValue(undefined),
|
||||
popUndoEntry: vi.fn().mockResolvedValue(null),
|
||||
getUndoStack: vi.fn().mockResolvedValue([]),
|
||||
touchSession: vi.fn().mockResolvedValue(undefined),
|
||||
clearAllSessions: vi.fn().mockResolvedValue(undefined),
|
||||
}
|
||||
|
||||
useCase = new StartSession(mockSessionStorage)
|
||||
})
|
||||
|
||||
describe("execute", () => {
|
||||
it("should create new session when no existing session", async () => {
|
||||
const result = await useCase.execute("test-project")
|
||||
|
||||
expect(result.isNew).toBe(true)
|
||||
expect(result.session.projectName).toBe("test-project")
|
||||
expect(mockSessionStorage.saveSession).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should return latest session when one exists", async () => {
|
||||
const existingSession = new Session("existing-id", "test-project")
|
||||
vi.mocked(mockSessionStorage.getLatestSession).mockResolvedValue(existingSession)
|
||||
|
||||
const result = await useCase.execute("test-project")
|
||||
|
||||
expect(result.isNew).toBe(false)
|
||||
expect(result.session.id).toBe("existing-id")
|
||||
expect(mockSessionStorage.touchSession).toHaveBeenCalledWith("existing-id")
|
||||
})
|
||||
|
||||
it("should load specific session by ID", async () => {
|
||||
const specificSession = new Session("specific-id", "test-project")
|
||||
vi.mocked(mockSessionStorage.loadSession).mockResolvedValue(specificSession)
|
||||
|
||||
const result = await useCase.execute("test-project", { sessionId: "specific-id" })
|
||||
|
||||
expect(result.isNew).toBe(false)
|
||||
expect(result.session.id).toBe("specific-id")
|
||||
expect(mockSessionStorage.loadSession).toHaveBeenCalledWith("specific-id")
|
||||
})
|
||||
|
||||
it("should create new session when specified session not found", async () => {
|
||||
vi.mocked(mockSessionStorage.loadSession).mockResolvedValue(null)
|
||||
|
||||
const result = await useCase.execute("test-project", { sessionId: "non-existent" })
|
||||
|
||||
expect(result.isNew).toBe(true)
|
||||
expect(mockSessionStorage.saveSession).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should force new session when forceNew is true", async () => {
|
||||
const existingSession = new Session("existing-id", "test-project")
|
||||
vi.mocked(mockSessionStorage.getLatestSession).mockResolvedValue(existingSession)
|
||||
|
||||
const result = await useCase.execute("test-project", { forceNew: true })
|
||||
|
||||
expect(result.isNew).toBe(true)
|
||||
expect(result.session.id).not.toBe("existing-id")
|
||||
expect(mockSessionStorage.saveSession).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should generate unique session IDs", async () => {
|
||||
const result1 = await useCase.execute("test-project", { forceNew: true })
|
||||
const result2 = await useCase.execute("test-project", { forceNew: true })
|
||||
|
||||
expect(result1.session.id).not.toBe(result2.session.id)
|
||||
})
|
||||
|
||||
it("should set correct project name on new session", async () => {
|
||||
const result = await useCase.execute("my-special-project")
|
||||
|
||||
expect(result.session.projectName).toBe("my-special-project")
|
||||
})
|
||||
|
||||
it("should initialize new session with empty history", async () => {
|
||||
const result = await useCase.execute("test-project")
|
||||
|
||||
expect(result.session.history).toEqual([])
|
||||
})
|
||||
|
||||
it("should initialize new session with empty undo stack", async () => {
|
||||
const result = await useCase.execute("test-project")
|
||||
|
||||
expect(result.session.undoStack).toEqual([])
|
||||
})
|
||||
|
||||
it("should initialize new session with zero stats", async () => {
|
||||
const result = await useCase.execute("test-project")
|
||||
|
||||
expect(result.session.stats.totalTokens).toBe(0)
|
||||
expect(result.session.stats.toolCalls).toBe(0)
|
||||
expect(result.session.stats.editsApplied).toBe(0)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,234 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"
|
||||
import { promises as fs } from "node:fs"
|
||||
import { UndoChange } from "../../../../src/application/use-cases/UndoChange.js"
|
||||
import type { ISessionStorage } from "../../../../src/domain/services/ISessionStorage.js"
|
||||
import type { IStorage } from "../../../../src/domain/services/IStorage.js"
|
||||
import { Session } from "../../../../src/domain/entities/Session.js"
|
||||
import type { UndoEntry } from "../../../../src/domain/value-objects/UndoEntry.js"
|
||||
|
||||
vi.mock("node:fs", () => ({
|
||||
promises: {
|
||||
readFile: vi.fn(),
|
||||
writeFile: vi.fn(),
|
||||
stat: vi.fn(),
|
||||
},
|
||||
}))
|
||||
|
||||
describe("UndoChange", () => {
|
||||
let useCase: UndoChange
|
||||
let mockSessionStorage: ISessionStorage
|
||||
let mockStorage: IStorage
|
||||
let session: Session
|
||||
|
||||
const createUndoEntry = (overrides: Partial<UndoEntry> = {}): UndoEntry => ({
|
||||
id: "undo-1",
|
||||
timestamp: Date.now(),
|
||||
filePath: "/project/test.ts",
|
||||
previousContent: ["const a = 1"],
|
||||
newContent: ["const a = 2"],
|
||||
description: "Edit test.ts",
|
||||
...overrides,
|
||||
})
|
||||
|
||||
beforeEach(() => {
|
||||
mockSessionStorage = {
|
||||
saveSession: vi.fn().mockResolvedValue(undefined),
|
||||
loadSession: vi.fn().mockResolvedValue(null),
|
||||
deleteSession: vi.fn().mockResolvedValue(undefined),
|
||||
listSessions: vi.fn().mockResolvedValue([]),
|
||||
getLatestSession: vi.fn().mockResolvedValue(null),
|
||||
sessionExists: vi.fn().mockResolvedValue(false),
|
||||
pushUndoEntry: vi.fn().mockResolvedValue(undefined),
|
||||
popUndoEntry: vi.fn().mockResolvedValue(null),
|
||||
getUndoStack: vi.fn().mockResolvedValue([]),
|
||||
touchSession: vi.fn().mockResolvedValue(undefined),
|
||||
clearAllSessions: vi.fn().mockResolvedValue(undefined),
|
||||
}
|
||||
|
||||
mockStorage = {
|
||||
getFile: vi.fn().mockResolvedValue(null),
|
||||
setFile: vi.fn().mockResolvedValue(undefined),
|
||||
deleteFile: vi.fn().mockResolvedValue(undefined),
|
||||
getAllFiles: vi.fn().mockResolvedValue(new Map()),
|
||||
getFileCount: vi.fn().mockResolvedValue(0),
|
||||
getAST: vi.fn().mockResolvedValue(null),
|
||||
setAST: vi.fn().mockResolvedValue(undefined),
|
||||
deleteAST: vi.fn().mockResolvedValue(undefined),
|
||||
getAllASTs: vi.fn().mockResolvedValue(new Map()),
|
||||
getMeta: vi.fn().mockResolvedValue(null),
|
||||
setMeta: vi.fn().mockResolvedValue(undefined),
|
||||
deleteMeta: vi.fn().mockResolvedValue(undefined),
|
||||
getAllMetas: vi.fn().mockResolvedValue(new Map()),
|
||||
getSymbolIndex: vi.fn().mockResolvedValue(new Map()),
|
||||
setSymbolIndex: vi.fn().mockResolvedValue(undefined),
|
||||
getDepsGraph: vi.fn().mockResolvedValue({ imports: new Map(), importedBy: new Map() }),
|
||||
setDepsGraph: vi.fn().mockResolvedValue(undefined),
|
||||
getProjectConfig: vi.fn().mockResolvedValue(null),
|
||||
setProjectConfig: vi.fn().mockResolvedValue(undefined),
|
||||
connect: vi.fn().mockResolvedValue(undefined),
|
||||
disconnect: vi.fn().mockResolvedValue(undefined),
|
||||
isConnected: vi.fn().mockReturnValue(true),
|
||||
clear: vi.fn().mockResolvedValue(undefined),
|
||||
}
|
||||
|
||||
session = new Session("test-session", "test-project")
|
||||
session.stats.editsApplied = 1
|
||||
|
||||
useCase = new UndoChange(mockSessionStorage, mockStorage)
|
||||
|
||||
vi.mocked(fs.stat).mockResolvedValue({
|
||||
size: 100,
|
||||
mtimeMs: Date.now(),
|
||||
} as unknown as Awaited<ReturnType<typeof fs.stat>>)
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
describe("execute", () => {
|
||||
it("should return error when no undo entries", async () => {
|
||||
vi.mocked(mockSessionStorage.popUndoEntry).mockResolvedValue(null)
|
||||
|
||||
const result = await useCase.execute(session)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toBe("No changes to undo")
|
||||
})
|
||||
|
||||
it("should restore previous content when file matches", async () => {
|
||||
const entry = createUndoEntry()
|
||||
vi.mocked(mockSessionStorage.popUndoEntry).mockResolvedValue(entry)
|
||||
vi.mocked(fs.readFile).mockResolvedValue("const a = 2")
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined)
|
||||
|
||||
session.addUndoEntry(entry)
|
||||
|
||||
const result = await useCase.execute(session)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
expect(result.entry).toBe(entry)
|
||||
expect(fs.writeFile).toHaveBeenCalledWith(entry.filePath, "const a = 1", "utf-8")
|
||||
})
|
||||
|
||||
it("should update storage after undo", async () => {
|
||||
const entry = createUndoEntry()
|
||||
vi.mocked(mockSessionStorage.popUndoEntry).mockResolvedValue(entry)
|
||||
vi.mocked(fs.readFile).mockResolvedValue("const a = 2")
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined)
|
||||
|
||||
session.addUndoEntry(entry)
|
||||
|
||||
await useCase.execute(session)
|
||||
|
||||
expect(mockStorage.setFile).toHaveBeenCalledWith(
|
||||
entry.filePath,
|
||||
expect.objectContaining({
|
||||
lines: entry.previousContent,
|
||||
}),
|
||||
)
|
||||
})
|
||||
|
||||
it("should decrement editsApplied counter", async () => {
|
||||
const entry = createUndoEntry()
|
||||
vi.mocked(mockSessionStorage.popUndoEntry).mockResolvedValue(entry)
|
||||
vi.mocked(fs.readFile).mockResolvedValue("const a = 2")
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined)
|
||||
|
||||
session.addUndoEntry(entry)
|
||||
const initialEdits = session.stats.editsApplied
|
||||
|
||||
await useCase.execute(session)
|
||||
|
||||
expect(session.stats.editsApplied).toBe(initialEdits - 1)
|
||||
})
|
||||
|
||||
it("should fail when file has been modified externally", async () => {
|
||||
const entry = createUndoEntry()
|
||||
vi.mocked(mockSessionStorage.popUndoEntry).mockResolvedValue(entry)
|
||||
vi.mocked(fs.readFile).mockResolvedValue("const a = 999")
|
||||
|
||||
const result = await useCase.execute(session)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toContain("modified since the change")
|
||||
})
|
||||
|
||||
it("should re-push undo entry on conflict", async () => {
|
||||
const entry = createUndoEntry()
|
||||
vi.mocked(mockSessionStorage.popUndoEntry).mockResolvedValue(entry)
|
||||
vi.mocked(fs.readFile).mockResolvedValue("const a = 999")
|
||||
|
||||
await useCase.execute(session)
|
||||
|
||||
expect(mockSessionStorage.pushUndoEntry).toHaveBeenCalledWith(session.id, entry)
|
||||
})
|
||||
|
||||
it("should handle empty file for undo", async () => {
|
||||
const entry = createUndoEntry({
|
||||
previousContent: [],
|
||||
newContent: ["new content"],
|
||||
})
|
||||
vi.mocked(mockSessionStorage.popUndoEntry).mockResolvedValue(entry)
|
||||
vi.mocked(fs.readFile).mockResolvedValue("new content")
|
||||
vi.mocked(fs.writeFile).mockResolvedValue(undefined)
|
||||
|
||||
session.addUndoEntry(entry)
|
||||
|
||||
const result = await useCase.execute(session)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
expect(fs.writeFile).toHaveBeenCalledWith(entry.filePath, "", "utf-8")
|
||||
})
|
||||
|
||||
it("should handle file not found during undo", async () => {
|
||||
const entry = createUndoEntry()
|
||||
vi.mocked(mockSessionStorage.popUndoEntry).mockResolvedValue(entry)
|
||||
const error = new Error("ENOENT") as NodeJS.ErrnoException
|
||||
error.code = "ENOENT"
|
||||
vi.mocked(fs.readFile).mockRejectedValue(error)
|
||||
|
||||
const result = await useCase.execute(session)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("canUndo", () => {
|
||||
it("should return false when stack is empty", async () => {
|
||||
vi.mocked(mockSessionStorage.getUndoStack).mockResolvedValue([])
|
||||
|
||||
const result = await useCase.canUndo(session)
|
||||
|
||||
expect(result).toBe(false)
|
||||
})
|
||||
|
||||
it("should return true when stack has entries", async () => {
|
||||
vi.mocked(mockSessionStorage.getUndoStack).mockResolvedValue([createUndoEntry()])
|
||||
|
||||
const result = await useCase.canUndo(session)
|
||||
|
||||
expect(result).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe("peekUndoEntry", () => {
|
||||
it("should return null when stack is empty", async () => {
|
||||
vi.mocked(mockSessionStorage.getUndoStack).mockResolvedValue([])
|
||||
|
||||
const result = await useCase.peekUndoEntry(session)
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it("should return last entry without removing", async () => {
|
||||
const entry = createUndoEntry()
|
||||
vi.mocked(mockSessionStorage.getUndoStack).mockResolvedValue([entry])
|
||||
|
||||
const result = await useCase.peekUndoEntry(session)
|
||||
|
||||
expect(result).toBe(entry)
|
||||
expect(mockSessionStorage.popUndoEntry).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -63,6 +63,13 @@ describe("ChatMessage", () => {
|
||||
|
||||
expect(msg.content).toContain("[2] Error: Not found")
|
||||
})
|
||||
|
||||
it("should handle error result without error message", () => {
|
||||
const results = [{ callId: "3", success: false, executionTimeMs: 5 }]
|
||||
const msg = createToolMessage(results)
|
||||
|
||||
expect(msg.content).toContain("[3] Error: Unknown error")
|
||||
})
|
||||
})
|
||||
|
||||
describe("createSystemMessage", () => {
|
||||
|
||||
@@ -301,6 +301,66 @@ describe("ASTParser", () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe("import string formats", () => {
|
||||
it("should handle single-quoted imports", () => {
|
||||
const code = `import { foo } from './module'`
|
||||
const ast = parser.parse(code, "ts")
|
||||
|
||||
expect(ast.imports).toHaveLength(1)
|
||||
expect(ast.imports[0].from).toBe("./module")
|
||||
})
|
||||
|
||||
it("should handle double-quoted imports", () => {
|
||||
const code = `import { bar } from "./other"`
|
||||
const ast = parser.parse(code, "ts")
|
||||
|
||||
expect(ast.imports).toHaveLength(1)
|
||||
expect(ast.imports[0].from).toBe("./other")
|
||||
})
|
||||
})
|
||||
|
||||
describe("parameter types", () => {
|
||||
it("should handle simple identifier parameters", () => {
|
||||
const code = `const fn = (x) => x * 2`
|
||||
const ast = parser.parse(code, "ts")
|
||||
|
||||
expect(ast.functions.length).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
|
||||
it("should handle optional parameters with defaults", () => {
|
||||
const code = `function greet(name: string = "World"): string { return name }`
|
||||
const ast = parser.parse(code, "ts")
|
||||
|
||||
expect(ast.functions).toHaveLength(1)
|
||||
const fn = ast.functions[0]
|
||||
expect(fn.params.some((p) => p.hasDefault)).toBe(true)
|
||||
})
|
||||
|
||||
it("should handle arrow function with untyped params", () => {
|
||||
const code = `const add = (a, b) => a + b`
|
||||
const ast = parser.parse(code, "ts")
|
||||
|
||||
expect(ast.functions.length).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
|
||||
it("should handle multiple parameter types", () => {
|
||||
const code = `
|
||||
function mix(
|
||||
required: string,
|
||||
optional?: number,
|
||||
withDefault: boolean = true
|
||||
) {}
|
||||
`
|
||||
const ast = parser.parse(code, "ts")
|
||||
|
||||
expect(ast.functions).toHaveLength(1)
|
||||
const fn = ast.functions[0]
|
||||
expect(fn.params).toHaveLength(3)
|
||||
expect(fn.params.some((p) => p.optional)).toBe(true)
|
||||
expect(fn.params.some((p) => p.hasDefault)).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe("complex file", () => {
|
||||
it("should parse complex TypeScript file", () => {
|
||||
const code = `
|
||||
|
||||
@@ -212,6 +212,32 @@ describe("FileScanner", () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe("empty file handling", () => {
|
||||
it("should consider empty files as text files", async () => {
|
||||
const emptyFile = path.join(FIXTURES_DIR, "empty-file.ts")
|
||||
await fs.writeFile(emptyFile, "")
|
||||
|
||||
try {
|
||||
const isText = await FileScanner.isTextFile(emptyFile)
|
||||
expect(isText).toBe(true)
|
||||
} finally {
|
||||
await fs.unlink(emptyFile)
|
||||
}
|
||||
})
|
||||
|
||||
it("should read empty file content", async () => {
|
||||
const emptyFile = path.join(FIXTURES_DIR, "empty-content.ts")
|
||||
await fs.writeFile(emptyFile, "")
|
||||
|
||||
try {
|
||||
const content = await FileScanner.readFileContent(emptyFile)
|
||||
expect(content).toBe("")
|
||||
} finally {
|
||||
await fs.unlink(emptyFile)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe("empty directory handling", () => {
|
||||
let emptyDir: string
|
||||
|
||||
|
||||
@@ -605,4 +605,44 @@ export type ServiceResult<T> = { success: true; data: T } | { success: false; er
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe("jsx to tsx resolution", () => {
|
||||
it("should resolve .jsx imports to .tsx files", () => {
|
||||
const mainCode = `import { Button } from "./Button.jsx"`
|
||||
const buttonCode = `export function Button() { return null }`
|
||||
|
||||
const asts = new Map<string, FileAST>([
|
||||
["/project/src/main.ts", parser.parse(mainCode, "ts")],
|
||||
["/project/src/Button.tsx", parser.parse(buttonCode, "tsx")],
|
||||
])
|
||||
|
||||
const graph = builder.buildDepsGraph(asts)
|
||||
|
||||
expect(graph.imports.get("/project/src/main.ts")).toContain("/project/src/Button.tsx")
|
||||
})
|
||||
})
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("should handle empty deps graph for circular dependencies", () => {
|
||||
const graph = {
|
||||
imports: new Map<string, string[]>(),
|
||||
importedBy: new Map<string, string[]>(),
|
||||
}
|
||||
|
||||
const cycles = builder.findCircularDependencies(graph)
|
||||
expect(cycles).toEqual([])
|
||||
})
|
||||
|
||||
it("should handle single file with no imports", () => {
|
||||
const code = `export const x = 1`
|
||||
const asts = new Map<string, FileAST>([
|
||||
["/project/src/single.ts", parser.parse(code, "ts")],
|
||||
])
|
||||
|
||||
const graph = builder.buildDepsGraph(asts)
|
||||
const cycles = builder.findCircularDependencies(graph)
|
||||
|
||||
expect(cycles).toEqual([])
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -544,6 +544,44 @@ const b = 2`
|
||||
})
|
||||
})
|
||||
|
||||
describe("dependency resolution with different extensions", () => {
|
||||
it("should resolve imports from index files", () => {
|
||||
const content = `import { utils } from "./utils/index"`
|
||||
const ast = parser.parse(content, "ts")
|
||||
const allASTs = new Map<string, FileAST>()
|
||||
allASTs.set("/project/src/main.ts", ast)
|
||||
allASTs.set("/project/src/utils/index.ts", createEmptyFileAST())
|
||||
|
||||
const meta = analyzer.analyze("/project/src/main.ts", ast, content, allASTs)
|
||||
|
||||
expect(meta.dependencies).toContain("/project/src/utils/index.ts")
|
||||
})
|
||||
|
||||
it("should convert .js extension to .ts when resolving", () => {
|
||||
const content = `import { helper } from "./helper.js"`
|
||||
const ast = parser.parse(content, "ts")
|
||||
const allASTs = new Map<string, FileAST>()
|
||||
allASTs.set("/project/src/main.ts", ast)
|
||||
allASTs.set("/project/src/helper.ts", createEmptyFileAST())
|
||||
|
||||
const meta = analyzer.analyze("/project/src/main.ts", ast, content, allASTs)
|
||||
|
||||
expect(meta.dependencies).toContain("/project/src/helper.ts")
|
||||
})
|
||||
|
||||
it("should convert .jsx extension to .tsx when resolving", () => {
|
||||
const content = `import { Button } from "./Button.jsx"`
|
||||
const ast = parser.parse(content, "ts")
|
||||
const allASTs = new Map<string, FileAST>()
|
||||
allASTs.set("/project/src/App.tsx", ast)
|
||||
allASTs.set("/project/src/Button.tsx", createEmptyFileAST())
|
||||
|
||||
const meta = analyzer.analyze("/project/src/App.tsx", ast, content, allASTs)
|
||||
|
||||
expect(meta.dependencies).toContain("/project/src/Button.tsx")
|
||||
})
|
||||
})
|
||||
|
||||
describe("analyze", () => {
|
||||
it("should produce complete FileMeta", () => {
|
||||
const content = `import { helper } from "./helper"
|
||||
|
||||
@@ -94,12 +94,70 @@ describe("Watchdog", () => {
|
||||
it("should return empty array when not watching", () => {
|
||||
expect(watchdog.getWatchedPaths()).toEqual([])
|
||||
})
|
||||
|
||||
it("should return paths when watching", async () => {
|
||||
const testFile = path.join(tempDir, "exists.ts")
|
||||
await fs.writeFile(testFile, "const x = 1")
|
||||
|
||||
watchdog.start(tempDir)
|
||||
await new Promise((resolve) => setTimeout(resolve, 200))
|
||||
|
||||
const paths = watchdog.getWatchedPaths()
|
||||
expect(Array.isArray(paths)).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe("flushAll", () => {
|
||||
it("should not throw when no pending changes", () => {
|
||||
expect(() => watchdog.flushAll()).not.toThrow()
|
||||
})
|
||||
|
||||
it("should flush all pending changes", async () => {
|
||||
const events: FileChangeEvent[] = []
|
||||
watchdog.onFileChange((event) => events.push(event))
|
||||
watchdog.start(tempDir)
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 100))
|
||||
|
||||
const testFile = path.join(tempDir, "flush-test.ts")
|
||||
await fs.writeFile(testFile, "const x = 1")
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 20))
|
||||
|
||||
watchdog.flushAll()
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50))
|
||||
})
|
||||
})
|
||||
|
||||
describe("ignore patterns", () => {
|
||||
it("should handle glob patterns with wildcards", async () => {
|
||||
const customWatchdog = new Watchdog({
|
||||
debounceMs: 50,
|
||||
ignorePatterns: ["*.log", "**/*.tmp"],
|
||||
})
|
||||
|
||||
customWatchdog.start(tempDir)
|
||||
await new Promise((resolve) => setTimeout(resolve, 100))
|
||||
|
||||
expect(customWatchdog.isWatching()).toBe(true)
|
||||
|
||||
await customWatchdog.stop()
|
||||
})
|
||||
|
||||
it("should handle simple directory patterns", async () => {
|
||||
const customWatchdog = new Watchdog({
|
||||
debounceMs: 50,
|
||||
ignorePatterns: ["node_modules", "dist"],
|
||||
})
|
||||
|
||||
customWatchdog.start(tempDir)
|
||||
await new Promise((resolve) => setTimeout(resolve, 100))
|
||||
|
||||
expect(customWatchdog.isWatching()).toBe(true)
|
||||
|
||||
await customWatchdog.stop()
|
||||
})
|
||||
})
|
||||
|
||||
describe("file change detection", () => {
|
||||
|
||||
@@ -0,0 +1,488 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"
|
||||
import type { LLMConfig } from "../../../../src/shared/constants/config.js"
|
||||
import { IpuaroError } from "../../../../src/shared/errors/IpuaroError.js"
|
||||
import { createUserMessage } from "../../../../src/domain/value-objects/ChatMessage.js"
|
||||
|
||||
const mockChatResponse = {
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: "This is a test response.",
|
||||
tool_calls: undefined,
|
||||
},
|
||||
eval_count: 50,
|
||||
done_reason: "stop",
|
||||
}
|
||||
|
||||
const mockListResponse = {
|
||||
models: [
|
||||
{ name: "qwen2.5-coder:7b-instruct", size: 4000000000 },
|
||||
{ name: "llama2:latest", size: 3500000000 },
|
||||
],
|
||||
}
|
||||
|
||||
const mockOllamaInstance = {
|
||||
chat: vi.fn(),
|
||||
list: vi.fn(),
|
||||
pull: vi.fn(),
|
||||
}
|
||||
|
||||
vi.mock("ollama", () => {
|
||||
return {
|
||||
Ollama: vi.fn(() => mockOllamaInstance),
|
||||
}
|
||||
})
|
||||
|
||||
const { OllamaClient } = await import("../../../../src/infrastructure/llm/OllamaClient.js")
|
||||
|
||||
describe("OllamaClient", () => {
|
||||
const defaultConfig: LLMConfig = {
|
||||
model: "qwen2.5-coder:7b-instruct",
|
||||
contextWindow: 128000,
|
||||
temperature: 0.1,
|
||||
host: "http://localhost:11434",
|
||||
timeout: 120000,
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
mockOllamaInstance.chat.mockResolvedValue(mockChatResponse)
|
||||
mockOllamaInstance.list.mockResolvedValue(mockListResponse)
|
||||
mockOllamaInstance.pull.mockResolvedValue({})
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks()
|
||||
})
|
||||
|
||||
describe("constructor", () => {
|
||||
it("should create instance with config", () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
expect(client).toBeDefined()
|
||||
expect(client.getModelName()).toBe("qwen2.5-coder:7b-instruct")
|
||||
expect(client.getContextWindowSize()).toBe(128000)
|
||||
})
|
||||
})
|
||||
|
||||
describe("chat", () => {
|
||||
it("should send messages and return response", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const messages = [createUserMessage("Hello, world!")]
|
||||
|
||||
const response = await client.chat(messages)
|
||||
|
||||
expect(response.content).toBe("This is a test response.")
|
||||
expect(response.tokens).toBe(50)
|
||||
expect(response.stopReason).toBe("end")
|
||||
expect(response.truncated).toBe(false)
|
||||
})
|
||||
|
||||
it("should convert messages to Ollama format", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const messages = [createUserMessage("Hello")]
|
||||
|
||||
await client.chat(messages)
|
||||
|
||||
expect(mockOllamaInstance.chat).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
model: "qwen2.5-coder:7b-instruct",
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
role: "user",
|
||||
content: "Hello",
|
||||
}),
|
||||
]),
|
||||
}),
|
||||
)
|
||||
})
|
||||
|
||||
it("should pass tools when provided", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const messages = [createUserMessage("Read file")]
|
||||
const tools = [
|
||||
{
|
||||
name: "get_lines",
|
||||
description: "Get lines from file",
|
||||
parameters: [
|
||||
{
|
||||
name: "path",
|
||||
type: "string" as const,
|
||||
description: "File path",
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
await client.chat(messages, tools)
|
||||
|
||||
expect(mockOllamaInstance.chat).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
tools: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
type: "function",
|
||||
function: expect.objectContaining({
|
||||
name: "get_lines",
|
||||
}),
|
||||
}),
|
||||
]),
|
||||
}),
|
||||
)
|
||||
})
|
||||
|
||||
it("should extract tool calls from response", async () => {
|
||||
mockOllamaInstance.chat.mockResolvedValue({
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: "",
|
||||
tool_calls: [
|
||||
{
|
||||
function: {
|
||||
name: "get_lines",
|
||||
arguments: { path: "src/index.ts" },
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
eval_count: 30,
|
||||
})
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const response = await client.chat([createUserMessage("Read file")])
|
||||
|
||||
expect(response.toolCalls).toHaveLength(1)
|
||||
expect(response.toolCalls[0].name).toBe("get_lines")
|
||||
expect(response.toolCalls[0].params).toEqual({ path: "src/index.ts" })
|
||||
expect(response.stopReason).toBe("tool_use")
|
||||
})
|
||||
|
||||
it("should handle connection errors", async () => {
|
||||
mockOllamaInstance.chat.mockRejectedValue(new Error("fetch failed"))
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
await expect(client.chat([createUserMessage("Hello")])).rejects.toThrow(IpuaroError)
|
||||
})
|
||||
|
||||
it("should handle model not found errors", async () => {
|
||||
mockOllamaInstance.chat.mockRejectedValue(new Error("model not found"))
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
await expect(client.chat([createUserMessage("Hello")])).rejects.toThrow(/not found/)
|
||||
})
|
||||
})
|
||||
|
||||
describe("countTokens", () => {
|
||||
it("should estimate tokens for text", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
const count = await client.countTokens("Hello, world!")
|
||||
|
||||
expect(count).toBeGreaterThan(0)
|
||||
expect(typeof count).toBe("number")
|
||||
})
|
||||
})
|
||||
|
||||
describe("isAvailable", () => {
|
||||
it("should return true when Ollama is available", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
const available = await client.isAvailable()
|
||||
|
||||
expect(available).toBe(true)
|
||||
})
|
||||
|
||||
it("should return false when Ollama is not available", async () => {
|
||||
mockOllamaInstance.list.mockRejectedValue(new Error("Connection refused"))
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
const available = await client.isAvailable()
|
||||
|
||||
expect(available).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("getModelName", () => {
|
||||
it("should return configured model name", () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
expect(client.getModelName()).toBe("qwen2.5-coder:7b-instruct")
|
||||
})
|
||||
})
|
||||
|
||||
describe("getContextWindowSize", () => {
|
||||
it("should return configured context window size", () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
expect(client.getContextWindowSize()).toBe(128000)
|
||||
})
|
||||
})
|
||||
|
||||
describe("pullModel", () => {
|
||||
it("should pull model successfully", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
await expect(client.pullModel("llama2")).resolves.toBeUndefined()
|
||||
expect(mockOllamaInstance.pull).toHaveBeenCalledWith({
|
||||
model: "llama2",
|
||||
stream: false,
|
||||
})
|
||||
})
|
||||
|
||||
it("should throw on pull failure", async () => {
|
||||
mockOllamaInstance.pull.mockRejectedValue(new Error("Network error"))
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
await expect(client.pullModel("llama2")).rejects.toThrow(IpuaroError)
|
||||
})
|
||||
})
|
||||
|
||||
describe("hasModel", () => {
|
||||
it("should return true for available model", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
const has = await client.hasModel("qwen2.5-coder:7b-instruct")
|
||||
|
||||
expect(has).toBe(true)
|
||||
})
|
||||
|
||||
it("should return true for model prefix", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
const has = await client.hasModel("llama2")
|
||||
|
||||
expect(has).toBe(true)
|
||||
})
|
||||
|
||||
it("should return false for missing model", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
const has = await client.hasModel("unknown-model")
|
||||
|
||||
expect(has).toBe(false)
|
||||
})
|
||||
|
||||
it("should return false when list fails", async () => {
|
||||
mockOllamaInstance.list.mockRejectedValue(new Error("Error"))
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
const has = await client.hasModel("any-model")
|
||||
|
||||
expect(has).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("listModels", () => {
|
||||
it("should return list of model names", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
const models = await client.listModels()
|
||||
|
||||
expect(models).toContain("qwen2.5-coder:7b-instruct")
|
||||
expect(models).toContain("llama2:latest")
|
||||
})
|
||||
|
||||
it("should throw on list failure", async () => {
|
||||
mockOllamaInstance.list.mockRejectedValue(new Error("Network error"))
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
await expect(client.listModels()).rejects.toThrow(IpuaroError)
|
||||
})
|
||||
})
|
||||
|
||||
describe("abort", () => {
|
||||
it("should not throw when no request is in progress", () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
expect(() => client.abort()).not.toThrow()
|
||||
})
|
||||
})
|
||||
|
||||
describe("message conversion", () => {
|
||||
it("should convert system messages", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const messages = [
|
||||
{
|
||||
role: "system" as const,
|
||||
content: "You are a helpful assistant",
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
]
|
||||
|
||||
await client.chat(messages)
|
||||
|
||||
expect(mockOllamaInstance.chat).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
role: "system",
|
||||
content: "You are a helpful assistant",
|
||||
}),
|
||||
]),
|
||||
}),
|
||||
)
|
||||
})
|
||||
|
||||
it("should convert tool result messages", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const messages = [
|
||||
{
|
||||
role: "tool" as const,
|
||||
content: '{"result": "success"}',
|
||||
timestamp: Date.now(),
|
||||
toolResults: [
|
||||
{ callId: "call_1", success: true, data: "success", executionTimeMs: 10 },
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
await client.chat(messages)
|
||||
|
||||
expect(mockOllamaInstance.chat).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
role: "tool",
|
||||
content: '{"result": "success"}',
|
||||
}),
|
||||
]),
|
||||
}),
|
||||
)
|
||||
})
|
||||
|
||||
it("should convert assistant messages with tool calls", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const messages = [
|
||||
{
|
||||
role: "assistant" as const,
|
||||
content: "I will read the file",
|
||||
timestamp: Date.now(),
|
||||
toolCalls: [{ id: "call_1", name: "get_lines", params: { path: "test.ts" } }],
|
||||
},
|
||||
]
|
||||
|
||||
await client.chat(messages)
|
||||
|
||||
expect(mockOllamaInstance.chat).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
role: "assistant",
|
||||
content: "I will read the file",
|
||||
tool_calls: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
function: expect.objectContaining({
|
||||
name: "get_lines",
|
||||
arguments: { path: "test.ts" },
|
||||
}),
|
||||
}),
|
||||
]),
|
||||
}),
|
||||
]),
|
||||
}),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe("response handling", () => {
|
||||
it("should estimate tokens when eval_count is undefined", async () => {
|
||||
mockOllamaInstance.chat.mockResolvedValue({
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: "Hello world response",
|
||||
tool_calls: undefined,
|
||||
},
|
||||
eval_count: undefined,
|
||||
done_reason: "stop",
|
||||
})
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const response = await client.chat([createUserMessage("Hello")])
|
||||
|
||||
expect(response.tokens).toBeGreaterThan(0)
|
||||
})
|
||||
|
||||
it("should return length stop reason", async () => {
|
||||
mockOllamaInstance.chat.mockResolvedValue({
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: "Truncated...",
|
||||
tool_calls: undefined,
|
||||
},
|
||||
eval_count: 100,
|
||||
done_reason: "length",
|
||||
})
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const response = await client.chat([createUserMessage("Hello")])
|
||||
|
||||
expect(response.stopReason).toBe("length")
|
||||
})
|
||||
})
|
||||
|
||||
describe("tool parameter conversion", () => {
|
||||
it("should include enum values when present", async () => {
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
const messages = [createUserMessage("Get status")]
|
||||
const tools = [
|
||||
{
|
||||
name: "get_status",
|
||||
description: "Get status",
|
||||
parameters: [
|
||||
{
|
||||
name: "type",
|
||||
type: "string" as const,
|
||||
description: "Status type",
|
||||
required: true,
|
||||
enum: ["active", "inactive", "pending"],
|
||||
},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
await client.chat(messages, tools)
|
||||
|
||||
expect(mockOllamaInstance.chat).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
tools: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
function: expect.objectContaining({
|
||||
parameters: expect.objectContaining({
|
||||
properties: expect.objectContaining({
|
||||
type: expect.objectContaining({
|
||||
enum: ["active", "inactive", "pending"],
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
]),
|
||||
}),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe("error handling", () => {
|
||||
it("should handle ECONNREFUSED errors", async () => {
|
||||
mockOllamaInstance.chat.mockRejectedValue(new Error("ECONNREFUSED"))
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
await expect(client.chat([createUserMessage("Hello")])).rejects.toThrow(
|
||||
/Cannot connect to Ollama/,
|
||||
)
|
||||
})
|
||||
|
||||
it("should handle generic errors with context", async () => {
|
||||
mockOllamaInstance.pull.mockRejectedValue(new Error("Unknown error"))
|
||||
|
||||
const client = new OllamaClient(defaultConfig)
|
||||
|
||||
await expect(client.pullModel("test")).rejects.toThrow(/Failed to pull model/)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,251 @@
|
||||
import { describe, it, expect } from "vitest"
|
||||
import {
|
||||
parseToolCalls,
|
||||
formatToolCallsAsXml,
|
||||
extractThinking,
|
||||
hasToolCalls,
|
||||
validateToolCallParams,
|
||||
} from "../../../../src/infrastructure/llm/ResponseParser.js"
|
||||
import { createToolCall } from "../../../../src/domain/value-objects/ToolCall.js"
|
||||
|
||||
describe("ResponseParser", () => {
|
||||
describe("parseToolCalls", () => {
|
||||
it("should parse a single tool call", () => {
|
||||
const response = `<tool_call name="get_lines">
|
||||
<path>src/index.ts</path>
|
||||
<start>1</start>
|
||||
<end>10</end>
|
||||
</tool_call>`
|
||||
|
||||
const result = parseToolCalls(response)
|
||||
|
||||
expect(result.toolCalls).toHaveLength(1)
|
||||
expect(result.toolCalls[0].name).toBe("get_lines")
|
||||
expect(result.toolCalls[0].params).toEqual({
|
||||
path: "src/index.ts",
|
||||
start: 1,
|
||||
end: 10,
|
||||
})
|
||||
expect(result.hasParseErrors).toBe(false)
|
||||
})
|
||||
|
||||
it("should parse multiple tool calls", () => {
|
||||
const response = `
|
||||
<tool_call name="get_lines">
|
||||
<path>src/a.ts</path>
|
||||
</tool_call>
|
||||
<tool_call name="get_function">
|
||||
<path>src/b.ts</path>
|
||||
<name>myFunc</name>
|
||||
</tool_call>
|
||||
`
|
||||
|
||||
const result = parseToolCalls(response)
|
||||
|
||||
expect(result.toolCalls).toHaveLength(2)
|
||||
expect(result.toolCalls[0].name).toBe("get_lines")
|
||||
expect(result.toolCalls[1].name).toBe("get_function")
|
||||
})
|
||||
|
||||
it("should extract text content without tool calls", () => {
|
||||
const response = `Let me check the file.
|
||||
<tool_call name="get_lines">
|
||||
<path>src/index.ts</path>
|
||||
</tool_call>
|
||||
Here's what I found.`
|
||||
|
||||
const result = parseToolCalls(response)
|
||||
|
||||
expect(result.content).toContain("Let me check the file.")
|
||||
expect(result.content).toContain("Here's what I found.")
|
||||
expect(result.content).not.toContain("tool_call")
|
||||
})
|
||||
|
||||
it("should parse boolean values", () => {
|
||||
const response = `<tool_call name="git_diff">
|
||||
<staged>true</staged>
|
||||
</tool_call>`
|
||||
|
||||
const result = parseToolCalls(response)
|
||||
|
||||
expect(result.toolCalls[0].params.staged).toBe(true)
|
||||
})
|
||||
|
||||
it("should parse null values", () => {
|
||||
const response = `<tool_call name="test">
|
||||
<value>null</value>
|
||||
</tool_call>`
|
||||
|
||||
const result = parseToolCalls(response)
|
||||
|
||||
expect(result.toolCalls[0].params.value).toBe(null)
|
||||
})
|
||||
|
||||
it("should parse JSON arrays", () => {
|
||||
const response = `<tool_call name="git_commit">
|
||||
<files>["a.ts", "b.ts"]</files>
|
||||
</tool_call>`
|
||||
|
||||
const result = parseToolCalls(response)
|
||||
|
||||
expect(result.toolCalls[0].params.files).toEqual(["a.ts", "b.ts"])
|
||||
})
|
||||
|
||||
it("should parse JSON objects", () => {
|
||||
const response = `<tool_call name="test">
|
||||
<config>{"key": "value"}</config>
|
||||
</tool_call>`
|
||||
|
||||
const result = parseToolCalls(response)
|
||||
|
||||
expect(result.toolCalls[0].params.config).toEqual({ key: "value" })
|
||||
})
|
||||
|
||||
it("should return empty array for response without tool calls", () => {
|
||||
const response = "This is just a regular response."
|
||||
|
||||
const result = parseToolCalls(response)
|
||||
|
||||
expect(result.toolCalls).toHaveLength(0)
|
||||
expect(result.content).toBe(response)
|
||||
})
|
||||
|
||||
it("should handle named param syntax", () => {
|
||||
const response = `<tool_call name="get_lines">
|
||||
<param name="path">src/index.ts</param>
|
||||
<param name="start">5</param>
|
||||
</tool_call>`
|
||||
|
||||
const result = parseToolCalls(response)
|
||||
|
||||
expect(result.toolCalls[0].params).toEqual({
|
||||
path: "src/index.ts",
|
||||
start: 5,
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("formatToolCallsAsXml", () => {
|
||||
it("should format tool calls as XML", () => {
|
||||
const toolCalls = [createToolCall("1", "get_lines", { path: "src/index.ts", start: 1 })]
|
||||
|
||||
const xml = formatToolCallsAsXml(toolCalls)
|
||||
|
||||
expect(xml).toContain('<tool_call name="get_lines">')
|
||||
expect(xml).toContain("<path>src/index.ts</path>")
|
||||
expect(xml).toContain("<start>1</start>")
|
||||
expect(xml).toContain("</tool_call>")
|
||||
})
|
||||
|
||||
it("should format multiple tool calls", () => {
|
||||
const toolCalls = [
|
||||
createToolCall("1", "get_lines", { path: "a.ts" }),
|
||||
createToolCall("2", "get_function", { path: "b.ts", name: "foo" }),
|
||||
]
|
||||
|
||||
const xml = formatToolCallsAsXml(toolCalls)
|
||||
|
||||
expect(xml).toContain('<tool_call name="get_lines">')
|
||||
expect(xml).toContain('<tool_call name="get_function">')
|
||||
})
|
||||
|
||||
it("should handle object values as JSON", () => {
|
||||
const toolCalls = [createToolCall("1", "test", { data: { key: "value" } })]
|
||||
|
||||
const xml = formatToolCallsAsXml(toolCalls)
|
||||
|
||||
expect(xml).toContain('{"key":"value"}')
|
||||
})
|
||||
})
|
||||
|
||||
describe("extractThinking", () => {
|
||||
it("should extract thinking content", () => {
|
||||
const response = `<thinking>Let me analyze this.</thinking>
|
||||
Here is the answer.`
|
||||
|
||||
const result = extractThinking(response)
|
||||
|
||||
expect(result.thinking).toBe("Let me analyze this.")
|
||||
expect(result.content).toContain("Here is the answer.")
|
||||
expect(result.content).not.toContain("thinking")
|
||||
})
|
||||
|
||||
it("should handle multiple thinking blocks", () => {
|
||||
const response = `<thinking>First thought.</thinking>
|
||||
Some content.
|
||||
<thinking>Second thought.</thinking>
|
||||
More content.`
|
||||
|
||||
const result = extractThinking(response)
|
||||
|
||||
expect(result.thinking).toContain("First thought.")
|
||||
expect(result.thinking).toContain("Second thought.")
|
||||
})
|
||||
|
||||
it("should return original content if no thinking", () => {
|
||||
const response = "Just a regular response."
|
||||
|
||||
const result = extractThinking(response)
|
||||
|
||||
expect(result.thinking).toBe("")
|
||||
expect(result.content).toBe(response)
|
||||
})
|
||||
})
|
||||
|
||||
describe("hasToolCalls", () => {
|
||||
it("should return true if response has tool calls", () => {
|
||||
const response = `<tool_call name="get_lines"><path>a.ts</path></tool_call>`
|
||||
|
||||
expect(hasToolCalls(response)).toBe(true)
|
||||
})
|
||||
|
||||
it("should return false if response has no tool calls", () => {
|
||||
const response = "Just text without tool calls."
|
||||
|
||||
expect(hasToolCalls(response)).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("validateToolCallParams", () => {
|
||||
it("should return valid for complete params", () => {
|
||||
const params = { path: "src/index.ts", start: 1, end: 10 }
|
||||
const required = ["path", "start", "end"]
|
||||
|
||||
const result = validateToolCallParams("get_lines", params, required)
|
||||
|
||||
expect(result.valid).toBe(true)
|
||||
expect(result.errors).toHaveLength(0)
|
||||
})
|
||||
|
||||
it("should return errors for missing required params", () => {
|
||||
const params = { path: "src/index.ts" }
|
||||
const required = ["path", "start", "end"]
|
||||
|
||||
const result = validateToolCallParams("get_lines", params, required)
|
||||
|
||||
expect(result.valid).toBe(false)
|
||||
expect(result.errors).toHaveLength(2)
|
||||
expect(result.errors).toContain("Missing required parameter: start")
|
||||
expect(result.errors).toContain("Missing required parameter: end")
|
||||
})
|
||||
|
||||
it("should treat null and undefined as missing", () => {
|
||||
const params = { path: null, start: undefined }
|
||||
const required = ["path", "start"]
|
||||
|
||||
const result = validateToolCallParams("test", params, required)
|
||||
|
||||
expect(result.valid).toBe(false)
|
||||
expect(result.errors).toHaveLength(2)
|
||||
})
|
||||
|
||||
it("should accept empty required array", () => {
|
||||
const params = {}
|
||||
const required: string[] = []
|
||||
|
||||
const result = validateToolCallParams("git_status", params, required)
|
||||
|
||||
expect(result.valid).toBe(true)
|
||||
})
|
||||
})
|
||||
})
|
||||
717
packages/ipuaro/tests/unit/infrastructure/llm/prompts.test.ts
Normal file
@@ -0,0 +1,717 @@
|
||||
import { describe, it, expect } from "vitest"
|
||||
import {
|
||||
SYSTEM_PROMPT,
|
||||
buildInitialContext,
|
||||
buildFileContext,
|
||||
truncateContext,
|
||||
type ProjectStructure,
|
||||
} from "../../../../src/infrastructure/llm/prompts.js"
|
||||
import type { FileAST } from "../../../../src/domain/value-objects/FileAST.js"
|
||||
import type { FileMeta } from "../../../../src/domain/value-objects/FileMeta.js"
|
||||
|
||||
describe("prompts", () => {
|
||||
describe("SYSTEM_PROMPT", () => {
|
||||
it("should be a non-empty string", () => {
|
||||
expect(typeof SYSTEM_PROMPT).toBe("string")
|
||||
expect(SYSTEM_PROMPT.length).toBeGreaterThan(100)
|
||||
})
|
||||
|
||||
it("should contain core principles", () => {
|
||||
expect(SYSTEM_PROMPT).toContain("Lazy Loading")
|
||||
expect(SYSTEM_PROMPT).toContain("Precision")
|
||||
expect(SYSTEM_PROMPT).toContain("Safety")
|
||||
})
|
||||
|
||||
it("should list available tools", () => {
|
||||
expect(SYSTEM_PROMPT).toContain("get_lines")
|
||||
expect(SYSTEM_PROMPT).toContain("edit_lines")
|
||||
expect(SYSTEM_PROMPT).toContain("find_references")
|
||||
expect(SYSTEM_PROMPT).toContain("git_status")
|
||||
expect(SYSTEM_PROMPT).toContain("run_command")
|
||||
})
|
||||
|
||||
it("should include safety rules", () => {
|
||||
expect(SYSTEM_PROMPT).toContain("Safety Rules")
|
||||
expect(SYSTEM_PROMPT).toContain("Never execute commands that could harm")
|
||||
})
|
||||
})
|
||||
|
||||
describe("buildInitialContext", () => {
|
||||
const structure: ProjectStructure = {
|
||||
name: "my-project",
|
||||
rootPath: "/home/user/my-project",
|
||||
files: ["src/index.ts", "src/utils.ts", "package.json"],
|
||||
directories: ["src", "tests"],
|
||||
}
|
||||
|
||||
const asts = new Map<string, FileAST>([
|
||||
[
|
||||
"src/index.ts",
|
||||
{
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [
|
||||
{
|
||||
name: "main",
|
||||
lineStart: 1,
|
||||
lineEnd: 10,
|
||||
params: [],
|
||||
isAsync: false,
|
||||
isExported: true,
|
||||
},
|
||||
],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
},
|
||||
],
|
||||
[
|
||||
"src/utils.ts",
|
||||
{
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [
|
||||
{
|
||||
name: "Helper",
|
||||
lineStart: 1,
|
||||
lineEnd: 20,
|
||||
methods: [],
|
||||
properties: [],
|
||||
implements: [],
|
||||
isExported: true,
|
||||
isAbstract: false,
|
||||
},
|
||||
],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
},
|
||||
],
|
||||
])
|
||||
|
||||
it("should include project header", () => {
|
||||
const context = buildInitialContext(structure, asts)
|
||||
|
||||
expect(context).toContain("# Project: my-project")
|
||||
expect(context).toContain("Root: /home/user/my-project")
|
||||
expect(context).toContain("Files: 3")
|
||||
expect(context).toContain("Directories: 2")
|
||||
})
|
||||
|
||||
it("should include directory structure", () => {
|
||||
const context = buildInitialContext(structure, asts)
|
||||
|
||||
expect(context).toContain("## Structure")
|
||||
expect(context).toContain("src/")
|
||||
expect(context).toContain("tests/")
|
||||
})
|
||||
|
||||
it("should include file overview with AST summaries", () => {
|
||||
const context = buildInitialContext(structure, asts)
|
||||
|
||||
expect(context).toContain("## Files")
|
||||
expect(context).toContain("src/index.ts")
|
||||
expect(context).toContain("fn: main")
|
||||
expect(context).toContain("src/utils.ts")
|
||||
expect(context).toContain("class: Helper")
|
||||
})
|
||||
|
||||
it("should include file flags from metadata", () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/index.ts",
|
||||
{
|
||||
complexity: { loc: 100, nesting: 3, cyclomaticComplexity: 10, score: 75 },
|
||||
dependencies: [],
|
||||
dependents: ["a.ts", "b.ts", "c.ts", "d.ts", "e.ts", "f.ts"],
|
||||
isHub: true,
|
||||
isEntryPoint: true,
|
||||
fileType: "source",
|
||||
},
|
||||
],
|
||||
])
|
||||
|
||||
const context = buildInitialContext(structure, asts, metas)
|
||||
|
||||
expect(context).toContain("(hub, entry, complex)")
|
||||
})
|
||||
})
|
||||
|
||||
describe("buildFileContext", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [
|
||||
{ name: "fs", from: "node:fs", line: 1, type: "builtin", isDefault: false },
|
||||
{ name: "helper", from: "./helper", line: 2, type: "internal", isDefault: true },
|
||||
],
|
||||
exports: [
|
||||
{ name: "main", line: 10, isDefault: false, kind: "function" },
|
||||
{ name: "Config", line: 20, isDefault: true, kind: "class" },
|
||||
],
|
||||
functions: [
|
||||
{
|
||||
name: "main",
|
||||
lineStart: 10,
|
||||
lineEnd: 30,
|
||||
params: [
|
||||
{ name: "args", optional: false, hasDefault: false },
|
||||
{ name: "options", optional: true, hasDefault: false },
|
||||
],
|
||||
isAsync: true,
|
||||
isExported: true,
|
||||
},
|
||||
],
|
||||
classes: [
|
||||
{
|
||||
name: "Config",
|
||||
lineStart: 40,
|
||||
lineEnd: 80,
|
||||
methods: [
|
||||
{
|
||||
name: "load",
|
||||
lineStart: 50,
|
||||
lineEnd: 60,
|
||||
params: [],
|
||||
isAsync: false,
|
||||
visibility: "public",
|
||||
isStatic: false,
|
||||
},
|
||||
],
|
||||
properties: [],
|
||||
extends: "BaseConfig",
|
||||
implements: ["IConfig"],
|
||||
isExported: true,
|
||||
isAbstract: false,
|
||||
},
|
||||
],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
it("should include file path header", () => {
|
||||
const context = buildFileContext("src/index.ts", ast)
|
||||
|
||||
expect(context).toContain("## src/index.ts")
|
||||
})
|
||||
|
||||
it("should include imports section", () => {
|
||||
const context = buildFileContext("src/index.ts", ast)
|
||||
|
||||
expect(context).toContain("### Imports")
|
||||
expect(context).toContain('fs from "node:fs" (builtin)')
|
||||
expect(context).toContain('helper from "./helper" (internal)')
|
||||
})
|
||||
|
||||
it("should include exports section", () => {
|
||||
const context = buildFileContext("src/index.ts", ast)
|
||||
|
||||
expect(context).toContain("### Exports")
|
||||
expect(context).toContain("function main")
|
||||
expect(context).toContain("class Config (default)")
|
||||
})
|
||||
|
||||
it("should include functions section", () => {
|
||||
const context = buildFileContext("src/index.ts", ast)
|
||||
|
||||
expect(context).toContain("### Functions")
|
||||
expect(context).toContain("async main(args, options)")
|
||||
expect(context).toContain("[10-30]")
|
||||
})
|
||||
|
||||
it("should include classes section with methods", () => {
|
||||
const context = buildFileContext("src/index.ts", ast)
|
||||
|
||||
expect(context).toContain("### Classes")
|
||||
expect(context).toContain("Config extends BaseConfig implements IConfig")
|
||||
expect(context).toContain("[40-80]")
|
||||
expect(context).toContain("load()")
|
||||
})
|
||||
|
||||
it("should include metadata section when provided", () => {
|
||||
const meta: FileMeta = {
|
||||
complexity: { loc: 100, nesting: 3, cyclomaticComplexity: 10, score: 65 },
|
||||
dependencies: ["a.ts", "b.ts"],
|
||||
dependents: ["c.ts"],
|
||||
isHub: false,
|
||||
isEntryPoint: true,
|
||||
fileType: "source",
|
||||
}
|
||||
|
||||
const context = buildFileContext("src/index.ts", ast, meta)
|
||||
|
||||
expect(context).toContain("### Metadata")
|
||||
expect(context).toContain("LOC: 100")
|
||||
expect(context).toContain("Complexity: 65/100")
|
||||
expect(context).toContain("Dependencies: 2")
|
||||
expect(context).toContain("Dependents: 1")
|
||||
})
|
||||
})
|
||||
|
||||
describe("buildFileContext - edge cases", () => {
|
||||
it("should handle empty imports", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
const context = buildFileContext("empty.ts", ast)
|
||||
|
||||
expect(context).toContain("## empty.ts")
|
||||
expect(context).not.toContain("### Imports")
|
||||
})
|
||||
|
||||
it("should handle empty exports", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [{ name: "x", from: "./x", line: 1, type: "internal", isDefault: false }],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
const context = buildFileContext("no-exports.ts", ast)
|
||||
|
||||
expect(context).toContain("### Imports")
|
||||
expect(context).not.toContain("### Exports")
|
||||
})
|
||||
|
||||
it("should handle empty functions", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [
|
||||
{
|
||||
name: "MyClass",
|
||||
lineStart: 1,
|
||||
lineEnd: 10,
|
||||
methods: [],
|
||||
properties: [],
|
||||
implements: [],
|
||||
isExported: false,
|
||||
isAbstract: false,
|
||||
},
|
||||
],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
const context = buildFileContext("no-functions.ts", ast)
|
||||
|
||||
expect(context).not.toContain("### Functions")
|
||||
expect(context).toContain("### Classes")
|
||||
})
|
||||
|
||||
it("should handle empty classes", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [
|
||||
{
|
||||
name: "test",
|
||||
lineStart: 1,
|
||||
lineEnd: 5,
|
||||
params: [],
|
||||
isAsync: false,
|
||||
isExported: false,
|
||||
},
|
||||
],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
const context = buildFileContext("no-classes.ts", ast)
|
||||
|
||||
expect(context).toContain("### Functions")
|
||||
expect(context).not.toContain("### Classes")
|
||||
})
|
||||
|
||||
it("should handle class without extends", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [
|
||||
{
|
||||
name: "Standalone",
|
||||
lineStart: 1,
|
||||
lineEnd: 10,
|
||||
methods: [],
|
||||
properties: [],
|
||||
implements: ["IFoo"],
|
||||
isExported: false,
|
||||
isAbstract: false,
|
||||
},
|
||||
],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
const context = buildFileContext("standalone.ts", ast)
|
||||
|
||||
expect(context).toContain("Standalone implements IFoo")
|
||||
expect(context).not.toContain("extends")
|
||||
})
|
||||
|
||||
it("should handle class without implements", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [
|
||||
{
|
||||
name: "Child",
|
||||
lineStart: 1,
|
||||
lineEnd: 10,
|
||||
methods: [],
|
||||
properties: [],
|
||||
extends: "Parent",
|
||||
implements: [],
|
||||
isExported: false,
|
||||
isAbstract: false,
|
||||
},
|
||||
],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
const context = buildFileContext("child.ts", ast)
|
||||
|
||||
expect(context).toContain("Child extends Parent")
|
||||
expect(context).not.toContain("implements")
|
||||
})
|
||||
|
||||
it("should handle method with private visibility", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [
|
||||
{
|
||||
name: "WithPrivate",
|
||||
lineStart: 1,
|
||||
lineEnd: 20,
|
||||
methods: [
|
||||
{
|
||||
name: "secretMethod",
|
||||
lineStart: 5,
|
||||
lineEnd: 10,
|
||||
params: [],
|
||||
isAsync: false,
|
||||
visibility: "private",
|
||||
isStatic: false,
|
||||
},
|
||||
],
|
||||
properties: [],
|
||||
implements: [],
|
||||
isExported: false,
|
||||
isAbstract: false,
|
||||
},
|
||||
],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
const context = buildFileContext("private.ts", ast)
|
||||
|
||||
expect(context).toContain("private secretMethod()")
|
||||
})
|
||||
|
||||
it("should handle non-async function", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [
|
||||
{
|
||||
name: "syncFn",
|
||||
lineStart: 1,
|
||||
lineEnd: 5,
|
||||
params: [{ name: "x", optional: false, hasDefault: false }],
|
||||
isAsync: false,
|
||||
isExported: false,
|
||||
},
|
||||
],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
const context = buildFileContext("sync.ts", ast)
|
||||
|
||||
expect(context).toContain("syncFn(x)")
|
||||
expect(context).not.toContain("async syncFn")
|
||||
})
|
||||
|
||||
it("should handle export without default", () => {
|
||||
const ast: FileAST = {
|
||||
imports: [],
|
||||
exports: [{ name: "foo", line: 1, isDefault: false, kind: "variable" }],
|
||||
functions: [],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
}
|
||||
|
||||
const context = buildFileContext("named-export.ts", ast)
|
||||
|
||||
expect(context).toContain("variable foo")
|
||||
expect(context).not.toContain("(default)")
|
||||
})
|
||||
})
|
||||
|
||||
describe("buildInitialContext - edge cases", () => {
|
||||
it("should handle nested directory names", () => {
|
||||
const structure: ProjectStructure = {
|
||||
name: "test",
|
||||
rootPath: "/test",
|
||||
files: [],
|
||||
directories: ["src/components/ui"],
|
||||
}
|
||||
const asts = new Map<string, FileAST>()
|
||||
|
||||
const context = buildInitialContext(structure, asts)
|
||||
|
||||
expect(context).toContain("ui/")
|
||||
})
|
||||
|
||||
it("should handle file with only interfaces", () => {
|
||||
const structure: ProjectStructure = {
|
||||
name: "test",
|
||||
rootPath: "/test",
|
||||
files: ["types.ts"],
|
||||
directories: [],
|
||||
}
|
||||
const asts = new Map<string, FileAST>([
|
||||
[
|
||||
"types.ts",
|
||||
{
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [],
|
||||
interfaces: [{ name: "IFoo", lineStart: 1, lineEnd: 5, isExported: true }],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
},
|
||||
],
|
||||
])
|
||||
|
||||
const context = buildInitialContext(structure, asts)
|
||||
|
||||
expect(context).toContain("interface: IFoo")
|
||||
})
|
||||
|
||||
it("should handle file with only type aliases", () => {
|
||||
const structure: ProjectStructure = {
|
||||
name: "test",
|
||||
rootPath: "/test",
|
||||
files: ["types.ts"],
|
||||
directories: [],
|
||||
}
|
||||
const asts = new Map<string, FileAST>([
|
||||
[
|
||||
"types.ts",
|
||||
{
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [
|
||||
{ name: "MyType", lineStart: 1, lineEnd: 1, isExported: true },
|
||||
],
|
||||
parseError: false,
|
||||
},
|
||||
],
|
||||
])
|
||||
|
||||
const context = buildInitialContext(structure, asts)
|
||||
|
||||
expect(context).toContain("type: MyType")
|
||||
})
|
||||
|
||||
it("should handle file with no AST content", () => {
|
||||
const structure: ProjectStructure = {
|
||||
name: "test",
|
||||
rootPath: "/test",
|
||||
files: ["empty.ts"],
|
||||
directories: [],
|
||||
}
|
||||
const asts = new Map<string, FileAST>([
|
||||
[
|
||||
"empty.ts",
|
||||
{
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
},
|
||||
],
|
||||
])
|
||||
|
||||
const context = buildInitialContext(structure, asts)
|
||||
|
||||
expect(context).toContain("- empty.ts")
|
||||
})
|
||||
|
||||
it("should handle meta with only hub flag", () => {
|
||||
const structure: ProjectStructure = {
|
||||
name: "test",
|
||||
rootPath: "/test",
|
||||
files: ["hub.ts"],
|
||||
directories: [],
|
||||
}
|
||||
const asts = new Map<string, FileAST>([
|
||||
[
|
||||
"hub.ts",
|
||||
{
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
},
|
||||
],
|
||||
])
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"hub.ts",
|
||||
{
|
||||
complexity: { loc: 10, nesting: 1, cyclomaticComplexity: 1, score: 10 },
|
||||
dependencies: [],
|
||||
dependents: [],
|
||||
isHub: true,
|
||||
isEntryPoint: false,
|
||||
fileType: "source",
|
||||
},
|
||||
],
|
||||
])
|
||||
|
||||
const context = buildInitialContext(structure, asts, metas)
|
||||
|
||||
expect(context).toContain("(hub)")
|
||||
expect(context).not.toContain("entry")
|
||||
expect(context).not.toContain("complex")
|
||||
})
|
||||
|
||||
it("should handle meta with no flags", () => {
|
||||
const structure: ProjectStructure = {
|
||||
name: "test",
|
||||
rootPath: "/test",
|
||||
files: ["normal.ts"],
|
||||
directories: [],
|
||||
}
|
||||
const asts = new Map<string, FileAST>([
|
||||
[
|
||||
"normal.ts",
|
||||
{
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
},
|
||||
],
|
||||
])
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"normal.ts",
|
||||
{
|
||||
complexity: { loc: 10, nesting: 1, cyclomaticComplexity: 1, score: 10 },
|
||||
dependencies: [],
|
||||
dependents: [],
|
||||
isHub: false,
|
||||
isEntryPoint: false,
|
||||
fileType: "source",
|
||||
},
|
||||
],
|
||||
])
|
||||
|
||||
const context = buildInitialContext(structure, asts, metas)
|
||||
|
||||
expect(context).toContain("- normal.ts")
|
||||
expect(context).not.toContain("(hub")
|
||||
expect(context).not.toContain("entry")
|
||||
expect(context).not.toContain("complex")
|
||||
})
|
||||
|
||||
it("should skip files not in AST map", () => {
|
||||
const structure: ProjectStructure = {
|
||||
name: "test",
|
||||
rootPath: "/test",
|
||||
files: ["exists.ts", "missing.ts"],
|
||||
directories: [],
|
||||
}
|
||||
const asts = new Map<string, FileAST>([
|
||||
[
|
||||
"exists.ts",
|
||||
{
|
||||
imports: [],
|
||||
exports: [],
|
||||
functions: [],
|
||||
classes: [],
|
||||
interfaces: [],
|
||||
typeAliases: [],
|
||||
parseError: false,
|
||||
},
|
||||
],
|
||||
])
|
||||
|
||||
const context = buildInitialContext(structure, asts)
|
||||
|
||||
expect(context).toContain("exists.ts")
|
||||
expect(context).not.toContain("missing.ts")
|
||||
})
|
||||
})
|
||||
|
||||
describe("truncateContext", () => {
|
||||
it("should return original context if within limit", () => {
|
||||
const context = "Short context"
|
||||
|
||||
const result = truncateContext(context, 1000)
|
||||
|
||||
expect(result).toBe(context)
|
||||
})
|
||||
|
||||
it("should truncate long context", () => {
|
||||
const context = "a".repeat(1000)
|
||||
|
||||
const result = truncateContext(context, 100)
|
||||
|
||||
expect(result.length).toBeLessThan(500)
|
||||
expect(result).toContain("truncated")
|
||||
})
|
||||
|
||||
it("should break at newline boundary", () => {
|
||||
const context = "Line 1\nLine 2\nLine 3\n" + "a".repeat(1000)
|
||||
|
||||
const result = truncateContext(context, 50)
|
||||
|
||||
expect(result).toContain("truncated")
|
||||
})
|
||||
})
|
||||
})
|
||||
287
packages/ipuaro/tests/unit/infrastructure/llm/toolDefs.test.ts
Normal file
@@ -0,0 +1,287 @@
|
||||
import { describe, it, expect } from "vitest"
|
||||
import {
|
||||
ALL_TOOLS,
|
||||
READ_TOOLS,
|
||||
EDIT_TOOLS,
|
||||
SEARCH_TOOLS,
|
||||
ANALYSIS_TOOLS,
|
||||
GIT_TOOLS,
|
||||
RUN_TOOLS,
|
||||
CONFIRMATION_TOOLS,
|
||||
requiresConfirmation,
|
||||
getToolDef,
|
||||
getToolsByCategory,
|
||||
GET_LINES_TOOL,
|
||||
GET_FUNCTION_TOOL,
|
||||
GET_CLASS_TOOL,
|
||||
GET_STRUCTURE_TOOL,
|
||||
EDIT_LINES_TOOL,
|
||||
CREATE_FILE_TOOL,
|
||||
DELETE_FILE_TOOL,
|
||||
FIND_REFERENCES_TOOL,
|
||||
FIND_DEFINITION_TOOL,
|
||||
GET_DEPENDENCIES_TOOL,
|
||||
GET_DEPENDENTS_TOOL,
|
||||
GET_COMPLEXITY_TOOL,
|
||||
GET_TODOS_TOOL,
|
||||
GIT_STATUS_TOOL,
|
||||
GIT_DIFF_TOOL,
|
||||
GIT_COMMIT_TOOL,
|
||||
RUN_COMMAND_TOOL,
|
||||
RUN_TESTS_TOOL,
|
||||
} from "../../../../src/infrastructure/llm/toolDefs.js"
|
||||
|
||||
describe("toolDefs", () => {
|
||||
describe("ALL_TOOLS", () => {
|
||||
it("should contain exactly 18 tools", () => {
|
||||
expect(ALL_TOOLS).toHaveLength(18)
|
||||
})
|
||||
|
||||
it("should have unique tool names", () => {
|
||||
const names = ALL_TOOLS.map((t) => t.name)
|
||||
const uniqueNames = new Set(names)
|
||||
expect(uniqueNames.size).toBe(18)
|
||||
})
|
||||
|
||||
it("should have valid structure for all tools", () => {
|
||||
for (const tool of ALL_TOOLS) {
|
||||
expect(tool.name).toBeDefined()
|
||||
expect(typeof tool.name).toBe("string")
|
||||
expect(tool.description).toBeDefined()
|
||||
expect(typeof tool.description).toBe("string")
|
||||
expect(Array.isArray(tool.parameters)).toBe(true)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe("READ_TOOLS", () => {
|
||||
it("should contain 4 read tools", () => {
|
||||
expect(READ_TOOLS).toHaveLength(4)
|
||||
})
|
||||
|
||||
it("should include all read tools", () => {
|
||||
expect(READ_TOOLS).toContain(GET_LINES_TOOL)
|
||||
expect(READ_TOOLS).toContain(GET_FUNCTION_TOOL)
|
||||
expect(READ_TOOLS).toContain(GET_CLASS_TOOL)
|
||||
expect(READ_TOOLS).toContain(GET_STRUCTURE_TOOL)
|
||||
})
|
||||
})
|
||||
|
||||
describe("EDIT_TOOLS", () => {
|
||||
it("should contain 3 edit tools", () => {
|
||||
expect(EDIT_TOOLS).toHaveLength(3)
|
||||
})
|
||||
|
||||
it("should include all edit tools", () => {
|
||||
expect(EDIT_TOOLS).toContain(EDIT_LINES_TOOL)
|
||||
expect(EDIT_TOOLS).toContain(CREATE_FILE_TOOL)
|
||||
expect(EDIT_TOOLS).toContain(DELETE_FILE_TOOL)
|
||||
})
|
||||
})
|
||||
|
||||
describe("SEARCH_TOOLS", () => {
|
||||
it("should contain 2 search tools", () => {
|
||||
expect(SEARCH_TOOLS).toHaveLength(2)
|
||||
})
|
||||
|
||||
it("should include all search tools", () => {
|
||||
expect(SEARCH_TOOLS).toContain(FIND_REFERENCES_TOOL)
|
||||
expect(SEARCH_TOOLS).toContain(FIND_DEFINITION_TOOL)
|
||||
})
|
||||
})
|
||||
|
||||
describe("ANALYSIS_TOOLS", () => {
|
||||
it("should contain 4 analysis tools", () => {
|
||||
expect(ANALYSIS_TOOLS).toHaveLength(4)
|
||||
})
|
||||
|
||||
it("should include all analysis tools", () => {
|
||||
expect(ANALYSIS_TOOLS).toContain(GET_DEPENDENCIES_TOOL)
|
||||
expect(ANALYSIS_TOOLS).toContain(GET_DEPENDENTS_TOOL)
|
||||
expect(ANALYSIS_TOOLS).toContain(GET_COMPLEXITY_TOOL)
|
||||
expect(ANALYSIS_TOOLS).toContain(GET_TODOS_TOOL)
|
||||
})
|
||||
})
|
||||
|
||||
describe("GIT_TOOLS", () => {
|
||||
it("should contain 3 git tools", () => {
|
||||
expect(GIT_TOOLS).toHaveLength(3)
|
||||
})
|
||||
|
||||
it("should include all git tools", () => {
|
||||
expect(GIT_TOOLS).toContain(GIT_STATUS_TOOL)
|
||||
expect(GIT_TOOLS).toContain(GIT_DIFF_TOOL)
|
||||
expect(GIT_TOOLS).toContain(GIT_COMMIT_TOOL)
|
||||
})
|
||||
})
|
||||
|
||||
describe("RUN_TOOLS", () => {
|
||||
it("should contain 2 run tools", () => {
|
||||
expect(RUN_TOOLS).toHaveLength(2)
|
||||
})
|
||||
|
||||
it("should include all run tools", () => {
|
||||
expect(RUN_TOOLS).toContain(RUN_COMMAND_TOOL)
|
||||
expect(RUN_TOOLS).toContain(RUN_TESTS_TOOL)
|
||||
})
|
||||
})
|
||||
|
||||
describe("individual tool definitions", () => {
|
||||
describe("GET_LINES_TOOL", () => {
|
||||
it("should have correct name", () => {
|
||||
expect(GET_LINES_TOOL.name).toBe("get_lines")
|
||||
})
|
||||
|
||||
it("should have required path parameter", () => {
|
||||
const pathParam = GET_LINES_TOOL.parameters.find((p) => p.name === "path")
|
||||
expect(pathParam).toBeDefined()
|
||||
expect(pathParam?.required).toBe(true)
|
||||
})
|
||||
|
||||
it("should have optional start and end parameters", () => {
|
||||
const startParam = GET_LINES_TOOL.parameters.find((p) => p.name === "start")
|
||||
const endParam = GET_LINES_TOOL.parameters.find((p) => p.name === "end")
|
||||
expect(startParam?.required).toBe(false)
|
||||
expect(endParam?.required).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("EDIT_LINES_TOOL", () => {
|
||||
it("should have all required parameters", () => {
|
||||
const requiredParams = EDIT_LINES_TOOL.parameters.filter((p) => p.required)
|
||||
const names = requiredParams.map((p) => p.name)
|
||||
expect(names).toContain("path")
|
||||
expect(names).toContain("start")
|
||||
expect(names).toContain("end")
|
||||
expect(names).toContain("content")
|
||||
})
|
||||
})
|
||||
|
||||
describe("GIT_STATUS_TOOL", () => {
|
||||
it("should have no required parameters", () => {
|
||||
expect(GIT_STATUS_TOOL.parameters).toHaveLength(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe("GET_TODOS_TOOL", () => {
|
||||
it("should have enum for type parameter", () => {
|
||||
const typeParam = GET_TODOS_TOOL.parameters.find((p) => p.name === "type")
|
||||
expect(typeParam?.enum).toEqual(["TODO", "FIXME", "HACK", "XXX"])
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("CONFIRMATION_TOOLS", () => {
|
||||
it("should be a Set", () => {
|
||||
expect(CONFIRMATION_TOOLS instanceof Set).toBe(true)
|
||||
})
|
||||
|
||||
it("should contain edit and git_commit tools", () => {
|
||||
expect(CONFIRMATION_TOOLS.has("edit_lines")).toBe(true)
|
||||
expect(CONFIRMATION_TOOLS.has("create_file")).toBe(true)
|
||||
expect(CONFIRMATION_TOOLS.has("delete_file")).toBe(true)
|
||||
expect(CONFIRMATION_TOOLS.has("git_commit")).toBe(true)
|
||||
})
|
||||
|
||||
it("should not contain read tools", () => {
|
||||
expect(CONFIRMATION_TOOLS.has("get_lines")).toBe(false)
|
||||
expect(CONFIRMATION_TOOLS.has("get_function")).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("requiresConfirmation", () => {
|
||||
it("should return true for edit tools", () => {
|
||||
expect(requiresConfirmation("edit_lines")).toBe(true)
|
||||
expect(requiresConfirmation("create_file")).toBe(true)
|
||||
expect(requiresConfirmation("delete_file")).toBe(true)
|
||||
})
|
||||
|
||||
it("should return true for git_commit", () => {
|
||||
expect(requiresConfirmation("git_commit")).toBe(true)
|
||||
})
|
||||
|
||||
it("should return false for read tools", () => {
|
||||
expect(requiresConfirmation("get_lines")).toBe(false)
|
||||
expect(requiresConfirmation("get_function")).toBe(false)
|
||||
expect(requiresConfirmation("get_structure")).toBe(false)
|
||||
})
|
||||
|
||||
it("should return false for analysis tools", () => {
|
||||
expect(requiresConfirmation("get_dependencies")).toBe(false)
|
||||
expect(requiresConfirmation("get_complexity")).toBe(false)
|
||||
})
|
||||
|
||||
it("should return false for unknown tools", () => {
|
||||
expect(requiresConfirmation("unknown_tool")).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("getToolDef", () => {
|
||||
it("should return tool definition by name", () => {
|
||||
const tool = getToolDef("get_lines")
|
||||
expect(tool).toBe(GET_LINES_TOOL)
|
||||
})
|
||||
|
||||
it("should return undefined for unknown tool", () => {
|
||||
const tool = getToolDef("unknown_tool")
|
||||
expect(tool).toBeUndefined()
|
||||
})
|
||||
|
||||
it("should find all 18 tools", () => {
|
||||
const names = [
|
||||
"get_lines",
|
||||
"get_function",
|
||||
"get_class",
|
||||
"get_structure",
|
||||
"edit_lines",
|
||||
"create_file",
|
||||
"delete_file",
|
||||
"find_references",
|
||||
"find_definition",
|
||||
"get_dependencies",
|
||||
"get_dependents",
|
||||
"get_complexity",
|
||||
"get_todos",
|
||||
"git_status",
|
||||
"git_diff",
|
||||
"git_commit",
|
||||
"run_command",
|
||||
"run_tests",
|
||||
]
|
||||
|
||||
for (const name of names) {
|
||||
expect(getToolDef(name)).toBeDefined()
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe("getToolsByCategory", () => {
|
||||
it("should return read tools", () => {
|
||||
expect(getToolsByCategory("read")).toBe(READ_TOOLS)
|
||||
})
|
||||
|
||||
it("should return edit tools", () => {
|
||||
expect(getToolsByCategory("edit")).toBe(EDIT_TOOLS)
|
||||
})
|
||||
|
||||
it("should return search tools", () => {
|
||||
expect(getToolsByCategory("search")).toBe(SEARCH_TOOLS)
|
||||
})
|
||||
|
||||
it("should return analysis tools", () => {
|
||||
expect(getToolsByCategory("analysis")).toBe(ANALYSIS_TOOLS)
|
||||
})
|
||||
|
||||
it("should return git tools", () => {
|
||||
expect(getToolsByCategory("git")).toBe(GIT_TOOLS)
|
||||
})
|
||||
|
||||
it("should return run tools", () => {
|
||||
expect(getToolsByCategory("run")).toBe(RUN_TOOLS)
|
||||
})
|
||||
|
||||
it("should return empty array for unknown category", () => {
|
||||
expect(getToolsByCategory("unknown")).toEqual([])
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,320 @@
|
||||
import { describe, it, expect, beforeEach, afterEach } from "vitest"
|
||||
import * as path from "node:path"
|
||||
import * as fs from "node:fs/promises"
|
||||
import * as os from "node:os"
|
||||
import {
|
||||
PathValidator,
|
||||
createPathValidator,
|
||||
validatePath,
|
||||
} from "../../../../src/infrastructure/security/PathValidator.js"
|
||||
|
||||
describe("PathValidator", () => {
|
||||
let validator: PathValidator
|
||||
let tempDir: string
|
||||
let projectRoot: string
|
||||
|
||||
beforeEach(async () => {
|
||||
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "pathvalidator-test-"))
|
||||
projectRoot = path.join(tempDir, "project")
|
||||
await fs.mkdir(projectRoot)
|
||||
validator = new PathValidator(projectRoot)
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
await fs.rm(tempDir, { recursive: true, force: true })
|
||||
})
|
||||
|
||||
describe("constructor", () => {
|
||||
it("should resolve project root to absolute path", () => {
|
||||
const relativeValidator = new PathValidator("./project")
|
||||
expect(relativeValidator.getProjectRoot()).toBe(path.resolve("./project"))
|
||||
})
|
||||
|
||||
it("should store project root", () => {
|
||||
expect(validator.getProjectRoot()).toBe(projectRoot)
|
||||
})
|
||||
})
|
||||
|
||||
describe("validateSync", () => {
|
||||
it("should validate relative path within project", () => {
|
||||
const result = validator.validateSync("src/file.ts")
|
||||
expect(result.status).toBe("valid")
|
||||
expect(result.absolutePath).toBe(path.join(projectRoot, "src/file.ts"))
|
||||
expect(result.relativePath).toBe(path.join("src", "file.ts"))
|
||||
})
|
||||
|
||||
it("should validate nested relative paths", () => {
|
||||
const result = validator.validateSync("src/components/Button.tsx")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should validate root level files", () => {
|
||||
const result = validator.validateSync("package.json")
|
||||
expect(result.status).toBe("valid")
|
||||
expect(result.relativePath).toBe("package.json")
|
||||
})
|
||||
|
||||
it("should reject empty path", () => {
|
||||
const result = validator.validateSync("")
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path is empty")
|
||||
})
|
||||
|
||||
it("should reject whitespace-only path", () => {
|
||||
const result = validator.validateSync(" ")
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path is empty")
|
||||
})
|
||||
|
||||
it("should reject path with .. traversal", () => {
|
||||
const result = validator.validateSync("../outside")
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path contains traversal patterns")
|
||||
})
|
||||
|
||||
it("should reject path with embedded .. traversal", () => {
|
||||
const result = validator.validateSync("src/../../../etc/passwd")
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path contains traversal patterns")
|
||||
})
|
||||
|
||||
it("should reject path starting with tilde", () => {
|
||||
const result = validator.validateSync("~/secret/file")
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path contains traversal patterns")
|
||||
})
|
||||
|
||||
it("should reject absolute path outside project", () => {
|
||||
const result = validator.validateSync("/etc/passwd")
|
||||
expect(result.status).toBe("outside_project")
|
||||
expect(result.reason).toBe("Path is outside project root")
|
||||
})
|
||||
|
||||
it("should accept absolute path inside project", () => {
|
||||
const absoluteInside = path.join(projectRoot, "src/file.ts")
|
||||
const result = validator.validateSync(absoluteInside)
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should trim whitespace from path", () => {
|
||||
const result = validator.validateSync(" src/file.ts ")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should handle Windows-style backslashes", () => {
|
||||
const result = validator.validateSync("src\\components\\file.ts")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should reject path that resolves outside via symlink-like patterns", () => {
|
||||
const result = validator.validateSync("src/./../../etc")
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path contains traversal patterns")
|
||||
})
|
||||
})
|
||||
|
||||
describe("validate (async)", () => {
|
||||
beforeEach(async () => {
|
||||
await fs.mkdir(path.join(projectRoot, "src"), { recursive: true })
|
||||
await fs.writeFile(path.join(projectRoot, "src/file.ts"), "// content")
|
||||
await fs.mkdir(path.join(projectRoot, "dist"), { recursive: true })
|
||||
})
|
||||
|
||||
it("should validate existing file", async () => {
|
||||
const result = await validator.validate("src/file.ts")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should reject non-existent file by default", async () => {
|
||||
const result = await validator.validate("src/nonexistent.ts")
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path does not exist")
|
||||
})
|
||||
|
||||
it("should allow non-existent file with allowNonExistent option", async () => {
|
||||
const result = await validator.validate("src/newfile.ts", { allowNonExistent: true })
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should validate directory when requireDirectory is true", async () => {
|
||||
const result = await validator.validate("src", { requireDirectory: true })
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should reject file when requireDirectory is true", async () => {
|
||||
const result = await validator.validate("src/file.ts", { requireDirectory: true })
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path is not a directory")
|
||||
})
|
||||
|
||||
it("should validate file when requireFile is true", async () => {
|
||||
const result = await validator.validate("src/file.ts", { requireFile: true })
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should reject directory when requireFile is true", async () => {
|
||||
const result = await validator.validate("src", { requireFile: true })
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path is not a file")
|
||||
})
|
||||
|
||||
it("should handle permission errors gracefully", async () => {
|
||||
const result = await validator.validate("src/../../../root/secret")
|
||||
expect(result.status).toBe("invalid")
|
||||
})
|
||||
|
||||
it("should still check traversal before existence", async () => {
|
||||
const result = await validator.validate("../outside", { allowNonExistent: true })
|
||||
expect(result.status).toBe("invalid")
|
||||
expect(result.reason).toBe("Path contains traversal patterns")
|
||||
})
|
||||
})
|
||||
|
||||
describe("isWithin", () => {
|
||||
it("should return true for path within project", () => {
|
||||
expect(validator.isWithin("src/file.ts")).toBe(true)
|
||||
})
|
||||
|
||||
it("should return true for project root itself", () => {
|
||||
expect(validator.isWithin(".")).toBe(true)
|
||||
expect(validator.isWithin("")).toBe(false)
|
||||
})
|
||||
|
||||
it("should return false for path outside project", () => {
|
||||
expect(validator.isWithin("/etc/passwd")).toBe(false)
|
||||
})
|
||||
|
||||
it("should return false for traversal path", () => {
|
||||
expect(validator.isWithin("../outside")).toBe(false)
|
||||
})
|
||||
|
||||
it("should return false for empty path", () => {
|
||||
expect(validator.isWithin("")).toBe(false)
|
||||
})
|
||||
|
||||
it("should return false for tilde path", () => {
|
||||
expect(validator.isWithin("~/file")).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe("resolve", () => {
|
||||
it("should resolve valid relative path to absolute", () => {
|
||||
const result = validator.resolve("src/file.ts")
|
||||
expect(result).toBe(path.join(projectRoot, "src/file.ts"))
|
||||
})
|
||||
|
||||
it("should return null for invalid path", () => {
|
||||
expect(validator.resolve("../outside")).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for empty path", () => {
|
||||
expect(validator.resolve("")).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for path outside project", () => {
|
||||
expect(validator.resolve("/etc/passwd")).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe("relativize", () => {
|
||||
it("should return relative path for valid input", () => {
|
||||
const result = validator.relativize("src/file.ts")
|
||||
expect(result).toBe(path.join("src", "file.ts"))
|
||||
})
|
||||
|
||||
it("should handle absolute path within project", () => {
|
||||
const absolutePath = path.join(projectRoot, "src/file.ts")
|
||||
const result = validator.relativize(absolutePath)
|
||||
expect(result).toBe(path.join("src", "file.ts"))
|
||||
})
|
||||
|
||||
it("should return null for path outside project", () => {
|
||||
expect(validator.relativize("/etc/passwd")).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for traversal path", () => {
|
||||
expect(validator.relativize("../outside")).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("should handle path with multiple slashes", () => {
|
||||
const result = validator.validateSync("src///file.ts")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should handle path with dots in filename", () => {
|
||||
const result = validator.validateSync("src/file.test.ts")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should handle hidden files", () => {
|
||||
const result = validator.validateSync(".gitignore")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should handle hidden directories", () => {
|
||||
const result = validator.validateSync(".github/workflows/ci.yml")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should handle single dot current directory", () => {
|
||||
const result = validator.validateSync("./src/file.ts")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should handle project root as path", () => {
|
||||
const result = validator.validateSync(projectRoot)
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should handle unicode characters in path", () => {
|
||||
const result = validator.validateSync("src/файл.ts")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
|
||||
it("should handle spaces in path", () => {
|
||||
const result = validator.validateSync("src/my file.ts")
|
||||
expect(result.status).toBe("valid")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("createPathValidator", () => {
|
||||
it("should create PathValidator instance", () => {
|
||||
const validator = createPathValidator("/tmp/project")
|
||||
expect(validator).toBeInstanceOf(PathValidator)
|
||||
expect(validator.getProjectRoot()).toBe("/tmp/project")
|
||||
})
|
||||
})
|
||||
|
||||
describe("validatePath", () => {
|
||||
let tempDir: string
|
||||
let projectRoot: string
|
||||
|
||||
beforeEach(async () => {
|
||||
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "validatepath-test-"))
|
||||
projectRoot = path.join(tempDir, "project")
|
||||
await fs.mkdir(projectRoot)
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
await fs.rm(tempDir, { recursive: true, force: true })
|
||||
})
|
||||
|
||||
it("should return true for valid path", () => {
|
||||
expect(validatePath("src/file.ts", projectRoot)).toBe(true)
|
||||
})
|
||||
|
||||
it("should return false for traversal path", () => {
|
||||
expect(validatePath("../outside", projectRoot)).toBe(false)
|
||||
})
|
||||
|
||||
it("should return false for path outside project", () => {
|
||||
expect(validatePath("/etc/passwd", projectRoot)).toBe(false)
|
||||
})
|
||||
|
||||
it("should return false for empty path", () => {
|
||||
expect(validatePath("", projectRoot)).toBe(false)
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,390 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest"
|
||||
import { RedisSessionStorage } from "../../../../src/infrastructure/storage/RedisSessionStorage.js"
|
||||
import { RedisClient } from "../../../../src/infrastructure/storage/RedisClient.js"
|
||||
import { Session } from "../../../../src/domain/entities/Session.js"
|
||||
import type { UndoEntry } from "../../../../src/domain/value-objects/UndoEntry.js"
|
||||
import { SessionKeys, SessionFields } from "../../../../src/infrastructure/storage/schema.js"
|
||||
|
||||
describe("RedisSessionStorage", () => {
|
||||
let storage: RedisSessionStorage
|
||||
let mockRedis: {
|
||||
hset: ReturnType<typeof vi.fn>
|
||||
hget: ReturnType<typeof vi.fn>
|
||||
hgetall: ReturnType<typeof vi.fn>
|
||||
del: ReturnType<typeof vi.fn>
|
||||
lrange: ReturnType<typeof vi.fn>
|
||||
lpush: ReturnType<typeof vi.fn>
|
||||
lpos: ReturnType<typeof vi.fn>
|
||||
lrem: ReturnType<typeof vi.fn>
|
||||
rpush: ReturnType<typeof vi.fn>
|
||||
rpop: ReturnType<typeof vi.fn>
|
||||
llen: ReturnType<typeof vi.fn>
|
||||
lpop: ReturnType<typeof vi.fn>
|
||||
exists: ReturnType<typeof vi.fn>
|
||||
pipeline: ReturnType<typeof vi.fn>
|
||||
}
|
||||
let mockClient: RedisClient
|
||||
|
||||
beforeEach(() => {
|
||||
mockRedis = {
|
||||
hset: vi.fn().mockResolvedValue(1),
|
||||
hget: vi.fn().mockResolvedValue(null),
|
||||
hgetall: vi.fn().mockResolvedValue({}),
|
||||
del: vi.fn().mockResolvedValue(1),
|
||||
lrange: vi.fn().mockResolvedValue([]),
|
||||
lpush: vi.fn().mockResolvedValue(1),
|
||||
lpos: vi.fn().mockResolvedValue(null),
|
||||
lrem: vi.fn().mockResolvedValue(1),
|
||||
rpush: vi.fn().mockResolvedValue(1),
|
||||
rpop: vi.fn().mockResolvedValue(null),
|
||||
llen: vi.fn().mockResolvedValue(0),
|
||||
lpop: vi.fn().mockResolvedValue(null),
|
||||
exists: vi.fn().mockResolvedValue(0),
|
||||
pipeline: vi.fn().mockReturnValue({
|
||||
hset: vi.fn().mockReturnThis(),
|
||||
del: vi.fn().mockReturnThis(),
|
||||
exec: vi.fn().mockResolvedValue([]),
|
||||
}),
|
||||
}
|
||||
|
||||
mockClient = {
|
||||
getClient: () => mockRedis,
|
||||
connect: vi.fn(),
|
||||
disconnect: vi.fn(),
|
||||
isConnected: vi.fn().mockReturnValue(true),
|
||||
} as unknown as RedisClient
|
||||
|
||||
storage = new RedisSessionStorage(mockClient)
|
||||
})
|
||||
|
||||
describe("saveSession", () => {
|
||||
it("should save session data to Redis", async () => {
|
||||
const session = new Session("test-session-1", "test-project")
|
||||
session.history = [{ role: "user", content: "Hello", timestamp: Date.now() }]
|
||||
|
||||
await storage.saveSession(session)
|
||||
|
||||
const pipeline = mockRedis.pipeline()
|
||||
expect(pipeline.hset).toHaveBeenCalled()
|
||||
expect(pipeline.exec).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it("should add session to list if not exists", async () => {
|
||||
const session = new Session("test-session-2", "test-project")
|
||||
|
||||
await storage.saveSession(session)
|
||||
|
||||
expect(mockRedis.lpos).toHaveBeenCalledWith(SessionKeys.list, "test-session-2")
|
||||
expect(mockRedis.lpush).toHaveBeenCalledWith(SessionKeys.list, "test-session-2")
|
||||
})
|
||||
|
||||
it("should not add session to list if already exists", async () => {
|
||||
const session = new Session("existing-session", "test-project")
|
||||
mockRedis.lpos.mockResolvedValue(0)
|
||||
|
||||
await storage.saveSession(session)
|
||||
|
||||
expect(mockRedis.lpush).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe("loadSession", () => {
|
||||
it("should return null for non-existent session", async () => {
|
||||
mockRedis.hgetall.mockResolvedValue({})
|
||||
|
||||
const result = await storage.loadSession("non-existent")
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it("should load session from Redis", async () => {
|
||||
const sessionData = {
|
||||
[SessionFields.projectName]: "test-project",
|
||||
[SessionFields.createdAt]: "1700000000000",
|
||||
[SessionFields.lastActivityAt]: "1700001000000",
|
||||
[SessionFields.history]: "[]",
|
||||
[SessionFields.context]: JSON.stringify({
|
||||
filesInContext: [],
|
||||
tokenUsage: 0,
|
||||
needsCompression: false,
|
||||
}),
|
||||
[SessionFields.stats]: JSON.stringify({
|
||||
totalTokens: 0,
|
||||
totalTimeMs: 0,
|
||||
toolCalls: 0,
|
||||
editsApplied: 0,
|
||||
editsRejected: 0,
|
||||
}),
|
||||
[SessionFields.inputHistory]: "[]",
|
||||
}
|
||||
mockRedis.hgetall.mockResolvedValue(sessionData)
|
||||
mockRedis.lrange.mockResolvedValue([])
|
||||
|
||||
const result = await storage.loadSession("test-session")
|
||||
|
||||
expect(result).not.toBeNull()
|
||||
expect(result?.id).toBe("test-session")
|
||||
expect(result?.projectName).toBe("test-project")
|
||||
expect(result?.createdAt).toBe(1700000000000)
|
||||
})
|
||||
|
||||
it("should load undo stack with session", async () => {
|
||||
const sessionData = {
|
||||
[SessionFields.projectName]: "test-project",
|
||||
[SessionFields.createdAt]: "1700000000000",
|
||||
[SessionFields.lastActivityAt]: "1700001000000",
|
||||
[SessionFields.history]: "[]",
|
||||
[SessionFields.context]: "{}",
|
||||
[SessionFields.stats]: "{}",
|
||||
[SessionFields.inputHistory]: "[]",
|
||||
}
|
||||
const undoEntry: UndoEntry = {
|
||||
id: "undo-1",
|
||||
timestamp: Date.now(),
|
||||
filePath: "test.ts",
|
||||
previousContent: ["old"],
|
||||
newContent: ["new"],
|
||||
description: "Edit",
|
||||
}
|
||||
mockRedis.hgetall.mockResolvedValue(sessionData)
|
||||
mockRedis.lrange.mockResolvedValue([JSON.stringify(undoEntry)])
|
||||
|
||||
const result = await storage.loadSession("test-session")
|
||||
|
||||
expect(result?.undoStack).toHaveLength(1)
|
||||
expect(result?.undoStack[0].id).toBe("undo-1")
|
||||
})
|
||||
})
|
||||
|
||||
describe("deleteSession", () => {
|
||||
it("should delete session data and undo stack", async () => {
|
||||
await storage.deleteSession("test-session")
|
||||
|
||||
expect(mockRedis.del).toHaveBeenCalledWith(SessionKeys.data("test-session"))
|
||||
expect(mockRedis.del).toHaveBeenCalledWith(SessionKeys.undo("test-session"))
|
||||
expect(mockRedis.lrem).toHaveBeenCalledWith(SessionKeys.list, 0, "test-session")
|
||||
})
|
||||
})
|
||||
|
||||
describe("listSessions", () => {
|
||||
it("should return empty array when no sessions", async () => {
|
||||
mockRedis.lrange.mockResolvedValue([])
|
||||
|
||||
const result = await storage.listSessions()
|
||||
|
||||
expect(result).toEqual([])
|
||||
})
|
||||
|
||||
it("should list all sessions", async () => {
|
||||
mockRedis.lrange.mockResolvedValue(["session-1", "session-2"])
|
||||
mockRedis.hgetall.mockImplementation((key: string) => {
|
||||
if (key.includes("session-1")) {
|
||||
return Promise.resolve({
|
||||
[SessionFields.projectName]: "project-1",
|
||||
[SessionFields.createdAt]: "1700000000000",
|
||||
[SessionFields.lastActivityAt]: "1700001000000",
|
||||
[SessionFields.history]: "[]",
|
||||
})
|
||||
}
|
||||
if (key.includes("session-2")) {
|
||||
return Promise.resolve({
|
||||
[SessionFields.projectName]: "project-2",
|
||||
[SessionFields.createdAt]: "1700002000000",
|
||||
[SessionFields.lastActivityAt]: "1700003000000",
|
||||
[SessionFields.history]: '[{"role":"user","content":"Hi"}]',
|
||||
})
|
||||
}
|
||||
return Promise.resolve({})
|
||||
})
|
||||
|
||||
const result = await storage.listSessions()
|
||||
|
||||
expect(result).toHaveLength(2)
|
||||
expect(result[0].id).toBe("session-2")
|
||||
expect(result[1].id).toBe("session-1")
|
||||
})
|
||||
|
||||
it("should filter by project name", async () => {
|
||||
mockRedis.lrange.mockResolvedValue(["session-1", "session-2"])
|
||||
mockRedis.hgetall.mockImplementation((key: string) => {
|
||||
if (key.includes("session-1")) {
|
||||
return Promise.resolve({
|
||||
[SessionFields.projectName]: "project-1",
|
||||
[SessionFields.createdAt]: "1700000000000",
|
||||
[SessionFields.lastActivityAt]: "1700001000000",
|
||||
[SessionFields.history]: "[]",
|
||||
})
|
||||
}
|
||||
if (key.includes("session-2")) {
|
||||
return Promise.resolve({
|
||||
[SessionFields.projectName]: "project-2",
|
||||
[SessionFields.createdAt]: "1700002000000",
|
||||
[SessionFields.lastActivityAt]: "1700003000000",
|
||||
[SessionFields.history]: "[]",
|
||||
})
|
||||
}
|
||||
return Promise.resolve({})
|
||||
})
|
||||
|
||||
const result = await storage.listSessions("project-1")
|
||||
|
||||
expect(result).toHaveLength(1)
|
||||
expect(result[0].projectName).toBe("project-1")
|
||||
})
|
||||
})
|
||||
|
||||
describe("getLatestSession", () => {
|
||||
it("should return null when no sessions", async () => {
|
||||
mockRedis.lrange.mockResolvedValue([])
|
||||
|
||||
const result = await storage.getLatestSession("test-project")
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it("should return the most recent session", async () => {
|
||||
mockRedis.lrange.mockImplementation((key: string) => {
|
||||
if (key === SessionKeys.list) {
|
||||
return Promise.resolve(["session-1"])
|
||||
}
|
||||
return Promise.resolve([])
|
||||
})
|
||||
mockRedis.hgetall.mockResolvedValue({
|
||||
[SessionFields.projectName]: "test-project",
|
||||
[SessionFields.createdAt]: "1700000000000",
|
||||
[SessionFields.lastActivityAt]: "1700001000000",
|
||||
[SessionFields.history]: "[]",
|
||||
[SessionFields.context]: "{}",
|
||||
[SessionFields.stats]: "{}",
|
||||
[SessionFields.inputHistory]: "[]",
|
||||
})
|
||||
|
||||
const result = await storage.getLatestSession("test-project")
|
||||
|
||||
expect(result).not.toBeNull()
|
||||
expect(result?.id).toBe("session-1")
|
||||
})
|
||||
})
|
||||
|
||||
describe("sessionExists", () => {
|
||||
it("should return false for non-existent session", async () => {
|
||||
mockRedis.exists.mockResolvedValue(0)
|
||||
|
||||
const result = await storage.sessionExists("non-existent")
|
||||
|
||||
expect(result).toBe(false)
|
||||
})
|
||||
|
||||
it("should return true for existing session", async () => {
|
||||
mockRedis.exists.mockResolvedValue(1)
|
||||
|
||||
const result = await storage.sessionExists("existing")
|
||||
|
||||
expect(result).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe("undo stack operations", () => {
|
||||
const undoEntry: UndoEntry = {
|
||||
id: "undo-1",
|
||||
timestamp: Date.now(),
|
||||
filePath: "test.ts",
|
||||
previousContent: ["old"],
|
||||
newContent: ["new"],
|
||||
description: "Edit",
|
||||
}
|
||||
|
||||
describe("pushUndoEntry", () => {
|
||||
it("should push undo entry to stack", async () => {
|
||||
mockRedis.llen.mockResolvedValue(1)
|
||||
|
||||
await storage.pushUndoEntry("session-1", undoEntry)
|
||||
|
||||
expect(mockRedis.rpush).toHaveBeenCalledWith(
|
||||
SessionKeys.undo("session-1"),
|
||||
JSON.stringify(undoEntry),
|
||||
)
|
||||
})
|
||||
|
||||
it("should remove oldest entry when stack exceeds limit", async () => {
|
||||
mockRedis.llen.mockResolvedValue(11)
|
||||
|
||||
await storage.pushUndoEntry("session-1", undoEntry)
|
||||
|
||||
expect(mockRedis.lpop).toHaveBeenCalledWith(SessionKeys.undo("session-1"))
|
||||
})
|
||||
})
|
||||
|
||||
describe("popUndoEntry", () => {
|
||||
it("should return null for empty stack", async () => {
|
||||
mockRedis.rpop.mockResolvedValue(null)
|
||||
|
||||
const result = await storage.popUndoEntry("session-1")
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it("should pop and return undo entry", async () => {
|
||||
mockRedis.rpop.mockResolvedValue(JSON.stringify(undoEntry))
|
||||
|
||||
const result = await storage.popUndoEntry("session-1")
|
||||
|
||||
expect(result).not.toBeNull()
|
||||
expect(result?.id).toBe("undo-1")
|
||||
})
|
||||
})
|
||||
|
||||
describe("getUndoStack", () => {
|
||||
it("should return empty array for empty stack", async () => {
|
||||
mockRedis.lrange.mockResolvedValue([])
|
||||
|
||||
const result = await storage.getUndoStack("session-1")
|
||||
|
||||
expect(result).toEqual([])
|
||||
})
|
||||
|
||||
it("should return all undo entries", async () => {
|
||||
mockRedis.lrange.mockResolvedValue([
|
||||
JSON.stringify({ ...undoEntry, id: "undo-1" }),
|
||||
JSON.stringify({ ...undoEntry, id: "undo-2" }),
|
||||
])
|
||||
|
||||
const result = await storage.getUndoStack("session-1")
|
||||
|
||||
expect(result).toHaveLength(2)
|
||||
expect(result[0].id).toBe("undo-1")
|
||||
expect(result[1].id).toBe("undo-2")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("touchSession", () => {
|
||||
it("should update last activity timestamp", async () => {
|
||||
const beforeTouch = Date.now()
|
||||
|
||||
await storage.touchSession("session-1")
|
||||
|
||||
expect(mockRedis.hset).toHaveBeenCalledWith(
|
||||
SessionKeys.data("session-1"),
|
||||
SessionFields.lastActivityAt,
|
||||
expect.any(String),
|
||||
)
|
||||
|
||||
const callArgs = mockRedis.hset.mock.calls[0]
|
||||
const timestamp = Number(callArgs[2])
|
||||
expect(timestamp).toBeGreaterThanOrEqual(beforeTouch)
|
||||
})
|
||||
})
|
||||
|
||||
describe("clearAllSessions", () => {
|
||||
it("should clear all session data", async () => {
|
||||
mockRedis.lrange.mockResolvedValue(["session-1", "session-2"])
|
||||
|
||||
await storage.clearAllSessions()
|
||||
|
||||
const pipeline = mockRedis.pipeline()
|
||||
expect(pipeline.del).toHaveBeenCalled()
|
||||
expect(pipeline.exec).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,513 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest"
|
||||
import {
|
||||
GetComplexityTool,
|
||||
type GetComplexityResult,
|
||||
} from "../../../../../src/infrastructure/tools/analysis/GetComplexityTool.js"
|
||||
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
|
||||
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
|
||||
import type { FileMeta } from "../../../../../src/domain/value-objects/FileMeta.js"
|
||||
|
||||
function createMockFileMeta(partial: Partial<FileMeta> = {}): FileMeta {
|
||||
return {
|
||||
complexity: { loc: 10, nesting: 2, cyclomaticComplexity: 5, score: 25 },
|
||||
dependencies: [],
|
||||
dependents: [],
|
||||
isHub: false,
|
||||
isEntryPoint: false,
|
||||
fileType: "source",
|
||||
...partial,
|
||||
}
|
||||
}
|
||||
|
||||
function createMockStorage(metas: Map<string, FileMeta> = new Map()): IStorage {
|
||||
return {
|
||||
getFile: vi.fn().mockResolvedValue(null),
|
||||
setFile: vi.fn(),
|
||||
deleteFile: vi.fn(),
|
||||
getAllFiles: vi.fn().mockResolvedValue(new Map()),
|
||||
getFileCount: vi.fn().mockResolvedValue(0),
|
||||
getAST: vi.fn().mockResolvedValue(null),
|
||||
setAST: vi.fn(),
|
||||
deleteAST: vi.fn(),
|
||||
getAllASTs: vi.fn().mockResolvedValue(new Map()),
|
||||
getMeta: vi.fn().mockImplementation((p: string) => Promise.resolve(metas.get(p) ?? null)),
|
||||
setMeta: vi.fn(),
|
||||
deleteMeta: vi.fn(),
|
||||
getAllMetas: vi.fn().mockResolvedValue(metas),
|
||||
getSymbolIndex: vi.fn().mockResolvedValue(new Map()),
|
||||
setSymbolIndex: vi.fn(),
|
||||
getDepsGraph: vi.fn().mockResolvedValue({ imports: new Map(), importedBy: new Map() }),
|
||||
setDepsGraph: vi.fn(),
|
||||
getProjectConfig: vi.fn(),
|
||||
setProjectConfig: vi.fn(),
|
||||
connect: vi.fn(),
|
||||
disconnect: vi.fn(),
|
||||
isConnected: vi.fn().mockReturnValue(true),
|
||||
clear: vi.fn(),
|
||||
} as unknown as IStorage
|
||||
}
|
||||
|
||||
function createMockContext(storage?: IStorage): ToolContext {
|
||||
return {
|
||||
projectRoot: "/test/project",
|
||||
storage: storage ?? createMockStorage(),
|
||||
requestConfirmation: vi.fn().mockResolvedValue(true),
|
||||
onProgress: vi.fn(),
|
||||
}
|
||||
}
|
||||
|
||||
describe("GetComplexityTool", () => {
|
||||
let tool: GetComplexityTool
|
||||
|
||||
beforeEach(() => {
|
||||
tool = new GetComplexityTool()
|
||||
})
|
||||
|
||||
describe("metadata", () => {
|
||||
it("should have correct name", () => {
|
||||
expect(tool.name).toBe("get_complexity")
|
||||
})
|
||||
|
||||
it("should have correct category", () => {
|
||||
expect(tool.category).toBe("analysis")
|
||||
})
|
||||
|
||||
it("should not require confirmation", () => {
|
||||
expect(tool.requiresConfirmation).toBe(false)
|
||||
})
|
||||
|
||||
it("should have correct parameters", () => {
|
||||
expect(tool.parameters).toHaveLength(2)
|
||||
expect(tool.parameters[0].name).toBe("path")
|
||||
expect(tool.parameters[0].required).toBe(false)
|
||||
expect(tool.parameters[1].name).toBe("limit")
|
||||
expect(tool.parameters[1].required).toBe(false)
|
||||
})
|
||||
|
||||
it("should have description", () => {
|
||||
expect(tool.description).toContain("complexity")
|
||||
})
|
||||
})
|
||||
|
||||
describe("validateParams", () => {
|
||||
it("should return null for no params", () => {
|
||||
expect(tool.validateParams({})).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for valid path", () => {
|
||||
expect(tool.validateParams({ path: "src/index.ts" })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for valid limit", () => {
|
||||
expect(tool.validateParams({ limit: 10 })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for valid path and limit", () => {
|
||||
expect(tool.validateParams({ path: "src", limit: 5 })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return error for non-string path", () => {
|
||||
expect(tool.validateParams({ path: 123 })).toBe("Parameter 'path' must be a string")
|
||||
})
|
||||
|
||||
it("should return error for non-integer limit", () => {
|
||||
expect(tool.validateParams({ limit: 10.5 })).toBe(
|
||||
"Parameter 'limit' must be an integer",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for non-number limit", () => {
|
||||
expect(tool.validateParams({ limit: "10" })).toBe(
|
||||
"Parameter 'limit' must be an integer",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for limit less than 1", () => {
|
||||
expect(tool.validateParams({ limit: 0 })).toBe("Parameter 'limit' must be at least 1")
|
||||
})
|
||||
|
||||
it("should return error for negative limit", () => {
|
||||
expect(tool.validateParams({ limit: -5 })).toBe("Parameter 'limit' must be at least 1")
|
||||
})
|
||||
})
|
||||
|
||||
describe("execute", () => {
|
||||
it("should return complexity for all files without path", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/a.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 100, nesting: 3, cyclomaticComplexity: 10, score: 50 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/b.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 50, nesting: 2, cyclomaticComplexity: 5, score: 25 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.analyzedPath).toBeNull()
|
||||
expect(data.totalFiles).toBe(2)
|
||||
expect(data.files).toHaveLength(2)
|
||||
})
|
||||
|
||||
it("should sort files by complexity score descending", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/low.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 10, nesting: 1, cyclomaticComplexity: 2, score: 10 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/high.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 200, nesting: 5, cyclomaticComplexity: 25, score: 80 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/mid.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 50, nesting: 3, cyclomaticComplexity: 10, score: 40 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.files[0].path).toBe("src/high.ts")
|
||||
expect(data.files[1].path).toBe("src/mid.ts")
|
||||
expect(data.files[2].path).toBe("src/low.ts")
|
||||
})
|
||||
|
||||
it("should filter by path prefix", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
["src/a.ts", createMockFileMeta()],
|
||||
["src/b.ts", createMockFileMeta()],
|
||||
["lib/c.ts", createMockFileMeta()],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.analyzedPath).toBe("src")
|
||||
expect(data.totalFiles).toBe(2)
|
||||
expect(data.files.every((f) => f.path.startsWith("src/"))).toBe(true)
|
||||
})
|
||||
|
||||
it("should filter by specific file path", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/a.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 100, nesting: 3, cyclomaticComplexity: 15, score: 55 },
|
||||
}),
|
||||
],
|
||||
["src/b.ts", createMockFileMeta()],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/a.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.totalFiles).toBe(1)
|
||||
expect(data.files[0].path).toBe("src/a.ts")
|
||||
expect(data.files[0].metrics.score).toBe(55)
|
||||
})
|
||||
|
||||
it("should respect limit parameter", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/a.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 100, nesting: 3, cyclomaticComplexity: 10, score: 70 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/b.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 50, nesting: 2, cyclomaticComplexity: 5, score: 50 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/c.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 20, nesting: 1, cyclomaticComplexity: 2, score: 20 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ limit: 2 }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.totalFiles).toBe(3)
|
||||
expect(data.files).toHaveLength(2)
|
||||
expect(data.files[0].metrics.score).toBe(70)
|
||||
expect(data.files[1].metrics.score).toBe(50)
|
||||
})
|
||||
|
||||
it("should use default limit of 20", async () => {
|
||||
const metas = new Map<string, FileMeta>()
|
||||
for (let i = 0; i < 30; i++) {
|
||||
metas.set(`src/file${String(i)}.ts`, createMockFileMeta())
|
||||
}
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.totalFiles).toBe(30)
|
||||
expect(data.files).toHaveLength(20)
|
||||
})
|
||||
|
||||
it("should calculate average score", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/a.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 100, nesting: 3, cyclomaticComplexity: 10, score: 60 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/b.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 50, nesting: 2, cyclomaticComplexity: 5, score: 40 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.averageScore).toBe(50)
|
||||
})
|
||||
|
||||
it("should calculate summary statistics", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/high.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 200, nesting: 5, cyclomaticComplexity: 25, score: 75 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/medium.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 80, nesting: 3, cyclomaticComplexity: 12, score: 45 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/low.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 20, nesting: 1, cyclomaticComplexity: 3, score: 15 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.summary.highComplexity).toBe(1)
|
||||
expect(data.summary.mediumComplexity).toBe(1)
|
||||
expect(data.summary.lowComplexity).toBe(1)
|
||||
})
|
||||
|
||||
it("should return empty result for empty project", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.totalFiles).toBe(0)
|
||||
expect(data.averageScore).toBe(0)
|
||||
expect(data.files).toEqual([])
|
||||
expect(data.summary).toEqual({
|
||||
highComplexity: 0,
|
||||
mediumComplexity: 0,
|
||||
lowComplexity: 0,
|
||||
})
|
||||
})
|
||||
|
||||
it("should return error for non-existent path", async () => {
|
||||
const metas = new Map<string, FileMeta>([["src/a.ts", createMockFileMeta()]])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "nonexistent" }, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toContain("No files found at path")
|
||||
})
|
||||
|
||||
it("should handle absolute paths", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
["src/a.ts", createMockFileMeta()],
|
||||
["src/b.ts", createMockFileMeta()],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "/test/project/src" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.analyzedPath).toBe("src")
|
||||
expect(data.totalFiles).toBe(2)
|
||||
})
|
||||
|
||||
it("should include file metadata", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/hub.ts",
|
||||
createMockFileMeta({
|
||||
fileType: "source",
|
||||
isHub: true,
|
||||
complexity: { loc: 150, nesting: 4, cyclomaticComplexity: 18, score: 65 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.files[0].fileType).toBe("source")
|
||||
expect(data.files[0].isHub).toBe(true)
|
||||
expect(data.files[0].metrics).toEqual({
|
||||
loc: 150,
|
||||
nesting: 4,
|
||||
cyclomaticComplexity: 18,
|
||||
score: 65,
|
||||
})
|
||||
})
|
||||
|
||||
it("should include callId in result", async () => {
|
||||
const metas = new Map<string, FileMeta>([["src/a.ts", createMockFileMeta()]])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.callId).toMatch(/^get_complexity-\d+$/)
|
||||
})
|
||||
|
||||
it("should include execution time in result", async () => {
|
||||
const metas = new Map<string, FileMeta>([["src/a.ts", createMockFileMeta()]])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
|
||||
it("should handle storage errors gracefully", async () => {
|
||||
const storage = createMockStorage()
|
||||
;(storage.getAllMetas as ReturnType<typeof vi.fn>).mockRejectedValue(
|
||||
new Error("Redis connection failed"),
|
||||
)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toBe("Redis connection failed")
|
||||
})
|
||||
|
||||
it("should round average score to 2 decimal places", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/a.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 100, nesting: 3, cyclomaticComplexity: 10, score: 33 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/b.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 50, nesting: 2, cyclomaticComplexity: 5, score: 33 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/c.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 20, nesting: 1, cyclomaticComplexity: 2, score: 34 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.averageScore).toBe(33.33)
|
||||
})
|
||||
|
||||
it("should handle complexity threshold boundaries", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/exact-high.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 200, nesting: 5, cyclomaticComplexity: 20, score: 60 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/exact-medium.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 100, nesting: 3, cyclomaticComplexity: 10, score: 30 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/below-medium.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 50, nesting: 2, cyclomaticComplexity: 5, score: 29 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetComplexityResult
|
||||
expect(data.summary.highComplexity).toBe(1)
|
||||
expect(data.summary.mediumComplexity).toBe(1)
|
||||
expect(data.summary.lowComplexity).toBe(1)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,342 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest"
|
||||
import {
|
||||
GetDependenciesTool,
|
||||
type GetDependenciesResult,
|
||||
} from "../../../../../src/infrastructure/tools/analysis/GetDependenciesTool.js"
|
||||
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
|
||||
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
|
||||
import type { FileMeta } from "../../../../../src/domain/value-objects/FileMeta.js"
|
||||
|
||||
function createMockFileMeta(partial: Partial<FileMeta> = {}): FileMeta {
|
||||
return {
|
||||
complexity: { loc: 10, nesting: 2, cyclomaticComplexity: 5, score: 25 },
|
||||
dependencies: [],
|
||||
dependents: [],
|
||||
isHub: false,
|
||||
isEntryPoint: false,
|
||||
fileType: "source",
|
||||
...partial,
|
||||
}
|
||||
}
|
||||
|
||||
function createMockStorage(metas: Map<string, FileMeta> = new Map()): IStorage {
|
||||
return {
|
||||
getFile: vi.fn().mockResolvedValue(null),
|
||||
setFile: vi.fn(),
|
||||
deleteFile: vi.fn(),
|
||||
getAllFiles: vi.fn().mockResolvedValue(new Map()),
|
||||
getFileCount: vi.fn().mockResolvedValue(0),
|
||||
getAST: vi.fn().mockResolvedValue(null),
|
||||
setAST: vi.fn(),
|
||||
deleteAST: vi.fn(),
|
||||
getAllASTs: vi.fn().mockResolvedValue(new Map()),
|
||||
getMeta: vi.fn().mockImplementation((p: string) => Promise.resolve(metas.get(p) ?? null)),
|
||||
setMeta: vi.fn(),
|
||||
deleteMeta: vi.fn(),
|
||||
getAllMetas: vi.fn().mockResolvedValue(metas),
|
||||
getSymbolIndex: vi.fn().mockResolvedValue(new Map()),
|
||||
setSymbolIndex: vi.fn(),
|
||||
getDepsGraph: vi.fn().mockResolvedValue({ imports: new Map(), importedBy: new Map() }),
|
||||
setDepsGraph: vi.fn(),
|
||||
getProjectConfig: vi.fn(),
|
||||
setProjectConfig: vi.fn(),
|
||||
connect: vi.fn(),
|
||||
disconnect: vi.fn(),
|
||||
isConnected: vi.fn().mockReturnValue(true),
|
||||
clear: vi.fn(),
|
||||
} as unknown as IStorage
|
||||
}
|
||||
|
||||
function createMockContext(storage?: IStorage): ToolContext {
|
||||
return {
|
||||
projectRoot: "/test/project",
|
||||
storage: storage ?? createMockStorage(),
|
||||
requestConfirmation: vi.fn().mockResolvedValue(true),
|
||||
onProgress: vi.fn(),
|
||||
}
|
||||
}
|
||||
|
||||
describe("GetDependenciesTool", () => {
|
||||
let tool: GetDependenciesTool
|
||||
|
||||
beforeEach(() => {
|
||||
tool = new GetDependenciesTool()
|
||||
})
|
||||
|
||||
describe("metadata", () => {
|
||||
it("should have correct name", () => {
|
||||
expect(tool.name).toBe("get_dependencies")
|
||||
})
|
||||
|
||||
it("should have correct category", () => {
|
||||
expect(tool.category).toBe("analysis")
|
||||
})
|
||||
|
||||
it("should not require confirmation", () => {
|
||||
expect(tool.requiresConfirmation).toBe(false)
|
||||
})
|
||||
|
||||
it("should have correct parameters", () => {
|
||||
expect(tool.parameters).toHaveLength(1)
|
||||
expect(tool.parameters[0].name).toBe("path")
|
||||
expect(tool.parameters[0].required).toBe(true)
|
||||
})
|
||||
|
||||
it("should have description", () => {
|
||||
expect(tool.description).toContain("imports")
|
||||
})
|
||||
})
|
||||
|
||||
describe("validateParams", () => {
|
||||
it("should return null for valid path", () => {
|
||||
expect(tool.validateParams({ path: "src/index.ts" })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return error for missing path", () => {
|
||||
expect(tool.validateParams({})).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for empty path", () => {
|
||||
expect(tool.validateParams({ path: "" })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for whitespace-only path", () => {
|
||||
expect(tool.validateParams({ path: " " })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for non-string path", () => {
|
||||
expect(tool.validateParams({ path: 123 })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe("execute", () => {
|
||||
it("should return dependencies for a file", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileMeta({
|
||||
dependencies: ["src/utils.ts", "src/config.ts"],
|
||||
}),
|
||||
],
|
||||
["src/utils.ts", createMockFileMeta({ isHub: true })],
|
||||
["src/config.ts", createMockFileMeta({ isEntryPoint: true })],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/index.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependenciesResult
|
||||
expect(data.file).toBe("src/index.ts")
|
||||
expect(data.totalDependencies).toBe(2)
|
||||
expect(data.dependencies).toHaveLength(2)
|
||||
})
|
||||
|
||||
it("should include metadata for each dependency", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileMeta({
|
||||
dependencies: ["src/utils.ts"],
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/utils.ts",
|
||||
createMockFileMeta({
|
||||
isHub: true,
|
||||
isEntryPoint: false,
|
||||
fileType: "source",
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/index.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependenciesResult
|
||||
expect(data.dependencies[0]).toEqual({
|
||||
path: "src/utils.ts",
|
||||
exists: true,
|
||||
isEntryPoint: false,
|
||||
isHub: true,
|
||||
fileType: "source",
|
||||
})
|
||||
})
|
||||
|
||||
it("should handle file with no dependencies", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
["src/standalone.ts", createMockFileMeta({ dependencies: [] })],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/standalone.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependenciesResult
|
||||
expect(data.totalDependencies).toBe(0)
|
||||
expect(data.dependencies).toEqual([])
|
||||
})
|
||||
|
||||
it("should return error for non-existent file", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "nonexistent.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toContain("File not found or not indexed")
|
||||
})
|
||||
|
||||
it("should handle absolute paths", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
["src/index.ts", createMockFileMeta({ dependencies: [] })],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "/test/project/src/index.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependenciesResult
|
||||
expect(data.file).toBe("src/index.ts")
|
||||
})
|
||||
|
||||
it("should mark non-existent dependencies", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileMeta({
|
||||
dependencies: ["src/missing.ts"],
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/index.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependenciesResult
|
||||
expect(data.dependencies[0].exists).toBe(false)
|
||||
expect(data.dependencies[0].isHub).toBe(false)
|
||||
expect(data.dependencies[0].fileType).toBe("unknown")
|
||||
})
|
||||
|
||||
it("should sort dependencies by path", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileMeta({
|
||||
dependencies: ["src/z.ts", "src/a.ts", "src/m.ts"],
|
||||
}),
|
||||
],
|
||||
["src/z.ts", createMockFileMeta()],
|
||||
["src/a.ts", createMockFileMeta()],
|
||||
["src/m.ts", createMockFileMeta()],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/index.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependenciesResult
|
||||
expect(data.dependencies[0].path).toBe("src/a.ts")
|
||||
expect(data.dependencies[1].path).toBe("src/m.ts")
|
||||
expect(data.dependencies[2].path).toBe("src/z.ts")
|
||||
})
|
||||
|
||||
it("should include file type of source file", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"tests/index.test.ts",
|
||||
createMockFileMeta({
|
||||
fileType: "test",
|
||||
dependencies: [],
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "tests/index.test.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependenciesResult
|
||||
expect(data.fileType).toBe("test")
|
||||
})
|
||||
|
||||
it("should include callId in result", async () => {
|
||||
const metas = new Map<string, FileMeta>([["src/index.ts", createMockFileMeta()]])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/index.ts" }, ctx)
|
||||
|
||||
expect(result.callId).toMatch(/^get_dependencies-\d+$/)
|
||||
})
|
||||
|
||||
it("should include execution time in result", async () => {
|
||||
const metas = new Map<string, FileMeta>([["src/index.ts", createMockFileMeta()]])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/index.ts" }, ctx)
|
||||
|
||||
expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
|
||||
it("should handle storage errors gracefully", async () => {
|
||||
const storage = createMockStorage()
|
||||
;(storage.getMeta as ReturnType<typeof vi.fn>).mockRejectedValue(
|
||||
new Error("Redis connection failed"),
|
||||
)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/index.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toBe("Redis connection failed")
|
||||
})
|
||||
|
||||
it("should trim path before searching", async () => {
|
||||
const metas = new Map<string, FileMeta>([["src/index.ts", createMockFileMeta()]])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: " src/index.ts " }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependenciesResult
|
||||
expect(data.file).toBe("src/index.ts")
|
||||
})
|
||||
|
||||
it("should handle many dependencies", async () => {
|
||||
const deps = Array.from({ length: 50 }, (_, i) => `src/dep${String(i)}.ts`)
|
||||
const metas = new Map<string, FileMeta>([
|
||||
["src/index.ts", createMockFileMeta({ dependencies: deps })],
|
||||
...deps.map((dep) => [dep, createMockFileMeta()] as [string, FileMeta]),
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/index.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependenciesResult
|
||||
expect(data.totalDependencies).toBe(50)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,388 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest"
|
||||
import {
|
||||
GetDependentsTool,
|
||||
type GetDependentsResult,
|
||||
} from "../../../../../src/infrastructure/tools/analysis/GetDependentsTool.js"
|
||||
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
|
||||
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
|
||||
import type { FileMeta } from "../../../../../src/domain/value-objects/FileMeta.js"
|
||||
|
||||
function createMockFileMeta(partial: Partial<FileMeta> = {}): FileMeta {
|
||||
return {
|
||||
complexity: { loc: 10, nesting: 2, cyclomaticComplexity: 5, score: 25 },
|
||||
dependencies: [],
|
||||
dependents: [],
|
||||
isHub: false,
|
||||
isEntryPoint: false,
|
||||
fileType: "source",
|
||||
...partial,
|
||||
}
|
||||
}
|
||||
|
||||
function createMockStorage(metas: Map<string, FileMeta> = new Map()): IStorage {
|
||||
return {
|
||||
getFile: vi.fn().mockResolvedValue(null),
|
||||
setFile: vi.fn(),
|
||||
deleteFile: vi.fn(),
|
||||
getAllFiles: vi.fn().mockResolvedValue(new Map()),
|
||||
getFileCount: vi.fn().mockResolvedValue(0),
|
||||
getAST: vi.fn().mockResolvedValue(null),
|
||||
setAST: vi.fn(),
|
||||
deleteAST: vi.fn(),
|
||||
getAllASTs: vi.fn().mockResolvedValue(new Map()),
|
||||
getMeta: vi.fn().mockImplementation((p: string) => Promise.resolve(metas.get(p) ?? null)),
|
||||
setMeta: vi.fn(),
|
||||
deleteMeta: vi.fn(),
|
||||
getAllMetas: vi.fn().mockResolvedValue(metas),
|
||||
getSymbolIndex: vi.fn().mockResolvedValue(new Map()),
|
||||
setSymbolIndex: vi.fn(),
|
||||
getDepsGraph: vi.fn().mockResolvedValue({ imports: new Map(), importedBy: new Map() }),
|
||||
setDepsGraph: vi.fn(),
|
||||
getProjectConfig: vi.fn(),
|
||||
setProjectConfig: vi.fn(),
|
||||
connect: vi.fn(),
|
||||
disconnect: vi.fn(),
|
||||
isConnected: vi.fn().mockReturnValue(true),
|
||||
clear: vi.fn(),
|
||||
} as unknown as IStorage
|
||||
}
|
||||
|
||||
function createMockContext(storage?: IStorage): ToolContext {
|
||||
return {
|
||||
projectRoot: "/test/project",
|
||||
storage: storage ?? createMockStorage(),
|
||||
requestConfirmation: vi.fn().mockResolvedValue(true),
|
||||
onProgress: vi.fn(),
|
||||
}
|
||||
}
|
||||
|
||||
describe("GetDependentsTool", () => {
|
||||
let tool: GetDependentsTool
|
||||
|
||||
beforeEach(() => {
|
||||
tool = new GetDependentsTool()
|
||||
})
|
||||
|
||||
describe("metadata", () => {
|
||||
it("should have correct name", () => {
|
||||
expect(tool.name).toBe("get_dependents")
|
||||
})
|
||||
|
||||
it("should have correct category", () => {
|
||||
expect(tool.category).toBe("analysis")
|
||||
})
|
||||
|
||||
it("should not require confirmation", () => {
|
||||
expect(tool.requiresConfirmation).toBe(false)
|
||||
})
|
||||
|
||||
it("should have correct parameters", () => {
|
||||
expect(tool.parameters).toHaveLength(1)
|
||||
expect(tool.parameters[0].name).toBe("path")
|
||||
expect(tool.parameters[0].required).toBe(true)
|
||||
})
|
||||
|
||||
it("should have description", () => {
|
||||
expect(tool.description).toContain("import")
|
||||
})
|
||||
})
|
||||
|
||||
describe("validateParams", () => {
|
||||
it("should return null for valid path", () => {
|
||||
expect(tool.validateParams({ path: "src/utils.ts" })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return error for missing path", () => {
|
||||
expect(tool.validateParams({})).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for empty path", () => {
|
||||
expect(tool.validateParams({ path: "" })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for whitespace-only path", () => {
|
||||
expect(tool.validateParams({ path: " " })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for non-string path", () => {
|
||||
expect(tool.validateParams({ path: 123 })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe("execute", () => {
|
||||
it("should return dependents for a file", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/utils.ts",
|
||||
createMockFileMeta({
|
||||
dependents: ["src/index.ts", "src/app.ts"],
|
||||
isHub: true,
|
||||
}),
|
||||
],
|
||||
["src/index.ts", createMockFileMeta({ isEntryPoint: true })],
|
||||
["src/app.ts", createMockFileMeta()],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/utils.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
expect(data.file).toBe("src/utils.ts")
|
||||
expect(data.totalDependents).toBe(2)
|
||||
expect(data.isHub).toBe(true)
|
||||
expect(data.dependents).toHaveLength(2)
|
||||
})
|
||||
|
||||
it("should include metadata for each dependent", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/utils.ts",
|
||||
createMockFileMeta({
|
||||
dependents: ["src/index.ts"],
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileMeta({
|
||||
isHub: false,
|
||||
isEntryPoint: true,
|
||||
fileType: "source",
|
||||
complexity: { loc: 50, nesting: 3, cyclomaticComplexity: 10, score: 45 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/utils.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
expect(data.dependents[0]).toEqual({
|
||||
path: "src/index.ts",
|
||||
isEntryPoint: true,
|
||||
isHub: false,
|
||||
fileType: "source",
|
||||
complexityScore: 45,
|
||||
})
|
||||
})
|
||||
|
||||
it("should handle file with no dependents", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
["src/isolated.ts", createMockFileMeta({ dependents: [] })],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/isolated.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
expect(data.totalDependents).toBe(0)
|
||||
expect(data.isHub).toBe(false)
|
||||
expect(data.dependents).toEqual([])
|
||||
})
|
||||
|
||||
it("should return error for non-existent file", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "nonexistent.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toContain("File not found or not indexed")
|
||||
})
|
||||
|
||||
it("should handle absolute paths", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
["src/utils.ts", createMockFileMeta({ dependents: [] })],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "/test/project/src/utils.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
expect(data.file).toBe("src/utils.ts")
|
||||
})
|
||||
|
||||
it("should handle missing dependent metadata", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/utils.ts",
|
||||
createMockFileMeta({
|
||||
dependents: ["src/missing.ts"],
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/utils.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
expect(data.dependents[0].isHub).toBe(false)
|
||||
expect(data.dependents[0].isEntryPoint).toBe(false)
|
||||
expect(data.dependents[0].fileType).toBe("unknown")
|
||||
expect(data.dependents[0].complexityScore).toBe(0)
|
||||
})
|
||||
|
||||
it("should sort dependents by path", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/utils.ts",
|
||||
createMockFileMeta({
|
||||
dependents: ["src/z.ts", "src/a.ts", "src/m.ts"],
|
||||
}),
|
||||
],
|
||||
["src/z.ts", createMockFileMeta()],
|
||||
["src/a.ts", createMockFileMeta()],
|
||||
["src/m.ts", createMockFileMeta()],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/utils.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
expect(data.dependents[0].path).toBe("src/a.ts")
|
||||
expect(data.dependents[1].path).toBe("src/m.ts")
|
||||
expect(data.dependents[2].path).toBe("src/z.ts")
|
||||
})
|
||||
|
||||
it("should include file type of source file", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/types.ts",
|
||||
createMockFileMeta({
|
||||
fileType: "types",
|
||||
dependents: [],
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/types.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
expect(data.fileType).toBe("types")
|
||||
})
|
||||
|
||||
it("should correctly identify hub files", async () => {
|
||||
const dependents = Array.from({ length: 10 }, (_, i) => `src/file${String(i)}.ts`)
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/core.ts",
|
||||
createMockFileMeta({
|
||||
dependents,
|
||||
isHub: true,
|
||||
}),
|
||||
],
|
||||
...dependents.map((dep) => [dep, createMockFileMeta()] as [string, FileMeta]),
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/core.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
expect(data.isHub).toBe(true)
|
||||
expect(data.totalDependents).toBe(10)
|
||||
})
|
||||
|
||||
it("should include callId in result", async () => {
|
||||
const metas = new Map<string, FileMeta>([["src/utils.ts", createMockFileMeta()]])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/utils.ts" }, ctx)
|
||||
|
||||
expect(result.callId).toMatch(/^get_dependents-\d+$/)
|
||||
})
|
||||
|
||||
it("should include execution time in result", async () => {
|
||||
const metas = new Map<string, FileMeta>([["src/utils.ts", createMockFileMeta()]])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/utils.ts" }, ctx)
|
||||
|
||||
expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
|
||||
it("should handle storage errors gracefully", async () => {
|
||||
const storage = createMockStorage()
|
||||
;(storage.getMeta as ReturnType<typeof vi.fn>).mockRejectedValue(
|
||||
new Error("Redis connection failed"),
|
||||
)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/utils.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toBe("Redis connection failed")
|
||||
})
|
||||
|
||||
it("should trim path before searching", async () => {
|
||||
const metas = new Map<string, FileMeta>([["src/utils.ts", createMockFileMeta()]])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: " src/utils.ts " }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
expect(data.file).toBe("src/utils.ts")
|
||||
})
|
||||
|
||||
it("should include complexity scores for dependents", async () => {
|
||||
const metas = new Map<string, FileMeta>([
|
||||
[
|
||||
"src/utils.ts",
|
||||
createMockFileMeta({
|
||||
dependents: ["src/high.ts", "src/low.ts"],
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/high.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 200, nesting: 5, cyclomaticComplexity: 20, score: 80 },
|
||||
}),
|
||||
],
|
||||
[
|
||||
"src/low.ts",
|
||||
createMockFileMeta({
|
||||
complexity: { loc: 20, nesting: 1, cyclomaticComplexity: 2, score: 10 },
|
||||
}),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(metas)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/utils.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetDependentsResult
|
||||
const highDep = data.dependents.find((d) => d.path === "src/high.ts")
|
||||
const lowDep = data.dependents.find((d) => d.path === "src/low.ts")
|
||||
expect(highDep?.complexityScore).toBe(80)
|
||||
expect(lowDep?.complexityScore).toBe(10)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,583 @@
|
||||
import { describe, it, expect, vi, beforeEach } from "vitest"
|
||||
import {
|
||||
GetTodosTool,
|
||||
type GetTodosResult,
|
||||
} from "../../../../../src/infrastructure/tools/analysis/GetTodosTool.js"
|
||||
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
|
||||
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
|
||||
import type { FileData } from "../../../../../src/domain/value-objects/FileData.js"
|
||||
|
||||
function createMockFileData(lines: string[]): FileData {
|
||||
return {
|
||||
lines,
|
||||
hash: "abc123",
|
||||
size: lines.join("\n").length,
|
||||
lastModified: Date.now(),
|
||||
}
|
||||
}
|
||||
|
||||
function createMockStorage(files: Map<string, FileData> = new Map()): IStorage {
|
||||
return {
|
||||
getFile: vi.fn().mockImplementation((p: string) => Promise.resolve(files.get(p) ?? null)),
|
||||
setFile: vi.fn(),
|
||||
deleteFile: vi.fn(),
|
||||
getAllFiles: vi.fn().mockResolvedValue(files),
|
||||
getFileCount: vi.fn().mockResolvedValue(files.size),
|
||||
getAST: vi.fn().mockResolvedValue(null),
|
||||
setAST: vi.fn(),
|
||||
deleteAST: vi.fn(),
|
||||
getAllASTs: vi.fn().mockResolvedValue(new Map()),
|
||||
getMeta: vi.fn().mockResolvedValue(null),
|
||||
setMeta: vi.fn(),
|
||||
deleteMeta: vi.fn(),
|
||||
getAllMetas: vi.fn().mockResolvedValue(new Map()),
|
||||
getSymbolIndex: vi.fn().mockResolvedValue(new Map()),
|
||||
setSymbolIndex: vi.fn(),
|
||||
getDepsGraph: vi.fn().mockResolvedValue({ imports: new Map(), importedBy: new Map() }),
|
||||
setDepsGraph: vi.fn(),
|
||||
getProjectConfig: vi.fn(),
|
||||
setProjectConfig: vi.fn(),
|
||||
connect: vi.fn(),
|
||||
disconnect: vi.fn(),
|
||||
isConnected: vi.fn().mockReturnValue(true),
|
||||
clear: vi.fn(),
|
||||
} as unknown as IStorage
|
||||
}
|
||||
|
||||
function createMockContext(storage?: IStorage): ToolContext {
|
||||
return {
|
||||
projectRoot: "/test/project",
|
||||
storage: storage ?? createMockStorage(),
|
||||
requestConfirmation: vi.fn().mockResolvedValue(true),
|
||||
onProgress: vi.fn(),
|
||||
}
|
||||
}
|
||||
|
||||
describe("GetTodosTool", () => {
|
||||
let tool: GetTodosTool
|
||||
|
||||
beforeEach(() => {
|
||||
tool = new GetTodosTool()
|
||||
})
|
||||
|
||||
describe("metadata", () => {
|
||||
it("should have correct name", () => {
|
||||
expect(tool.name).toBe("get_todos")
|
||||
})
|
||||
|
||||
it("should have correct category", () => {
|
||||
expect(tool.category).toBe("analysis")
|
||||
})
|
||||
|
||||
it("should not require confirmation", () => {
|
||||
expect(tool.requiresConfirmation).toBe(false)
|
||||
})
|
||||
|
||||
it("should have correct parameters", () => {
|
||||
expect(tool.parameters).toHaveLength(2)
|
||||
expect(tool.parameters[0].name).toBe("path")
|
||||
expect(tool.parameters[0].required).toBe(false)
|
||||
expect(tool.parameters[1].name).toBe("type")
|
||||
expect(tool.parameters[1].required).toBe(false)
|
||||
})
|
||||
|
||||
it("should have description", () => {
|
||||
expect(tool.description).toContain("TODO")
|
||||
expect(tool.description).toContain("FIXME")
|
||||
})
|
||||
})
|
||||
|
||||
describe("validateParams", () => {
|
||||
it("should return null for no params", () => {
|
||||
expect(tool.validateParams({})).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for valid path", () => {
|
||||
expect(tool.validateParams({ path: "src" })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for valid type", () => {
|
||||
expect(tool.validateParams({ type: "TODO" })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for lowercase type", () => {
|
||||
expect(tool.validateParams({ type: "fixme" })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return null for path and type", () => {
|
||||
expect(tool.validateParams({ path: "src", type: "TODO" })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return error for non-string path", () => {
|
||||
expect(tool.validateParams({ path: 123 })).toBe("Parameter 'path' must be a string")
|
||||
})
|
||||
|
||||
it("should return error for non-string type", () => {
|
||||
expect(tool.validateParams({ type: 123 })).toBe("Parameter 'type' must be a string")
|
||||
})
|
||||
|
||||
it("should return error for invalid type", () => {
|
||||
expect(tool.validateParams({ type: "INVALID" })).toBe(
|
||||
"Parameter 'type' must be one of: TODO, FIXME, HACK, XXX, BUG, NOTE",
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe("execute", () => {
|
||||
it("should find TODO comments", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileData([
|
||||
"// TODO: implement this",
|
||||
"function foo() {}",
|
||||
"// TODO: add tests",
|
||||
]),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(2)
|
||||
expect(data.todos[0].type).toBe("TODO")
|
||||
expect(data.todos[0].text).toBe("implement this")
|
||||
expect(data.todos[1].text).toBe("add tests")
|
||||
})
|
||||
|
||||
it("should find FIXME comments", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileData(["// FIXME: broken logic here", "const x = 1"]),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(1)
|
||||
expect(data.todos[0].type).toBe("FIXME")
|
||||
expect(data.todos[0].text).toBe("broken logic here")
|
||||
})
|
||||
|
||||
it("should find HACK comments", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["// HACK: temporary workaround"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].type).toBe("HACK")
|
||||
})
|
||||
|
||||
it("should find XXX comments", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["// XXX: needs attention"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].type).toBe("XXX")
|
||||
})
|
||||
|
||||
it("should find BUG comments", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["// BUG: race condition"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].type).toBe("BUG")
|
||||
})
|
||||
|
||||
it("should find NOTE comments", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["// NOTE: important consideration"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].type).toBe("NOTE")
|
||||
})
|
||||
|
||||
it("should find comments in block comments", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["/*", " * TODO: in block comment", " */"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(1)
|
||||
expect(data.todos[0].text).toBe("in block comment")
|
||||
})
|
||||
|
||||
it("should find comments with author annotation", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["// TODO(john): fix this"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].text).toBe("fix this")
|
||||
})
|
||||
|
||||
it("should handle TODO without colon", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["// TODO implement feature"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].text).toBe("implement feature")
|
||||
})
|
||||
|
||||
it("should filter by type", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileData([
|
||||
"// TODO: task one",
|
||||
"// FIXME: bug here",
|
||||
"// TODO: task two",
|
||||
]),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ type: "TODO" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(2)
|
||||
expect(data.todos.every((t) => t.type === "TODO")).toBe(true)
|
||||
})
|
||||
|
||||
it("should filter by type case-insensitively", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["// TODO: task", "// FIXME: bug"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ type: "todo" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(1)
|
||||
expect(data.todos[0].type).toBe("TODO")
|
||||
})
|
||||
|
||||
it("should filter by path", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/a.ts", createMockFileData(["// TODO: in src"])],
|
||||
["lib/b.ts", createMockFileData(["// TODO: in lib"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.searchedPath).toBe("src")
|
||||
expect(data.totalTodos).toBe(1)
|
||||
expect(data.todos[0].path).toBe("src/a.ts")
|
||||
})
|
||||
|
||||
it("should filter by specific file", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/a.ts", createMockFileData(["// TODO: in a"])],
|
||||
["src/b.ts", createMockFileData(["// TODO: in b"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "src/a.ts" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(1)
|
||||
expect(data.todos[0].path).toBe("src/a.ts")
|
||||
})
|
||||
|
||||
it("should return error for non-existent path", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/a.ts", createMockFileData(["// TODO: task"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "nonexistent" }, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toContain("No files found at path")
|
||||
})
|
||||
|
||||
it("should count by type", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileData([
|
||||
"// TODO: task 1",
|
||||
"// TODO: task 2",
|
||||
"// FIXME: bug",
|
||||
"// HACK: workaround",
|
||||
]),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.byType.TODO).toBe(2)
|
||||
expect(data.byType.FIXME).toBe(1)
|
||||
expect(data.byType.HACK).toBe(1)
|
||||
expect(data.byType.XXX).toBe(0)
|
||||
expect(data.byType.BUG).toBe(0)
|
||||
expect(data.byType.NOTE).toBe(0)
|
||||
})
|
||||
|
||||
it("should count files with todos", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/a.ts", createMockFileData(["// TODO: task"])],
|
||||
["src/b.ts", createMockFileData(["const x = 1"])],
|
||||
["src/c.ts", createMockFileData(["// TODO: another task"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.filesWithTodos).toBe(2)
|
||||
})
|
||||
|
||||
it("should sort results by path then line", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/b.ts", createMockFileData(["// TODO: b1", "", "// TODO: b2"])],
|
||||
["src/a.ts", createMockFileData(["// TODO: a1"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].path).toBe("src/a.ts")
|
||||
expect(data.todos[1].path).toBe("src/b.ts")
|
||||
expect(data.todos[1].line).toBe(1)
|
||||
expect(data.todos[2].path).toBe("src/b.ts")
|
||||
expect(data.todos[2].line).toBe(3)
|
||||
})
|
||||
|
||||
it("should include line context", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData([" // TODO: indented task"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].context).toBe("// TODO: indented task")
|
||||
})
|
||||
|
||||
it("should return empty result for empty project", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(0)
|
||||
expect(data.filesWithTodos).toBe(0)
|
||||
expect(data.todos).toEqual([])
|
||||
})
|
||||
|
||||
it("should return empty result when no todos found", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["const x = 1", "const y = 2"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(0)
|
||||
})
|
||||
|
||||
it("should handle TODO without description", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["// TODO:"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].text).toBe("(no description)")
|
||||
})
|
||||
|
||||
it("should handle absolute paths", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/a.ts", createMockFileData(["// TODO: task"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({ path: "/test/project/src" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.searchedPath).toBe("src")
|
||||
})
|
||||
|
||||
it("should find todos with hash comments", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["script.sh", createMockFileData(["# TODO: shell script task"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(1)
|
||||
expect(data.todos[0].text).toBe("shell script task")
|
||||
})
|
||||
|
||||
it("should include callId in result", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.callId).toMatch(/^get_todos-\d+$/)
|
||||
})
|
||||
|
||||
it("should include execution time in result", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
|
||||
it("should handle storage errors gracefully", async () => {
|
||||
const storage = createMockStorage()
|
||||
;(storage.getAllFiles as ReturnType<typeof vi.fn>).mockRejectedValue(
|
||||
new Error("Redis connection failed"),
|
||||
)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toBe("Redis connection failed")
|
||||
})
|
||||
|
||||
it("should find lowercase todo markers", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/index.ts", createMockFileData(["// todo: lowercase"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(1)
|
||||
expect(data.todos[0].type).toBe("TODO")
|
||||
})
|
||||
|
||||
it("should handle multiple files with todos", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
["src/a.ts", createMockFileData(["// TODO: a1", "// TODO: a2"])],
|
||||
["src/b.ts", createMockFileData(["// FIXME: b1"])],
|
||||
["src/c.ts", createMockFileData(["// HACK: c1", "// BUG: c2"])],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.totalTodos).toBe(5)
|
||||
expect(data.filesWithTodos).toBe(3)
|
||||
})
|
||||
|
||||
it("should correctly identify line numbers", async () => {
|
||||
const files = new Map<string, FileData>([
|
||||
[
|
||||
"src/index.ts",
|
||||
createMockFileData([
|
||||
"const a = 1",
|
||||
"const b = 2",
|
||||
"// TODO: on line 3",
|
||||
"const c = 3",
|
||||
]),
|
||||
],
|
||||
])
|
||||
const storage = createMockStorage(files)
|
||||
const ctx = createMockContext(storage)
|
||||
|
||||
const result = await tool.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GetTodosResult
|
||||
expect(data.todos[0].line).toBe(3)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,335 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"
|
||||
import { promises as fs } from "node:fs"
|
||||
import * as path from "node:path"
|
||||
import * as os from "node:os"
|
||||
import {
|
||||
CreateFileTool,
|
||||
type CreateFileResult,
|
||||
} from "../../../../../src/infrastructure/tools/edit/CreateFileTool.js"
|
||||
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
|
||||
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
|
||||
import { hashLines } from "../../../../../src/shared/utils/hash.js"
|
||||
|
||||
function createMockStorage(): IStorage {
|
||||
return {
|
||||
getFile: vi.fn().mockResolvedValue(null),
|
||||
setFile: vi.fn().mockResolvedValue(undefined),
|
||||
deleteFile: vi.fn(),
|
||||
getAllFiles: vi.fn(),
|
||||
getFileCount: vi.fn(),
|
||||
getAST: vi.fn(),
|
||||
setAST: vi.fn(),
|
||||
deleteAST: vi.fn(),
|
||||
getAllASTs: vi.fn(),
|
||||
getMeta: vi.fn(),
|
||||
setMeta: vi.fn(),
|
||||
deleteMeta: vi.fn(),
|
||||
getAllMetas: vi.fn(),
|
||||
getSymbolIndex: vi.fn(),
|
||||
setSymbolIndex: vi.fn(),
|
||||
getDepsGraph: vi.fn(),
|
||||
setDepsGraph: vi.fn(),
|
||||
getProjectConfig: vi.fn(),
|
||||
setProjectConfig: vi.fn(),
|
||||
connect: vi.fn(),
|
||||
disconnect: vi.fn(),
|
||||
isConnected: vi.fn(),
|
||||
clear: vi.fn(),
|
||||
} as unknown as IStorage
|
||||
}
|
||||
|
||||
function createMockContext(
|
||||
storage?: IStorage,
|
||||
confirmResult = true,
|
||||
projectRoot = "/test/project",
|
||||
): ToolContext {
|
||||
return {
|
||||
projectRoot,
|
||||
storage: storage ?? createMockStorage(),
|
||||
requestConfirmation: vi.fn().mockResolvedValue(confirmResult),
|
||||
onProgress: vi.fn(),
|
||||
}
|
||||
}
|
||||
|
||||
describe("CreateFileTool", () => {
|
||||
let tool: CreateFileTool
|
||||
|
||||
beforeEach(() => {
|
||||
tool = new CreateFileTool()
|
||||
})
|
||||
|
||||
describe("metadata", () => {
|
||||
it("should have correct name", () => {
|
||||
expect(tool.name).toBe("create_file")
|
||||
})
|
||||
|
||||
it("should have correct category", () => {
|
||||
expect(tool.category).toBe("edit")
|
||||
})
|
||||
|
||||
it("should require confirmation", () => {
|
||||
expect(tool.requiresConfirmation).toBe(true)
|
||||
})
|
||||
|
||||
it("should have correct parameters", () => {
|
||||
expect(tool.parameters).toHaveLength(2)
|
||||
expect(tool.parameters[0].name).toBe("path")
|
||||
expect(tool.parameters[0].required).toBe(true)
|
||||
expect(tool.parameters[1].name).toBe("content")
|
||||
expect(tool.parameters[1].required).toBe(true)
|
||||
})
|
||||
|
||||
it("should have description mentioning confirmation", () => {
|
||||
expect(tool.description).toContain("confirmation")
|
||||
})
|
||||
})
|
||||
|
||||
describe("validateParams", () => {
|
||||
it("should return null for valid params", () => {
|
||||
expect(
|
||||
tool.validateParams({ path: "src/new-file.ts", content: "const x = 1" }),
|
||||
).toBeNull()
|
||||
})
|
||||
|
||||
it("should return error for missing path", () => {
|
||||
expect(tool.validateParams({ content: "x" })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for empty path", () => {
|
||||
expect(tool.validateParams({ path: "", content: "x" })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
expect(tool.validateParams({ path: " ", content: "x" })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for non-string path", () => {
|
||||
expect(tool.validateParams({ path: 123, content: "x" })).toBe(
|
||||
"Parameter 'path' is required and must be a non-empty string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for missing content", () => {
|
||||
expect(tool.validateParams({ path: "test.ts" })).toBe(
|
||||
"Parameter 'content' is required and must be a string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for non-string content", () => {
|
||||
expect(tool.validateParams({ path: "test.ts", content: 123 })).toBe(
|
||||
"Parameter 'content' is required and must be a string",
|
||||
)
|
||||
})
|
||||
|
||||
it("should allow empty content string", () => {
|
||||
expect(tool.validateParams({ path: "test.ts", content: "" })).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe("execute", () => {
|
||||
let tempDir: string
|
||||
|
||||
beforeEach(async () => {
|
||||
tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "create-file-test-"))
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
await fs.rm(tempDir, { recursive: true, force: true })
|
||||
})
|
||||
|
||||
it("should create new file with content", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const content = "line 1\nline 2\nline 3"
|
||||
const result = await tool.execute({ path: "new-file.ts", content }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as CreateFileResult
|
||||
expect(data.path).toBe("new-file.ts")
|
||||
expect(data.lines).toBe(3)
|
||||
|
||||
const filePath = path.join(tempDir, "new-file.ts")
|
||||
const fileContent = await fs.readFile(filePath, "utf-8")
|
||||
expect(fileContent).toBe(content)
|
||||
})
|
||||
|
||||
it("should create directories if they do not exist", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const result = await tool.execute(
|
||||
{ path: "deep/nested/dir/file.ts", content: "test" },
|
||||
ctx,
|
||||
)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
|
||||
const filePath = path.join(tempDir, "deep/nested/dir/file.ts")
|
||||
const fileContent = await fs.readFile(filePath, "utf-8")
|
||||
expect(fileContent).toBe("test")
|
||||
})
|
||||
|
||||
it("should call requestConfirmation with diff info", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
await tool.execute({ path: "new-file.ts", content: "line 1\nline 2" }, ctx)
|
||||
|
||||
expect(ctx.requestConfirmation).toHaveBeenCalledWith(
|
||||
"Create new file: new-file.ts (2 lines)",
|
||||
{
|
||||
filePath: "new-file.ts",
|
||||
oldLines: [],
|
||||
newLines: ["line 1", "line 2"],
|
||||
startLine: 1,
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
it("should cancel creation when confirmation rejected", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, false, tempDir)
|
||||
|
||||
const result = await tool.execute({ path: "new-file.ts", content: "test" }, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toBe("File creation cancelled by user")
|
||||
|
||||
const filePath = path.join(tempDir, "new-file.ts")
|
||||
await expect(fs.access(filePath)).rejects.toThrow()
|
||||
})
|
||||
|
||||
it("should update storage after creation", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
await tool.execute({ path: "new-file.ts", content: "line 1\nline 2" }, ctx)
|
||||
|
||||
expect(storage.setFile).toHaveBeenCalledWith(
|
||||
"new-file.ts",
|
||||
expect.objectContaining({
|
||||
lines: ["line 1", "line 2"],
|
||||
hash: hashLines(["line 1", "line 2"]),
|
||||
}),
|
||||
)
|
||||
})
|
||||
|
||||
it("should return error for path outside project root", async () => {
|
||||
const ctx = createMockContext(undefined, true, tempDir)
|
||||
|
||||
const result = await tool.execute({ path: "../outside/file.ts", content: "test" }, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toBe("Path contains traversal patterns")
|
||||
})
|
||||
|
||||
it("should return error if file already exists", async () => {
|
||||
const existingFile = path.join(tempDir, "existing.ts")
|
||||
await fs.writeFile(existingFile, "original content", "utf-8")
|
||||
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const result = await tool.execute({ path: "existing.ts", content: "new content" }, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toBe("File already exists: existing.ts")
|
||||
|
||||
const content = await fs.readFile(existingFile, "utf-8")
|
||||
expect(content).toBe("original content")
|
||||
})
|
||||
|
||||
it("should handle empty content", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const result = await tool.execute({ path: "empty.ts", content: "" }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as CreateFileResult
|
||||
expect(data.lines).toBe(1)
|
||||
|
||||
const filePath = path.join(tempDir, "empty.ts")
|
||||
const fileContent = await fs.readFile(filePath, "utf-8")
|
||||
expect(fileContent).toBe("")
|
||||
})
|
||||
|
||||
it("should handle single line content", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const result = await tool.execute(
|
||||
{ path: "single.ts", content: "export const x = 1" },
|
||||
ctx,
|
||||
)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as CreateFileResult
|
||||
expect(data.lines).toBe(1)
|
||||
})
|
||||
|
||||
it("should return correct file size", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const content = "hello world"
|
||||
const result = await tool.execute({ path: "file.ts", content }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as CreateFileResult
|
||||
expect(data.size).toBe(Buffer.byteLength(content, "utf-8"))
|
||||
})
|
||||
|
||||
it("should include callId in result", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const result = await tool.execute({ path: "new.ts", content: "test" }, ctx)
|
||||
|
||||
expect(result.callId).toMatch(/^create_file-\d+$/)
|
||||
})
|
||||
|
||||
it("should include executionTimeMs in result", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const result = await tool.execute({ path: "new.ts", content: "test" }, ctx)
|
||||
|
||||
expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
|
||||
it("should handle multi-line content correctly", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const content = "import { x } from './x'\n\nexport function foo() {\n return x\n}\n"
|
||||
const result = await tool.execute({ path: "foo.ts", content }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as CreateFileResult
|
||||
expect(data.lines).toBe(6)
|
||||
|
||||
const filePath = path.join(tempDir, "foo.ts")
|
||||
const fileContent = await fs.readFile(filePath, "utf-8")
|
||||
expect(fileContent).toBe(content)
|
||||
})
|
||||
|
||||
it("should handle special characters in content", async () => {
|
||||
const storage = createMockStorage()
|
||||
const ctx = createMockContext(storage, true, tempDir)
|
||||
|
||||
const content = "const emoji = '🚀'\nconst quote = \"hello 'world'\""
|
||||
const result = await tool.execute({ path: "special.ts", content }, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
|
||||
const filePath = path.join(tempDir, "special.ts")
|
||||
const fileContent = await fs.readFile(filePath, "utf-8")
|
||||
expect(fileContent).toBe(content)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,274 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"
|
||||
import { promises as fs } from "node:fs"
|
||||
import * as path from "node:path"
|
||||
import * as os from "node:os"
|
||||
import {
|
||||
DeleteFileTool,
|
||||
type DeleteFileResult,
|
||||
} from "../../../../../src/infrastructure/tools/edit/DeleteFileTool.js"
|
||||
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
|
||||
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
|
||||
|
||||
function createMockStorage(fileData: { lines: string[] } | null = null): IStorage {
|
||||
return {
|
||||
getFile: vi.fn().mockResolvedValue(fileData),
|
||||
setFile: vi.fn().mockResolvedValue(undefined),
|
||||
deleteFile: vi.fn().mockResolvedValue(undefined),
|
||||
getAllFiles: vi.fn(),
|
||||
getFileCount: vi.fn(),
|
||||
getAST: vi.fn(),
|
||||
setAST: vi.fn(),
|
||||
deleteAST: vi.fn().mockResolvedValue(undefined),
|
||||
getAllASTs: vi.fn(),
|
||||
getMeta: vi.fn(),
|
||||
setMeta: vi.fn(),
|
||||
deleteMeta: vi.fn().mockResolvedValue(undefined),
|
||||
getAllMetas: vi.fn(),
|
||||
getSymbolIndex: vi.fn(),
|
||||
setSymbolIndex: vi.fn(),
|
||||
getDepsGraph: vi.fn(),
|
||||
setDepsGraph: vi.fn(),
|
||||
getProjectConfig: vi.fn(),
|
||||
setProjectConfig: vi.fn(),
|
||||
connect: vi.fn(),
|
||||
disconnect: vi.fn(),
|
||||
isConnected: vi.fn(),
|
||||
clear: vi.fn(),
|
||||
} as unknown as IStorage
|
||||
}
|
||||
|
||||
function createMockContext(
|
||||
storage?: IStorage,
|
||||
confirmResult = true,
|
||||
projectRoot = "/test/project",
|
||||
): ToolContext {
|
||||
return {
|
||||
projectRoot,
|
||||
storage: storage ?? createMockStorage(),
|
||||
requestConfirmation: vi.fn().mockResolvedValue(confirmResult),
|
||||
onProgress: vi.fn(),
|
||||
}
|
||||
}
|
||||
|
||||
describe("DeleteFileTool", () => {
|
||||
let tool: DeleteFileTool
|
||||
|
||||
beforeEach(() => {
|
||||
tool = new DeleteFileTool()
|
||||
})
|
||||
|
||||
describe("metadata", () => {
|
||||
it("should have correct name", () => {
|
||||
expect(tool.name).toBe("delete_file")
|
||||
})
|
||||
|
||||
it("should have correct category", () => {
|
||||
expect(tool.category).toBe("edit")
|
||||
})
|
||||
|
||||
it("should require confirmation", () => {
|
||||
expect(tool.requiresConfirmation).toBe(true)
|
||||
})
|
||||
|
||||
it("should have correct parameters", () => {
|
||||
expect(tool.parameters).toHaveLength(1)
|
||||
expect(tool.parameters[0].name).toBe("path")
|
||||
expect(tool.parameters[0].required).toBe(true)
|
||||
})
|
||||
|
||||
it("should have description mentioning confirmation", () => {
|
||||
expect(tool.description).toContain("confirmation")
|
||||
})
|
||||
})
|
||||
|
||||
describe("validateParams", () => {
|
||||
it("should return null for valid params", () => {
|
||||
expect(tool.validateParams({ path: "src/file.ts" })).toBeNull()
|
||||
})
|
||||
|
||||
it("should return error for missing path", () => {
|
||||
            expect(tool.validateParams({})).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })

        it("should return error for empty path", () => {
            expect(tool.validateParams({ path: "" })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
            expect(tool.validateParams({ path: " " })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })

        it("should return error for non-string path", () => {
            expect(tool.validateParams({ path: 123 })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })
    })

    describe("execute", () => {
        let tempDir: string

        beforeEach(async () => {
            tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "delete-file-test-"))
        })

        afterEach(async () => {
            await fs.rm(tempDir, { recursive: true, force: true })
        })

        it("should delete existing file", async () => {
            const testFile = path.join(tempDir, "to-delete.ts")
            await fs.writeFile(testFile, "content to delete", "utf-8")

            const storage = createMockStorage({ lines: ["content to delete"] })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute({ path: "to-delete.ts" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as DeleteFileResult
            expect(data.path).toBe("to-delete.ts")
            expect(data.deleted).toBe(true)

            await expect(fs.access(testFile)).rejects.toThrow()
        })

        it("should delete file from storage", async () => {
            const testFile = path.join(tempDir, "to-delete.ts")
            await fs.writeFile(testFile, "content", "utf-8")

            const storage = createMockStorage({ lines: ["content"] })
            const ctx = createMockContext(storage, true, tempDir)

            await tool.execute({ path: "to-delete.ts" }, ctx)

            expect(storage.deleteFile).toHaveBeenCalledWith("to-delete.ts")
            expect(storage.deleteAST).toHaveBeenCalledWith("to-delete.ts")
            expect(storage.deleteMeta).toHaveBeenCalledWith("to-delete.ts")
        })

        it("should call requestConfirmation with diff info", async () => {
            const testFile = path.join(tempDir, "to-delete.ts")
            await fs.writeFile(testFile, "line 1\nline 2", "utf-8")

            const storage = createMockStorage({ lines: ["line 1", "line 2"] })
            const ctx = createMockContext(storage, true, tempDir)

            await tool.execute({ path: "to-delete.ts" }, ctx)

            expect(ctx.requestConfirmation).toHaveBeenCalledWith("Delete file: to-delete.ts", {
                filePath: "to-delete.ts",
                oldLines: ["line 1", "line 2"],
                newLines: [],
                startLine: 1,
            })
        })

        it("should cancel deletion when confirmation rejected", async () => {
            const testFile = path.join(tempDir, "keep.ts")
            await fs.writeFile(testFile, "keep this", "utf-8")

            const storage = createMockStorage({ lines: ["keep this"] })
            const ctx = createMockContext(storage, false, tempDir)

            const result = await tool.execute({ path: "keep.ts" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toBe("File deletion cancelled by user")

            const content = await fs.readFile(testFile, "utf-8")
            expect(content).toBe("keep this")
        })

        it("should return error for path outside project root", async () => {
            const ctx = createMockContext(undefined, true, tempDir)

            const result = await tool.execute({ path: "../outside/file.ts" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toBe("Path contains traversal patterns")
        })

        it("should return error if file does not exist", async () => {
            const storage = createMockStorage(null)
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute({ path: "nonexistent.ts" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toBe("File not found: nonexistent.ts")
        })

        it("should read content from filesystem if not in storage", async () => {
            const testFile = path.join(tempDir, "not-indexed.ts")
            await fs.writeFile(testFile, "filesystem content\nline 2", "utf-8")

            const storage = createMockStorage(null)
            const ctx = createMockContext(storage, true, tempDir)

            await tool.execute({ path: "not-indexed.ts" }, ctx)

            expect(ctx.requestConfirmation).toHaveBeenCalledWith(
                "Delete file: not-indexed.ts",
                expect.objectContaining({
                    oldLines: ["filesystem content", "line 2"],
                }),
            )
        })

        it("should include callId in result", async () => {
            const testFile = path.join(tempDir, "file.ts")
            await fs.writeFile(testFile, "x", "utf-8")

            const storage = createMockStorage({ lines: ["x"] })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute({ path: "file.ts" }, ctx)

            expect(result.callId).toMatch(/^delete_file-\d+$/)
        })

        it("should include executionTimeMs in result", async () => {
            const testFile = path.join(tempDir, "file.ts")
            await fs.writeFile(testFile, "x", "utf-8")

            const storage = createMockStorage({ lines: ["x"] })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute({ path: "file.ts" }, ctx)

            expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
        })

        it("should not delete directories", async () => {
            const dirPath = path.join(tempDir, "some-dir")
            await fs.mkdir(dirPath)

            const storage = createMockStorage(null)
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute({ path: "some-dir" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toBe("File not found: some-dir")
        })

        it("should handle nested file paths", async () => {
            const nestedDir = path.join(tempDir, "a/b/c")
            await fs.mkdir(nestedDir, { recursive: true })
            const testFile = path.join(nestedDir, "file.ts")
            await fs.writeFile(testFile, "nested", "utf-8")

            const storage = createMockStorage({ lines: ["nested"] })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute({ path: "a/b/c/file.ts" }, ctx)

            expect(result.success).toBe(true)
            await expect(fs.access(testFile)).rejects.toThrow()
        })
    })
})
@@ -0,0 +1,493 @@
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"
import { promises as fs } from "node:fs"
import * as path from "node:path"
import * as os from "node:os"
import {
    EditLinesTool,
    type EditLinesResult,
} from "../../../../../src/infrastructure/tools/edit/EditLinesTool.js"
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
import { hashLines } from "../../../../../src/shared/utils/hash.js"

function createMockStorage(fileData: { lines: string[]; hash: string } | null = null): IStorage {
    return {
        getFile: vi.fn().mockResolvedValue(fileData),
        setFile: vi.fn().mockResolvedValue(undefined),
        deleteFile: vi.fn(),
        getAllFiles: vi.fn(),
        getFileCount: vi.fn(),
        getAST: vi.fn(),
        setAST: vi.fn(),
        deleteAST: vi.fn(),
        getAllASTs: vi.fn(),
        getMeta: vi.fn(),
        setMeta: vi.fn(),
        deleteMeta: vi.fn(),
        getAllMetas: vi.fn(),
        getSymbolIndex: vi.fn(),
        setSymbolIndex: vi.fn(),
        getDepsGraph: vi.fn(),
        setDepsGraph: vi.fn(),
        getProjectConfig: vi.fn(),
        setProjectConfig: vi.fn(),
        connect: vi.fn(),
        disconnect: vi.fn(),
        isConnected: vi.fn(),
        clear: vi.fn(),
    } as unknown as IStorage
}

function createMockContext(
    storage?: IStorage,
    confirmResult = true,
    projectRoot = "/test/project",
): ToolContext {
    return {
        projectRoot,
        storage: storage ?? createMockStorage(),
        requestConfirmation: vi.fn().mockResolvedValue(confirmResult),
        onProgress: vi.fn(),
    }
}

describe("EditLinesTool", () => {
    let tool: EditLinesTool

    beforeEach(() => {
        tool = new EditLinesTool()
    })

    describe("metadata", () => {
        it("should have correct name", () => {
            expect(tool.name).toBe("edit_lines")
        })

        it("should have correct category", () => {
            expect(tool.category).toBe("edit")
        })

        it("should require confirmation", () => {
            expect(tool.requiresConfirmation).toBe(true)
        })

        it("should have correct parameters", () => {
            expect(tool.parameters).toHaveLength(4)
            expect(tool.parameters[0].name).toBe("path")
            expect(tool.parameters[0].required).toBe(true)
            expect(tool.parameters[1].name).toBe("start")
            expect(tool.parameters[1].required).toBe(true)
            expect(tool.parameters[2].name).toBe("end")
            expect(tool.parameters[2].required).toBe(true)
            expect(tool.parameters[3].name).toBe("content")
            expect(tool.parameters[3].required).toBe(true)
        })

        it("should have description mentioning confirmation", () => {
            expect(tool.description).toContain("confirmation")
        })
    })

    describe("validateParams", () => {
        it("should return null for valid params", () => {
            expect(
                tool.validateParams({
                    path: "src/index.ts",
                    start: 1,
                    end: 5,
                    content: "new content",
                }),
            ).toBeNull()
        })

        it("should return error for missing path", () => {
            expect(tool.validateParams({ start: 1, end: 5, content: "x" })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })

        it("should return error for empty path", () => {
            expect(tool.validateParams({ path: "", start: 1, end: 5, content: "x" })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
            expect(tool.validateParams({ path: " ", start: 1, end: 5, content: "x" })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })

        it("should return error for non-string path", () => {
            expect(tool.validateParams({ path: 123, start: 1, end: 5, content: "x" })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })

        it("should return error for missing start", () => {
            expect(tool.validateParams({ path: "test.ts", end: 5, content: "x" })).toBe(
                "Parameter 'start' is required and must be an integer",
            )
        })

        it("should return error for non-integer start", () => {
            expect(tool.validateParams({ path: "test.ts", start: 1.5, end: 5, content: "x" })).toBe(
                "Parameter 'start' is required and must be an integer",
            )
            expect(tool.validateParams({ path: "test.ts", start: "1", end: 5, content: "x" })).toBe(
                "Parameter 'start' is required and must be an integer",
            )
        })

        it("should return error for start < 1", () => {
            expect(tool.validateParams({ path: "test.ts", start: 0, end: 5, content: "x" })).toBe(
                "Parameter 'start' must be >= 1",
            )
            expect(tool.validateParams({ path: "test.ts", start: -1, end: 5, content: "x" })).toBe(
                "Parameter 'start' must be >= 1",
            )
        })

        it("should return error for missing end", () => {
            expect(tool.validateParams({ path: "test.ts", start: 1, content: "x" })).toBe(
                "Parameter 'end' is required and must be an integer",
            )
        })

        it("should return error for non-integer end", () => {
            expect(tool.validateParams({ path: "test.ts", start: 1, end: 5.5, content: "x" })).toBe(
                "Parameter 'end' is required and must be an integer",
            )
        })

        it("should return error for end < 1", () => {
            expect(tool.validateParams({ path: "test.ts", start: 1, end: 0, content: "x" })).toBe(
                "Parameter 'end' must be >= 1",
            )
        })

        it("should return error for start > end", () => {
            expect(tool.validateParams({ path: "test.ts", start: 10, end: 5, content: "x" })).toBe(
                "Parameter 'start' must be <= 'end'",
            )
        })

        it("should return error for missing content", () => {
            expect(tool.validateParams({ path: "test.ts", start: 1, end: 5 })).toBe(
                "Parameter 'content' is required and must be a string",
            )
        })

        it("should return error for non-string content", () => {
            expect(tool.validateParams({ path: "test.ts", start: 1, end: 5, content: 123 })).toBe(
                "Parameter 'content' is required and must be a string",
            )
        })

        it("should allow empty content string", () => {
            expect(
                tool.validateParams({ path: "test.ts", start: 1, end: 5, content: "" }),
            ).toBeNull()
        })
    })

    describe("execute", () => {
        let tempDir: string
        let testFilePath: string

        beforeEach(async () => {
            tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "edit-lines-test-"))
            testFilePath = path.join(tempDir, "test.ts")
        })

        afterEach(async () => {
            await fs.rm(tempDir, { recursive: true, force: true })
        })

        it("should replace lines with new content", async () => {
            const originalLines = ["line 1", "line 2", "line 3", "line 4", "line 5"]
            const originalContent = originalLines.join("\n")
            await fs.writeFile(testFilePath, originalContent, "utf-8")

            const lines = [...originalLines]
            const hash = hashLines(lines)
            const storage = createMockStorage({ lines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 2, end: 4, content: "new line A\nnew line B" },
                ctx,
            )

            expect(result.success).toBe(true)
            const data = result.data as EditLinesResult
            expect(data.path).toBe("test.ts")
            expect(data.startLine).toBe(2)
            expect(data.endLine).toBe(4)
            expect(data.linesReplaced).toBe(3)
            expect(data.linesInserted).toBe(2)
            expect(data.totalLines).toBe(4)

            const newContent = await fs.readFile(testFilePath, "utf-8")
            expect(newContent).toBe("line 1\nnew line A\nnew line B\nline 5")
        })

        it("should call requestConfirmation with diff info", async () => {
            const originalLines = ["line 1", "line 2", "line 3"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            await tool.execute({ path: "test.ts", start: 2, end: 2, content: "replaced" }, ctx)

            expect(ctx.requestConfirmation).toHaveBeenCalledWith("Replace lines 2-2 in test.ts", {
                filePath: "test.ts",
                oldLines: ["line 2"],
                newLines: ["replaced"],
                startLine: 2,
            })
        })

        it("should cancel edit when confirmation rejected", async () => {
            const originalLines = ["line 1", "line 2", "line 3"]
            const originalContent = originalLines.join("\n")
            await fs.writeFile(testFilePath, originalContent, "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, false, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 1, end: 1, content: "changed" },
                ctx,
            )

            expect(result.success).toBe(false)
            expect(result.error).toBe("Edit cancelled by user")

            const content = await fs.readFile(testFilePath, "utf-8")
            expect(content).toBe(originalContent)
        })

        it("should update storage after edit", async () => {
            const originalLines = ["line 1", "line 2"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            await tool.execute({ path: "test.ts", start: 1, end: 1, content: "changed" }, ctx)

            expect(storage.setFile).toHaveBeenCalledWith(
                "test.ts",
                expect.objectContaining({
                    lines: ["changed", "line 2"],
                    hash: hashLines(["changed", "line 2"]),
                }),
            )
        })

        it("should return error for path outside project root", async () => {
            const ctx = createMockContext()

            const result = await tool.execute(
                { path: "../outside/file.ts", start: 1, end: 1, content: "x" },
                ctx,
            )

            expect(result.success).toBe(false)
            expect(result.error).toBe("Path contains traversal patterns")
        })

        it("should return error when start exceeds file length", async () => {
            const originalLines = ["line 1", "line 2"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 10, end: 15, content: "x" },
                ctx,
            )

            expect(result.success).toBe(false)
            expect(result.error).toBe("Start line 10 exceeds file length (2 lines)")
        })

        it("should adjust end to file length if it exceeds", async () => {
            const originalLines = ["line 1", "line 2", "line 3"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 2, end: 100, content: "new" },
                ctx,
            )

            expect(result.success).toBe(true)
            const data = result.data as EditLinesResult
            expect(data.endLine).toBe(3)
            expect(data.linesReplaced).toBe(2)
        })

        it("should detect hash conflict", async () => {
            const originalLines = ["line 1", "line 2"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const oldHash = hashLines(["old content"])
            const storage = createMockStorage({ lines: originalLines, hash: oldHash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 1, end: 1, content: "new" },
                ctx,
            )

            expect(result.success).toBe(false)
            expect(result.error).toBe(
                "File has been modified externally. Please refresh the file before editing.",
            )
        })

        it("should allow edit when file not in storage", async () => {
            const originalLines = ["line 1", "line 2"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const storage = createMockStorage(null)
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 1, end: 1, content: "new" },
                ctx,
            )

            expect(result.success).toBe(true)
        })

        it("should handle single line replacement", async () => {
            const originalLines = ["line 1", "line 2", "line 3"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 2, end: 2, content: "replaced line 2" },
                ctx,
            )

            expect(result.success).toBe(true)
            const content = await fs.readFile(testFilePath, "utf-8")
            expect(content).toBe("line 1\nreplaced line 2\nline 3")
        })

        it("should handle replacing all lines", async () => {
            const originalLines = ["line 1", "line 2"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 1, end: 2, content: "completely\nnew\nfile" },
                ctx,
            )

            expect(result.success).toBe(true)
            const content = await fs.readFile(testFilePath, "utf-8")
            expect(content).toBe("completely\nnew\nfile")
        })

        it("should handle inserting more lines than replaced", async () => {
            const originalLines = ["line 1", "line 2"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 1, end: 1, content: "a\nb\nc\nd" },
                ctx,
            )

            expect(result.success).toBe(true)
            const data = result.data as EditLinesResult
            expect(data.linesReplaced).toBe(1)
            expect(data.linesInserted).toBe(4)
            expect(data.totalLines).toBe(5)
        })

        it("should handle deleting lines (empty content)", async () => {
            const originalLines = ["line 1", "line 2", "line 3"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 2, end: 2, content: "" },
                ctx,
            )

            expect(result.success).toBe(true)
            const data = result.data as EditLinesResult
            expect(data.linesReplaced).toBe(1)
            expect(data.linesInserted).toBe(1)
            expect(data.totalLines).toBe(3)
        })

        it("should include callId in result", async () => {
            const originalLines = ["line 1"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 1, end: 1, content: "new" },
                ctx,
            )

            expect(result.callId).toMatch(/^edit_lines-\d+$/)
        })

        it("should include executionTimeMs in result", async () => {
            const originalLines = ["line 1"]
            await fs.writeFile(testFilePath, originalLines.join("\n"), "utf-8")

            const hash = hashLines(originalLines)
            const storage = createMockStorage({ lines: originalLines, hash })
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "test.ts", start: 1, end: 1, content: "new" },
                ctx,
            )

            expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
        })

        it("should return error when file not found", async () => {
            const storage = createMockStorage(null)
            const ctx = createMockContext(storage, true, tempDir)

            const result = await tool.execute(
                { path: "nonexistent.ts", start: 1, end: 1, content: "x" },
                ctx,
            )

            expect(result.success).toBe(false)
            expect(result.error).toContain("ENOENT")
        })
    })
})
@@ -0,0 +1,390 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
import {
    GitCommitTool,
    type GitCommitResult,
} from "../../../../../src/infrastructure/tools/git/GitCommitTool.js"
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
import type { SimpleGit, CommitResult, StatusResult } from "simple-git"

function createMockStorage(): IStorage {
    return {
        getFile: vi.fn(),
        setFile: vi.fn(),
        deleteFile: vi.fn(),
        getAllFiles: vi.fn().mockResolvedValue(new Map()),
        getFileCount: vi.fn().mockResolvedValue(0),
        getAST: vi.fn(),
        setAST: vi.fn(),
        deleteAST: vi.fn(),
        getAllASTs: vi.fn().mockResolvedValue(new Map()),
        getMeta: vi.fn(),
        setMeta: vi.fn(),
        deleteMeta: vi.fn(),
        getAllMetas: vi.fn().mockResolvedValue(new Map()),
        getSymbolIndex: vi.fn().mockResolvedValue(new Map()),
        setSymbolIndex: vi.fn(),
        getDepsGraph: vi.fn().mockResolvedValue({ imports: new Map(), importedBy: new Map() }),
        setDepsGraph: vi.fn(),
        getProjectConfig: vi.fn(),
        setProjectConfig: vi.fn(),
        connect: vi.fn(),
        disconnect: vi.fn(),
        isConnected: vi.fn().mockReturnValue(true),
        clear: vi.fn(),
    } as unknown as IStorage
}

function createMockContext(storage?: IStorage, confirmResult: boolean = true): ToolContext {
    return {
        projectRoot: "/test/project",
        storage: storage ?? createMockStorage(),
        requestConfirmation: vi.fn().mockResolvedValue(confirmResult),
        onProgress: vi.fn(),
    }
}

function createMockStatusResult(overrides: Partial<StatusResult> = {}): StatusResult {
    return {
        not_added: [],
        conflicted: [],
        created: [],
        deleted: [],
        ignored: [],
        modified: [],
        renamed: [],
        files: [],
        staged: ["file.ts"],
        ahead: 0,
        behind: 0,
        current: "main",
        tracking: "origin/main",
        detached: false,
        isClean: () => false,
        ...overrides,
    } as StatusResult
}

function createMockCommitResult(overrides: Partial<CommitResult> = {}): CommitResult {
    return {
        commit: "abc1234",
        branch: "main",
        root: false,
        author: null,
        summary: {
            changes: 1,
            insertions: 5,
            deletions: 2,
        },
        ...overrides,
    } as CommitResult
}

function createMockGit(options: {
    isRepo?: boolean
    status?: StatusResult
    commitResult?: CommitResult
    error?: Error
    addError?: Error
}): SimpleGit {
    const mockGit = {
        checkIsRepo: vi.fn().mockResolvedValue(options.isRepo ?? true),
        status: vi.fn().mockResolvedValue(options.status ?? createMockStatusResult()),
        add: vi.fn(),
        commit: vi.fn(),
    }

    if (options.addError) {
        mockGit.add.mockRejectedValue(options.addError)
    } else {
        mockGit.add.mockResolvedValue(undefined)
    }

    if (options.error) {
        mockGit.commit.mockRejectedValue(options.error)
    } else {
        mockGit.commit.mockResolvedValue(options.commitResult ?? createMockCommitResult())
    }

    return mockGit as unknown as SimpleGit
}

describe("GitCommitTool", () => {
    let tool: GitCommitTool

    beforeEach(() => {
        tool = new GitCommitTool()
    })

    describe("metadata", () => {
        it("should have correct name", () => {
            expect(tool.name).toBe("git_commit")
        })

        it("should have correct category", () => {
            expect(tool.category).toBe("git")
        })

        it("should require confirmation", () => {
            expect(tool.requiresConfirmation).toBe(true)
        })

        it("should have correct parameters", () => {
            expect(tool.parameters).toHaveLength(2)
            expect(tool.parameters[0].name).toBe("message")
            expect(tool.parameters[0].required).toBe(true)
            expect(tool.parameters[1].name).toBe("files")
            expect(tool.parameters[1].required).toBe(false)
        })

        it("should have description", () => {
            expect(tool.description).toContain("commit")
            expect(tool.description).toContain("confirmation")
        })
    })

    describe("validateParams", () => {
        it("should return error for missing message", () => {
            expect(tool.validateParams({})).toContain("message")
            expect(tool.validateParams({})).toContain("required")
        })

        it("should return error for non-string message", () => {
            expect(tool.validateParams({ message: 123 })).toContain("message")
            expect(tool.validateParams({ message: 123 })).toContain("string")
        })

        it("should return error for empty message", () => {
            expect(tool.validateParams({ message: "" })).toContain("empty")
            expect(tool.validateParams({ message: " " })).toContain("empty")
        })

        it("should return null for valid message", () => {
            expect(tool.validateParams({ message: "fix: bug" })).toBeNull()
        })

        it("should return null for valid message with files", () => {
            expect(tool.validateParams({ message: "fix: bug", files: ["a.ts", "b.ts"] })).toBeNull()
        })

        it("should return error for non-array files", () => {
            expect(tool.validateParams({ message: "fix: bug", files: "a.ts" })).toContain("array")
        })

        it("should return error for non-string in files array", () => {
            expect(tool.validateParams({ message: "fix: bug", files: [1, 2] })).toContain("strings")
        })
    })

    describe("execute", () => {
        describe("not a git repository", () => {
            it("should return error when not in a git repo", async () => {
                const mockGit = createMockGit({ isRepo: false })
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(result.success).toBe(false)
                expect(result.error).toContain("Not a git repository")
            })
        })

        describe("nothing to commit", () => {
            it("should return error when no staged files", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({ staged: [] }),
                })
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(result.success).toBe(false)
                expect(result.error).toContain("Nothing to commit")
            })
        })

        describe("with staged files", () => {
            it("should commit successfully", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({ staged: ["file.ts"] }),
                    commitResult: createMockCommitResult({
                        commit: "def5678",
                        branch: "main",
                        summary: { changes: 1, insertions: 10, deletions: 3 },
                    }),
                })
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ message: "feat: new feature" }, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitCommitResult
                expect(data.hash).toBe("def5678")
                expect(data.branch).toBe("main")
                expect(data.message).toBe("feat: new feature")
                expect(data.filesChanged).toBe(1)
                expect(data.insertions).toBe(10)
                expect(data.deletions).toBe(3)
            })

            it("should include author when available", async () => {
                const mockGit = createMockGit({
                    commitResult: createMockCommitResult({
                        author: {
                            name: "Test User",
                            email: "test@example.com",
                        },
                    }),
                })
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitCommitResult
                expect(data.author).toEqual({
                    name: "Test User",
                    email: "test@example.com",
                })
            })
        })

        describe("files parameter", () => {
            it("should stage specified files before commit", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({ staged: [] }),
                })
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                await toolWithMock.execute({ message: "test", files: ["a.ts", "b.ts"] }, ctx)

                expect(mockGit.add).toHaveBeenCalledWith(["a.ts", "b.ts"])
            })

            it("should not call add when files is empty", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                await toolWithMock.execute({ message: "test", files: [] }, ctx)

                expect(mockGit.add).not.toHaveBeenCalled()
            })

            it("should handle add errors", async () => {
                const mockGit = createMockGit({
                    addError: new Error("Failed to add files"),
                })
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute(
                    { message: "test", files: ["nonexistent.ts"] },
                    ctx,
                )

                expect(result.success).toBe(false)
                expect(result.error).toContain("Failed to add files")
            })
        })

        describe("confirmation", () => {
            it("should request confirmation before commit", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(ctx.requestConfirmation).toHaveBeenCalled()
                const confirmMessage = (ctx.requestConfirmation as ReturnType<typeof vi.fn>).mock
                    .calls[0][0] as string
                expect(confirmMessage).toContain("Committing")
                expect(confirmMessage).toContain("test commit")
            })

            it("should cancel commit when user declines", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext(undefined, false)

                const result = await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(result.success).toBe(false)
                expect(result.error).toContain("cancelled")
                expect(mockGit.commit).not.toHaveBeenCalled()
            })

            it("should proceed with commit when user confirms", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext(undefined, true)

                const result = await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(result.success).toBe(true)
                expect(mockGit.commit).toHaveBeenCalledWith("test commit")
            })
        })

        describe("error handling", () => {
            it("should handle git command errors", async () => {
                const mockGit = createMockGit({
                    error: new Error("Git commit failed"),
                })
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(result.success).toBe(false)
                expect(result.error).toContain("Git commit failed")
            })

            it("should handle non-Error exceptions", async () => {
                const mockGit = {
                    checkIsRepo: vi.fn().mockResolvedValue(true),
                    status: vi.fn().mockResolvedValue(createMockStatusResult()),
                    add: vi.fn(),
                    commit: vi.fn().mockRejectedValue("string error"),
                } as unknown as SimpleGit
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(result.success).toBe(false)
                expect(result.error).toBe("string error")
            })
        })

        describe("timing", () => {
            it("should return timing information", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
            })
        })

        describe("call id", () => {
            it("should generate unique call id", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitCommitTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ message: "test commit" }, ctx)

                expect(result.callId).toMatch(/^git_commit-\d+$/)
            })
        })
    })
})
@@ -0,0 +1,393 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
import {
    GitDiffTool,
    type GitDiffResult,
} from "../../../../../src/infrastructure/tools/git/GitDiffTool.js"
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
import type { SimpleGit, DiffResult } from "simple-git"

function createMockStorage(): IStorage {
    return {
        getFile: vi.fn(),
        setFile: vi.fn(),
        deleteFile: vi.fn(),
        getAllFiles: vi.fn().mockResolvedValue(new Map()),
        getFileCount: vi.fn().mockResolvedValue(0),
        getAST: vi.fn(),
        setAST: vi.fn(),
        deleteAST: vi.fn(),
        getAllASTs: vi.fn().mockResolvedValue(new Map()),
        getMeta: vi.fn(),
        setMeta: vi.fn(),
        deleteMeta: vi.fn(),
        getAllMetas: vi.fn().mockResolvedValue(new Map()),
        getSymbolIndex: vi.fn().mockResolvedValue(new Map()),
        setSymbolIndex: vi.fn(),
        getDepsGraph: vi.fn().mockResolvedValue({ imports: new Map(), importedBy: new Map() }),
        setDepsGraph: vi.fn(),
        getProjectConfig: vi.fn(),
        setProjectConfig: vi.fn(),
        connect: vi.fn(),
        disconnect: vi.fn(),
        isConnected: vi.fn().mockReturnValue(true),
        clear: vi.fn(),
    } as unknown as IStorage
}

function createMockContext(storage?: IStorage): ToolContext {
    return {
        projectRoot: "/test/project",
        storage: storage ?? createMockStorage(),
        requestConfirmation: vi.fn().mockResolvedValue(true),
        onProgress: vi.fn(),
    }
}

function createMockDiffSummary(overrides: Partial<DiffResult> = {}): DiffResult {
    return {
        changed: 0,
        deletions: 0,
        insertions: 0,
        files: [],
        ...overrides,
    } as DiffResult
}

function createMockGit(options: {
    isRepo?: boolean
    diffSummary?: DiffResult
    diff?: string
    error?: Error
}): SimpleGit {
    const mockGit = {
        checkIsRepo: vi.fn().mockResolvedValue(options.isRepo ?? true),
        diffSummary: vi.fn(),
        diff: vi.fn(),
    }

    if (options.error) {
        mockGit.diffSummary.mockRejectedValue(options.error)
    } else {
        mockGit.diffSummary.mockResolvedValue(options.diffSummary ?? createMockDiffSummary())
        mockGit.diff.mockResolvedValue(options.diff ?? "")
    }

    return mockGit as unknown as SimpleGit
}

describe("GitDiffTool", () => {
    let tool: GitDiffTool

    beforeEach(() => {
        tool = new GitDiffTool()
    })

    describe("metadata", () => {
        it("should have correct name", () => {
            expect(tool.name).toBe("git_diff")
        })

        it("should have correct category", () => {
            expect(tool.category).toBe("git")
        })

        it("should not require confirmation", () => {
            expect(tool.requiresConfirmation).toBe(false)
        })

        it("should have correct parameters", () => {
            expect(tool.parameters).toHaveLength(2)
            expect(tool.parameters[0].name).toBe("path")
            expect(tool.parameters[0].required).toBe(false)
            expect(tool.parameters[1].name).toBe("staged")
            expect(tool.parameters[1].required).toBe(false)
        })

        it("should have description", () => {
            expect(tool.description).toContain("diff")
            expect(tool.description).toContain("changes")
        })
    })

    describe("validateParams", () => {
        it("should return null for empty params", () => {
            expect(tool.validateParams({})).toBeNull()
        })

        it("should return null for valid path", () => {
            expect(tool.validateParams({ path: "src" })).toBeNull()
        })

        it("should return null for valid staged", () => {
            expect(tool.validateParams({ staged: true })).toBeNull()
            expect(tool.validateParams({ staged: false })).toBeNull()
        })

        it("should return error for invalid path type", () => {
            expect(tool.validateParams({ path: 123 })).toContain("path")
            expect(tool.validateParams({ path: 123 })).toContain("string")
        })

        it("should return error for invalid staged type", () => {
            expect(tool.validateParams({ staged: "yes" })).toContain("staged")
            expect(tool.validateParams({ staged: "yes" })).toContain("boolean")
        })
    })

    describe("execute", () => {
        describe("not a git repository", () => {
            it("should return error when not in a git repo", async () => {
                const mockGit = createMockGit({ isRepo: false })
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(false)
                expect(result.error).toContain("Not a git repository")
            })
        })

        describe("no changes", () => {
            it("should return empty diff for clean repo", async () => {
                const mockGit = createMockGit({
                    diffSummary: createMockDiffSummary({ files: [] }),
                    diff: "",
                })
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitDiffResult
                expect(data.hasChanges).toBe(false)
                expect(data.files).toHaveLength(0)
                expect(data.diff).toBe("")
            })
        })

        describe("with changes", () => {
            it("should return diff for modified files", async () => {
                const mockGit = createMockGit({
                    diffSummary: createMockDiffSummary({
                        files: [
                            { file: "src/index.ts", insertions: 5, deletions: 2, binary: false },
                        ],
                        insertions: 5,
                        deletions: 2,
                    }),
                    diff: "diff --git a/src/index.ts",
                })
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitDiffResult
                expect(data.hasChanges).toBe(true)
                expect(data.files).toHaveLength(1)
                expect(data.files[0].file).toBe("src/index.ts")
                expect(data.files[0].insertions).toBe(5)
                expect(data.files[0].deletions).toBe(2)
            })

            it("should return multiple files", async () => {
                const mockGit = createMockGit({
                    diffSummary: createMockDiffSummary({
                        files: [
                            { file: "a.ts", insertions: 1, deletions: 0, binary: false },
                            { file: "b.ts", insertions: 2, deletions: 1, binary: false },
                            { file: "c.ts", insertions: 0, deletions: 5, binary: false },
                        ],
                        insertions: 3,
                        deletions: 6,
                    }),
                })
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitDiffResult
                expect(data.files).toHaveLength(3)
                expect(data.summary.filesChanged).toBe(3)
                expect(data.summary.insertions).toBe(3)
                expect(data.summary.deletions).toBe(6)
            })

            it("should handle binary files", async () => {
                const mockGit = createMockGit({
                    diffSummary: createMockDiffSummary({
                        files: [{ file: "image.png", insertions: 0, deletions: 0, binary: true }],
                    }),
                })
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitDiffResult
                expect(data.files[0].binary).toBe(true)
            })
        })

        describe("staged parameter", () => {
            it("should default to false (unstaged)", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitDiffResult
                expect(data.staged).toBe(false)
                expect(mockGit.diffSummary).toHaveBeenCalledWith([])
            })

            it("should pass --cached for staged=true", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ staged: true }, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitDiffResult
                expect(data.staged).toBe(true)
                expect(mockGit.diffSummary).toHaveBeenCalledWith(["--cached"])
            })
        })

        describe("path parameter", () => {
            it("should filter by path", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({ path: "src" }, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitDiffResult
                expect(data.pathFilter).toBe("src")
                expect(mockGit.diffSummary).toHaveBeenCalledWith(["--", "src"])
            })

            it("should combine staged and path", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute(
                    { staged: true, path: "src/index.ts" },
                    ctx,
                )

                expect(result.success).toBe(true)
                expect(mockGit.diffSummary).toHaveBeenCalledWith(["--cached", "--", "src/index.ts"])
            })

            it("should return null pathFilter when not provided", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitDiffResult
                expect(data.pathFilter).toBeNull()
            })
        })

        describe("diff text", () => {
            it("should include full diff text", async () => {
                const diffText = `diff --git a/src/index.ts b/src/index.ts
index abc123..def456 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,3 +1,4 @@
+import { foo } from "./foo"
export function main() {
console.log("hello")
}`
                const mockGit = createMockGit({
                    diffSummary: createMockDiffSummary({
                        files: [
                            { file: "src/index.ts", insertions: 1, deletions: 0, binary: false },
                        ],
                    }),
                    diff: diffText,
                })
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitDiffResult
                expect(data.diff).toBe(diffText)
                expect(data.diff).toContain("diff --git")
                expect(data.diff).toContain("import { foo }")
            })
        })

        describe("error handling", () => {
            it("should handle git command errors", async () => {
                const mockGit = createMockGit({
                    error: new Error("Git command failed"),
                })
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(false)
                expect(result.error).toContain("Git command failed")
            })

            it("should handle non-Error exceptions", async () => {
                const mockGit = {
                    checkIsRepo: vi.fn().mockResolvedValue(true),
                    diffSummary: vi.fn().mockRejectedValue("string error"),
                } as unknown as SimpleGit
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(false)
                expect(result.error).toBe("string error")
            })
        })

        describe("timing", () => {
            it("should return timing information", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
            })
        })

        describe("call id", () => {
            it("should generate unique call id", async () => {
                const mockGit = createMockGit({})
                const toolWithMock = new GitDiffTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.callId).toMatch(/^git_diff-\d+$/)
            })
        })
    })
})
@@ -0,0 +1,503 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
import {
    GitStatusTool,
    type GitStatusResult,
} from "../../../../../src/infrastructure/tools/git/GitStatusTool.js"
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
import type { SimpleGit, StatusResult } from "simple-git"

function createMockStorage(): IStorage {
    return {
        getFile: vi.fn(),
        setFile: vi.fn(),
        deleteFile: vi.fn(),
        getAllFiles: vi.fn().mockResolvedValue(new Map()),
        getFileCount: vi.fn().mockResolvedValue(0),
        getAST: vi.fn(),
        setAST: vi.fn(),
        deleteAST: vi.fn(),
        getAllASTs: vi.fn().mockResolvedValue(new Map()),
        getMeta: vi.fn(),
        setMeta: vi.fn(),
        deleteMeta: vi.fn(),
        getAllMetas: vi.fn().mockResolvedValue(new Map()),
        getSymbolIndex: vi.fn().mockResolvedValue(new Map()),
        setSymbolIndex: vi.fn(),
        getDepsGraph: vi.fn().mockResolvedValue({ imports: new Map(), importedBy: new Map() }),
        setDepsGraph: vi.fn(),
        getProjectConfig: vi.fn(),
        setProjectConfig: vi.fn(),
        connect: vi.fn(),
        disconnect: vi.fn(),
        isConnected: vi.fn().mockReturnValue(true),
        clear: vi.fn(),
    } as unknown as IStorage
}

function createMockContext(storage?: IStorage): ToolContext {
    return {
        projectRoot: "/test/project",
        storage: storage ?? createMockStorage(),
        requestConfirmation: vi.fn().mockResolvedValue(true),
        onProgress: vi.fn(),
    }
}

function createMockStatusResult(overrides: Partial<StatusResult> = {}): StatusResult {
    return {
        not_added: [],
        conflicted: [],
        created: [],
        deleted: [],
        ignored: [],
        modified: [],
        renamed: [],
        files: [],
        staged: [],
        ahead: 0,
        behind: 0,
        current: "main",
        tracking: "origin/main",
        detached: false,
        isClean: () => true,
        ...overrides,
    } as StatusResult
}

function createMockGit(options: {
    isRepo?: boolean
    status?: StatusResult
    error?: Error
}): SimpleGit {
    const mockGit = {
        checkIsRepo: vi.fn().mockResolvedValue(options.isRepo ?? true),
        status: vi.fn(),
    }

    if (options.error) {
        mockGit.status.mockRejectedValue(options.error)
    } else {
        mockGit.status.mockResolvedValue(options.status ?? createMockStatusResult())
    }

    return mockGit as unknown as SimpleGit
}

describe("GitStatusTool", () => {
    let tool: GitStatusTool

    beforeEach(() => {
        tool = new GitStatusTool()
    })

    describe("metadata", () => {
        it("should have correct name", () => {
            expect(tool.name).toBe("git_status")
        })

        it("should have correct category", () => {
            expect(tool.category).toBe("git")
        })

        it("should not require confirmation", () => {
            expect(tool.requiresConfirmation).toBe(false)
        })

        it("should have no parameters", () => {
            expect(tool.parameters).toHaveLength(0)
        })

        it("should have description", () => {
            expect(tool.description).toContain("git")
            expect(tool.description).toContain("status")
        })
    })

    describe("validateParams", () => {
        it("should return null for empty params", () => {
            expect(tool.validateParams({})).toBeNull()
        })

        it("should return null for any params (no required)", () => {
            expect(tool.validateParams({ foo: "bar" })).toBeNull()
        })
    })

    describe("execute", () => {
        describe("not a git repository", () => {
            it("should return error when not in a git repo", async () => {
                const mockGit = createMockGit({ isRepo: false })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(false)
                expect(result.error).toContain("Not a git repository")
            })
        })

        describe("clean repository", () => {
            it("should return clean status", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({
                        current: "main",
                        tracking: "origin/main",
                        ahead: 0,
                        behind: 0,
                        isClean: () => true,
                    }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.branch).toBe("main")
                expect(data.tracking).toBe("origin/main")
                expect(data.isClean).toBe(true)
                expect(data.staged).toHaveLength(0)
                expect(data.modified).toHaveLength(0)
                expect(data.untracked).toHaveLength(0)
            })
        })

        describe("branch information", () => {
            it("should return current branch name", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({ current: "feature/test" }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.branch).toBe("feature/test")
            })

            it("should handle detached HEAD", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({ current: null }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.branch).toBe("HEAD (detached)")
            })

            it("should return tracking branch when available", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({ tracking: "origin/develop" }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.tracking).toBe("origin/develop")
            })

            it("should handle no tracking branch", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({ tracking: null }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.tracking).toBeNull()
            })

            it("should return ahead/behind counts", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({ ahead: 3, behind: 1 }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.ahead).toBe(3)
                expect(data.behind).toBe(1)
            })
        })

        describe("staged files", () => {
            it("should return staged files (new file)", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({
                        files: [{ path: "new.ts", index: "A", working_dir: " " }],
                        isClean: () => false,
                    }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.staged).toHaveLength(1)
                expect(data.staged[0].path).toBe("new.ts")
                expect(data.staged[0].index).toBe("A")
            })

            it("should return staged files (modified)", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({
                        files: [{ path: "src/index.ts", index: "M", working_dir: " " }],
                        isClean: () => false,
                    }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.staged).toHaveLength(1)
                expect(data.staged[0].path).toBe("src/index.ts")
                expect(data.staged[0].index).toBe("M")
            })

            it("should return staged files (deleted)", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({
                        files: [{ path: "old.ts", index: "D", working_dir: " " }],
                        isClean: () => false,
                    }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.staged).toHaveLength(1)
                expect(data.staged[0].index).toBe("D")
            })

            it("should return multiple staged files", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({
                        files: [
                            { path: "a.ts", index: "A", working_dir: " " },
                            { path: "b.ts", index: "M", working_dir: " " },
                            { path: "c.ts", index: "D", working_dir: " " },
                        ],
                        isClean: () => false,
                    }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.staged).toHaveLength(3)
            })
        })

        describe("modified files", () => {
            it("should return modified unstaged files", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({
                        files: [{ path: "src/app.ts", index: " ", working_dir: "M" }],
                        isClean: () => false,
                    }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)

                expect(result.success).toBe(true)
                const data = result.data as GitStatusResult
                expect(data.modified).toHaveLength(1)
                expect(data.modified[0].path).toBe("src/app.ts")
                expect(data.modified[0].workingDir).toBe("M")
            })

            it("should return deleted unstaged files", async () => {
                const mockGit = createMockGit({
                    status: createMockStatusResult({
                        files: [{ path: "deleted.ts", index: " ", working_dir: "D" }],
                        isClean: () => false,
                    }),
                })
                const toolWithMock = new GitStatusTool(() => mockGit)
                const ctx = createMockContext()

                const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GitStatusResult
|
||||
expect(data.modified).toHaveLength(1)
|
||||
expect(data.modified[0].workingDir).toBe("D")
|
||||
})
|
||||
})
|
||||
|
||||
describe("untracked files", () => {
|
||||
it("should return untracked files", async () => {
|
||||
const mockGit = createMockGit({
|
||||
status: createMockStatusResult({
|
||||
not_added: ["new-file.ts", "another.js"],
|
||||
isClean: () => false,
|
||||
}),
|
||||
})
|
||||
const toolWithMock = new GitStatusTool(() => mockGit)
|
||||
const ctx = createMockContext()
|
||||
|
||||
const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GitStatusResult
|
||||
expect(data.untracked).toContain("new-file.ts")
|
||||
expect(data.untracked).toContain("another.js")
|
||||
})
|
||||
})
|
||||
|
||||
describe("conflicted files", () => {
|
||||
it("should return conflicted files", async () => {
|
||||
const mockGit = createMockGit({
|
||||
status: createMockStatusResult({
|
||||
conflicted: ["conflict.ts"],
|
||||
isClean: () => false,
|
||||
}),
|
||||
})
|
||||
const toolWithMock = new GitStatusTool(() => mockGit)
|
||||
const ctx = createMockContext()
|
||||
|
||||
const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GitStatusResult
|
||||
expect(data.conflicted).toContain("conflict.ts")
|
||||
})
|
||||
})
|
||||
|
||||
describe("mixed status", () => {
|
||||
it("should correctly categorize files with both staged and unstaged changes", async () => {
|
||||
const mockGit = createMockGit({
|
||||
status: createMockStatusResult({
|
||||
files: [{ path: "both.ts", index: "M", working_dir: "M" }],
|
||||
isClean: () => false,
|
||||
}),
|
||||
})
|
||||
const toolWithMock = new GitStatusTool(() => mockGit)
|
||||
const ctx = createMockContext()
|
||||
|
||||
const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GitStatusResult
|
||||
expect(data.staged).toHaveLength(1)
|
||||
expect(data.modified).toHaveLength(1)
|
||||
expect(data.staged[0].path).toBe("both.ts")
|
||||
expect(data.modified[0].path).toBe("both.ts")
|
||||
})
|
||||
|
||||
it("should not include untracked in staged/modified", async () => {
|
||||
const mockGit = createMockGit({
|
||||
status: createMockStatusResult({
|
||||
files: [{ path: "new.ts", index: "?", working_dir: "?" }],
|
||||
not_added: ["new.ts"],
|
||||
isClean: () => false,
|
||||
}),
|
||||
})
|
||||
const toolWithMock = new GitStatusTool(() => mockGit)
|
||||
const ctx = createMockContext()
|
||||
|
||||
const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(true)
|
||||
const data = result.data as GitStatusResult
|
||||
expect(data.staged).toHaveLength(0)
|
||||
expect(data.modified).toHaveLength(0)
|
||||
expect(data.untracked).toContain("new.ts")
|
||||
})
|
||||
})
|
||||
|
||||
describe("error handling", () => {
|
||||
it("should handle git command errors", async () => {
|
||||
const mockGit = createMockGit({
|
||||
error: new Error("Git command failed"),
|
||||
})
|
||||
const toolWithMock = new GitStatusTool(() => mockGit)
|
||||
const ctx = createMockContext()
|
||||
|
||||
const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toContain("Git command failed")
|
||||
})
|
||||
|
||||
it("should handle non-Error exceptions", async () => {
|
||||
const mockGit = {
|
||||
checkIsRepo: vi.fn().mockResolvedValue(true),
|
||||
status: vi.fn().mockRejectedValue("string error"),
|
||||
} as unknown as SimpleGit
|
||||
const toolWithMock = new GitStatusTool(() => mockGit)
|
||||
const ctx = createMockContext()
|
||||
|
||||
const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.success).toBe(false)
|
||||
expect(result.error).toBe("string error")
|
||||
})
|
||||
})
|
||||
|
||||
describe("timing", () => {
|
||||
it("should return timing information", async () => {
|
||||
const mockGit = createMockGit({})
|
||||
const toolWithMock = new GitStatusTool(() => mockGit)
|
||||
const ctx = createMockContext()
|
||||
|
||||
const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
|
||||
it("should include timing on error", async () => {
|
||||
const mockGit = createMockGit({ error: new Error("fail") })
|
||||
const toolWithMock = new GitStatusTool(() => mockGit)
|
||||
const ctx = createMockContext()
|
||||
|
||||
const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.executionTimeMs).toBeGreaterThanOrEqual(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe("call id", () => {
|
||||
it("should generate unique call id", async () => {
|
||||
const mockGit = createMockGit({})
|
||||
const toolWithMock = new GitStatusTool(() => mockGit)
|
||||
const ctx = createMockContext()
|
||||
|
||||
const result = await toolWithMock.execute({}, ctx)
|
||||
|
||||
expect(result.callId).toMatch(/^git_status-\d+$/)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,348 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
import {
    GetClassTool,
    type GetClassResult,
} from "../../../../../src/infrastructure/tools/read/GetClassTool.js"
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
import type { FileAST, ClassInfo } from "../../../../../src/domain/value-objects/FileAST.js"

function createMockClass(overrides: Partial<ClassInfo> = {}): ClassInfo {
    return {
        name: "TestClass",
        lineStart: 1,
        lineEnd: 10,
        methods: [
            {
                name: "testMethod",
                lineStart: 3,
                lineEnd: 5,
                params: [],
                isAsync: false,
                visibility: "public",
                isStatic: false,
            },
        ],
        properties: [
            {
                name: "testProp",
                line: 2,
                visibility: "private",
                isStatic: false,
                isReadonly: false,
            },
        ],
        implements: [],
        isExported: true,
        isAbstract: false,
        ...overrides,
    }
}

function createMockAST(classes: ClassInfo[] = []): FileAST {
    return {
        imports: [],
        exports: [],
        functions: [],
        classes,
        interfaces: [],
        typeAliases: [],
        parseError: false,
    }
}

function createMockStorage(
    fileData: { lines: string[] } | null = null,
    ast: FileAST | null = null,
): IStorage {
    return {
        getFile: vi.fn().mockResolvedValue(fileData),
        setFile: vi.fn(),
        deleteFile: vi.fn(),
        getAllFiles: vi.fn(),
        getAST: vi.fn().mockResolvedValue(ast),
        setAST: vi.fn(),
        getMeta: vi.fn(),
        setMeta: vi.fn(),
        getSymbolIndex: vi.fn(),
        setSymbolIndex: vi.fn(),
        getDepsGraph: vi.fn(),
        setDepsGraph: vi.fn(),
        getConfig: vi.fn(),
        setConfig: vi.fn(),
        clear: vi.fn(),
    } as unknown as IStorage
}

function createMockContext(storage?: IStorage): ToolContext {
    return {
        projectRoot: "/test/project",
        storage: storage ?? createMockStorage(),
        requestConfirmation: vi.fn().mockResolvedValue(true),
        onProgress: vi.fn(),
    }
}

describe("GetClassTool", () => {
    let tool: GetClassTool

    beforeEach(() => {
        tool = new GetClassTool()
    })

    describe("metadata", () => {
        it("should have correct name", () => {
            expect(tool.name).toBe("get_class")
        })

        it("should have correct category", () => {
            expect(tool.category).toBe("read")
        })

        it("should not require confirmation", () => {
            expect(tool.requiresConfirmation).toBe(false)
        })

        it("should have correct parameters", () => {
            expect(tool.parameters).toHaveLength(2)
            expect(tool.parameters[0].name).toBe("path")
            expect(tool.parameters[0].required).toBe(true)
            expect(tool.parameters[1].name).toBe("name")
            expect(tool.parameters[1].required).toBe(true)
        })
    })

    describe("validateParams", () => {
        it("should return null for valid params", () => {
            expect(tool.validateParams({ path: "src/index.ts", name: "MyClass" })).toBeNull()
        })

        it("should return error for missing path", () => {
            expect(tool.validateParams({ name: "MyClass" })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })

        it("should return error for empty path", () => {
            expect(tool.validateParams({ path: "", name: "MyClass" })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })

        it("should return error for missing name", () => {
            expect(tool.validateParams({ path: "test.ts" })).toBe(
                "Parameter 'name' is required and must be a non-empty string",
            )
        })

        it("should return error for empty name", () => {
            expect(tool.validateParams({ path: "test.ts", name: "" })).toBe(
                "Parameter 'name' is required and must be a non-empty string",
            )
        })
    })

    describe("execute", () => {
        it("should return class code with line numbers", async () => {
            const lines = [
                "export class TestClass {",
                " private testProp: string",
                " testMethod() {",
                " return this.testProp",
                " }",
                "}",
            ]
            const cls = createMockClass({
                name: "TestClass",
                lineStart: 1,
                lineEnd: 6,
            })
            const ast = createMockAST([cls])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "TestClass" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetClassResult
            expect(data.path).toBe("test.ts")
            expect(data.name).toBe("TestClass")
            expect(data.startLine).toBe(1)
            expect(data.endLine).toBe(6)
            expect(data.content).toContain("1│export class TestClass {")
            expect(data.content).toContain("6│}")
        })

        it("should return class metadata", async () => {
            const lines = ["abstract class BaseService extends Service implements IService {", "}"]
            const cls = createMockClass({
                name: "BaseService",
                lineStart: 1,
                lineEnd: 2,
                isExported: false,
                isAbstract: true,
                extends: "Service",
                implements: ["IService"],
                methods: [
                    {
                        name: "init",
                        lineStart: 2,
                        lineEnd: 2,
                        params: [],
                        isAsync: true,
                        visibility: "public",
                        isStatic: false,
                    },
                    {
                        name: "destroy",
                        lineStart: 3,
                        lineEnd: 3,
                        params: [],
                        isAsync: false,
                        visibility: "protected",
                        isStatic: false,
                    },
                ],
                properties: [
                    {
                        name: "id",
                        line: 2,
                        visibility: "private",
                        isStatic: false,
                        isReadonly: true,
                    },
                ],
            })
            const ast = createMockAST([cls])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "service.ts", name: "BaseService" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetClassResult
            expect(data.isExported).toBe(false)
            expect(data.isAbstract).toBe(true)
            expect(data.extends).toBe("Service")
            expect(data.implements).toEqual(["IService"])
            expect(data.methods).toEqual(["init", "destroy"])
            expect(data.properties).toEqual(["id"])
        })

        it("should return error when AST not found", async () => {
            const storage = createMockStorage({ lines: [] }, null)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "MyClass" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toContain('AST not found for "test.ts"')
        })

        it("should return error when class not found", async () => {
            const ast = createMockAST([
                createMockClass({ name: "ClassA" }),
                createMockClass({ name: "ClassB" }),
            ])
            const storage = createMockStorage({ lines: [] }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "NonExistent" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toContain('Class "NonExistent" not found')
            expect(result.error).toContain("Available: ClassA, ClassB")
        })

        it("should return error when no classes available", async () => {
            const ast = createMockAST([])
            const storage = createMockStorage({ lines: [] }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "MyClass" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toContain("Available: none")
        })

        it("should return error for path outside project root", async () => {
            const ctx = createMockContext()

            const result = await tool.execute({ path: "../outside/file.ts", name: "MyClass" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toBe("Path contains traversal patterns")
        })

        it("should handle class with no extends", async () => {
            const lines = ["class Simple {}"]
            const cls = createMockClass({
                name: "Simple",
                lineStart: 1,
                lineEnd: 1,
                extends: undefined,
            })
            const ast = createMockAST([cls])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "Simple" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetClassResult
            expect(data.extends).toBeUndefined()
        })

        it("should handle class with empty implements", async () => {
            const lines = ["class NoInterfaces {}"]
            const cls = createMockClass({
                name: "NoInterfaces",
                lineStart: 1,
                lineEnd: 1,
                implements: [],
            })
            const ast = createMockAST([cls])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "NoInterfaces" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetClassResult
            expect(data.implements).toEqual([])
        })

        it("should handle class with no methods or properties", async () => {
            const lines = ["class Empty {}"]
            const cls = createMockClass({
                name: "Empty",
                lineStart: 1,
                lineEnd: 1,
                methods: [],
                properties: [],
            })
            const ast = createMockAST([cls])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "Empty" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetClassResult
            expect(data.methods).toEqual([])
            expect(data.properties).toEqual([])
        })

        it("should include callId in result", async () => {
            const lines = ["class Test {}"]
            const cls = createMockClass({ name: "Test", lineStart: 1, lineEnd: 1 })
            const ast = createMockAST([cls])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "Test" }, ctx)

            expect(result.callId).toMatch(/^get_class-\d+$/)
        })
    })
})
@@ -0,0 +1,305 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
import {
    GetFunctionTool,
    type GetFunctionResult,
} from "../../../../../src/infrastructure/tools/read/GetFunctionTool.js"
import type { ToolContext } from "../../../../../src/domain/services/ITool.js"
import type { IStorage } from "../../../../../src/domain/services/IStorage.js"
import type { FileAST, FunctionInfo } from "../../../../../src/domain/value-objects/FileAST.js"

function createMockFunction(overrides: Partial<FunctionInfo> = {}): FunctionInfo {
    return {
        name: "testFunction",
        lineStart: 1,
        lineEnd: 5,
        params: [{ name: "arg1", optional: false, hasDefault: false }],
        isAsync: false,
        isExported: true,
        returnType: "void",
        ...overrides,
    }
}

function createMockAST(functions: FunctionInfo[] = []): FileAST {
    return {
        imports: [],
        exports: [],
        functions,
        classes: [],
        interfaces: [],
        typeAliases: [],
        parseError: false,
    }
}

function createMockStorage(
    fileData: { lines: string[] } | null = null,
    ast: FileAST | null = null,
): IStorage {
    return {
        getFile: vi.fn().mockResolvedValue(fileData),
        setFile: vi.fn(),
        deleteFile: vi.fn(),
        getAllFiles: vi.fn(),
        getAST: vi.fn().mockResolvedValue(ast),
        setAST: vi.fn(),
        getMeta: vi.fn(),
        setMeta: vi.fn(),
        getSymbolIndex: vi.fn(),
        setSymbolIndex: vi.fn(),
        getDepsGraph: vi.fn(),
        setDepsGraph: vi.fn(),
        getConfig: vi.fn(),
        setConfig: vi.fn(),
        clear: vi.fn(),
    } as unknown as IStorage
}

function createMockContext(storage?: IStorage): ToolContext {
    return {
        projectRoot: "/test/project",
        storage: storage ?? createMockStorage(),
        requestConfirmation: vi.fn().mockResolvedValue(true),
        onProgress: vi.fn(),
    }
}

describe("GetFunctionTool", () => {
    let tool: GetFunctionTool

    beforeEach(() => {
        tool = new GetFunctionTool()
    })

    describe("metadata", () => {
        it("should have correct name", () => {
            expect(tool.name).toBe("get_function")
        })

        it("should have correct category", () => {
            expect(tool.category).toBe("read")
        })

        it("should not require confirmation", () => {
            expect(tool.requiresConfirmation).toBe(false)
        })

        it("should have correct parameters", () => {
            expect(tool.parameters).toHaveLength(2)
            expect(tool.parameters[0].name).toBe("path")
            expect(tool.parameters[0].required).toBe(true)
            expect(tool.parameters[1].name).toBe("name")
            expect(tool.parameters[1].required).toBe(true)
        })
    })

    describe("validateParams", () => {
        it("should return null for valid params", () => {
            expect(tool.validateParams({ path: "src/index.ts", name: "myFunc" })).toBeNull()
        })

        it("should return error for missing path", () => {
            expect(tool.validateParams({ name: "myFunc" })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })

        it("should return error for empty path", () => {
            expect(tool.validateParams({ path: "", name: "myFunc" })).toBe(
                "Parameter 'path' is required and must be a non-empty string",
            )
        })

        it("should return error for missing name", () => {
            expect(tool.validateParams({ path: "test.ts" })).toBe(
                "Parameter 'name' is required and must be a non-empty string",
            )
        })

        it("should return error for empty name", () => {
            expect(tool.validateParams({ path: "test.ts", name: "" })).toBe(
                "Parameter 'name' is required and must be a non-empty string",
            )
        })

        it("should return error for whitespace-only name", () => {
            expect(tool.validateParams({ path: "test.ts", name: " " })).toBe(
                "Parameter 'name' is required and must be a non-empty string",
            )
        })
    })

    describe("execute", () => {
        it("should return function code with line numbers", async () => {
            const lines = [
                "function testFunction(arg1) {",
                " console.log(arg1)",
                " return arg1",
                "}",
                "",
            ]
            const func = createMockFunction({
                name: "testFunction",
                lineStart: 1,
                lineEnd: 4,
            })
            const ast = createMockAST([func])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "testFunction" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetFunctionResult
            expect(data.path).toBe("test.ts")
            expect(data.name).toBe("testFunction")
            expect(data.startLine).toBe(1)
            expect(data.endLine).toBe(4)
            expect(data.content).toContain("1│function testFunction(arg1) {")
            expect(data.content).toContain("4│}")
        })

        it("should return function metadata", async () => {
            const lines = ["async function fetchData(url, options) {", " return fetch(url)", "}"]
            const func = createMockFunction({
                name: "fetchData",
                lineStart: 1,
                lineEnd: 3,
                isAsync: true,
                isExported: false,
                params: [
                    { name: "url", optional: false, hasDefault: false },
                    { name: "options", optional: true, hasDefault: false },
                ],
                returnType: "Promise<Response>",
            })
            const ast = createMockAST([func])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "api.ts", name: "fetchData" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetFunctionResult
            expect(data.isAsync).toBe(true)
            expect(data.isExported).toBe(false)
            expect(data.params).toEqual(["url", "options"])
            expect(data.returnType).toBe("Promise<Response>")
        })

        it("should return error when AST not found", async () => {
            const storage = createMockStorage({ lines: [] }, null)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "myFunc" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toContain('AST not found for "test.ts"')
        })

        it("should return error when function not found", async () => {
            const ast = createMockAST([
                createMockFunction({ name: "existingFunc" }),
                createMockFunction({ name: "anotherFunc" }),
            ])
            const storage = createMockStorage({ lines: [] }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "nonExistent" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toContain('Function "nonExistent" not found')
            expect(result.error).toContain("Available: existingFunc, anotherFunc")
        })

        it("should return error when no functions available", async () => {
            const ast = createMockAST([])
            const storage = createMockStorage({ lines: [] }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "myFunc" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toContain("Available: none")
        })

        it("should return error for path outside project root", async () => {
            const ctx = createMockContext()

            const result = await tool.execute({ path: "../outside/file.ts", name: "myFunc" }, ctx)

            expect(result.success).toBe(false)
            expect(result.error).toBe("Path contains traversal patterns")
        })

        it("should pad line numbers correctly for large files", async () => {
            const lines = Array.from({ length: 200 }, (_, i) => `line ${i + 1}`)
            const func = createMockFunction({
                name: "bigFunction",
                lineStart: 95,
                lineEnd: 105,
            })
            const ast = createMockAST([func])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "big.ts", name: "bigFunction" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetFunctionResult
            expect(data.content).toContain(" 95│line 95")
            expect(data.content).toContain("100│line 100")
            expect(data.content).toContain("105│line 105")
        })

        it("should include callId in result", async () => {
            const lines = ["function test() {}"]
            const func = createMockFunction({ name: "test", lineStart: 1, lineEnd: 1 })
            const ast = createMockAST([func])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "test" }, ctx)

            expect(result.callId).toMatch(/^get_function-\d+$/)
        })

        it("should handle function with no return type", async () => {
            const lines = ["function noReturn() {}"]
            const func = createMockFunction({
                name: "noReturn",
                lineStart: 1,
                lineEnd: 1,
                returnType: undefined,
            })
            const ast = createMockAST([func])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "noReturn" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetFunctionResult
            expect(data.returnType).toBeUndefined()
        })

        it("should handle function with no params", async () => {
            const lines = ["function noParams() {}"]
            const func = createMockFunction({
                name: "noParams",
                lineStart: 1,
                lineEnd: 1,
                params: [],
            })
            const ast = createMockAST([func])
            const storage = createMockStorage({ lines }, ast)
            const ctx = createMockContext(storage)

            const result = await tool.execute({ path: "test.ts", name: "noParams" }, ctx)

            expect(result.success).toBe(true)
            const data = result.data as GetFunctionResult
            expect(data.params).toEqual([])
        })
    })
})