feat: introduce UA Blocker Middleware (#1181)
* feat: create AI bot blocker middleware
* chore: add changeset
* fix: uppercase function called too late
* chore: don't version-control robots.json
* chore: track `robots.json`
* ci: add `@hono/ai-robots-txt` workflow script
* fix: change initial version
* feat: add automatic `robots.json` sync from upstream
* feat!: change package name and architecture
* refactor(ua-blocker): prebuild compiled regex (#1)
* fix: add json data files to tsconfig
* chore: rename workflow files
* fix: test if string _contains_ "Yes"
  It might be a markdown link, so not the exact string, but "[Yes](<link>)"
* fix: tests reflect the fixed "Yes" check
* feat: move generator back to prebuild
  The generated regex should use the version-controlled robots.json, not the upstream file directly
* chore: add .zed
* chore: remove unused files
* fix: properly setup workspace before running scripts
* chore: remove `prebuild` script from `build`, `typecheck`, and `test`
* chore: run `getrobotstxt` and `prebuild`
* fix: export `RegExp`s, not `string[]`s
* chore: mention RegExp and uppercase matching in docs
* fix: adapt tests to regex exports
* chore: add tests for direct regex passing
* chore: format code

---------

Co-authored-by: Jonathan Haines <jonno.haines@gmail.com>
parent 2f57dd5ebb
commit be73703184
@ -0,0 +1,5 @@
---
'@hono/ua-blocker': minor
---

Create new AI bots blocker middleware
@ -0,0 +1,42 @@
name: Sync robots.json

on:
  schedule:
    # Runs every day at midnight
    - cron: '0 0 * * *'
  workflow_dispatch:

jobs:
  sync-and-pr:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js and Yarn
        uses: actions/setup-node@v4
        with:
          node-version: '22'
          cache: 'yarn'

      - run: yarn workspaces focus hono-middleware @hono/ua-blocker

      - name: Fetch latest robots.json
        run: yarn workspace @hono/ua-blocker getrobotstxt

      - name: Generate data
        run: yarn workspace @hono/ua-blocker prebuild

      - name: Create Pull Request if changes exist
        uses: peter-evans/create-pull-request@v6
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: 'chore(ua-blocker): update robots.json from upstream'
          title: 'chore(ua-blocker): update robots.json from upstream'
          body: 'This PR was automatically created after detecting changes in the upstream `robots.json` file.'
          branch: 'chore/sync-robots-json'
          delete-branch: true
          # Assignee and labels
          assignees: finxol
          reviewers: finxol
          labels: robots.json
@ -19,4 +19,7 @@ sandbox
# Claude Code local files
CLAUDE.local.md
settings.local.json

# Code editor
.zed
@ -1,3 +1,4 @@
.changeset
.vscode
.yarn
**/generated.ts
@ -0,0 +1,173 @@
# User Agent based blocker middleware for Hono

[](https://codecov.io/github/honojs/middleware)

The UA blocker middleware for [Hono](https://honojs.dev) applications.
You can block requests based on their User-Agent header and generate a robots.txt file to discourage them.

This package also exports AI bots lists, allowing you to easily block known AI bots.

## Usage

### UA Blocker Middleware

Block requests based on a list of forbidden user agents:

```ts
import { uaBlocker } from '@hono/ua-blocker'
import { Hono } from 'hono'

const app = new Hono()

app.use(
  '*',
  uaBlocker({
    // Add your custom blocklist here
    // Can be either a list of User Agents or a RegExp directly
    blocklist: ['ForbiddenBot', 'Not You'], // or /(FORBIDDENBOT|NOT YOU)/
  })
)
app.get('/', (c) => c.text('Hello World'))

export default app
```

### Block all known AI bots

We export a ready-to-use list of AI bots sourced from [ai.robots.txt](https://github.com/ai-robots-txt/ai.robots.txt):

```ts
import { uaBlocker } from '@hono/ua-blocker'
import { aiBots } from '@hono/ua-blocker/ai-bots'
import { Hono } from 'hono'

const app = new Hono()

app.use(
  '*',
  uaBlocker({
    blocklist: aiBots,
  })
)
app.get('/', (c) => c.text('Hello World'))

export default app
```

### Block only non-respecting bots

Allow bots that respect robots.txt and block only the known non-respecting ones:

```ts
import { uaBlocker } from '@hono/ua-blocker'
import { nonRespectingAiBots, useAiRobotsTxt } from '@hono/ua-blocker/ai-bots'
import { Hono } from 'hono'

const app = new Hono()

app.use(
  '*',
  uaBlocker({
    blocklist: nonRespectingAiBots,
  })
)
// serve robots.txt
app.use('/robots.txt', useAiRobotsTxt())
app.get('/', (c) => c.text('Hello World'))

export default app
```

### Serve a ready-made AI bots robots.txt

Serve a robots.txt file that disallows all known AI bots:

```ts
import { useAiRobotsTxt } from '@hono/ua-blocker/ai-bots'
import { Hono } from 'hono'

const app = new Hono()

// Serve robots.txt at /robots.txt
app.use('/robots.txt', useAiRobotsTxt())
app.get('/', (c) => c.text('Hello World'))

export default app
```

### Extend the robots.txt content

Import the robots.txt content directly and extend it with your own rules:

```ts
import { AI_ROBOTS_TXT } from '@hono/ua-blocker/ai-bots'
import { Hono } from 'hono'

console.log(AI_ROBOTS_TXT)
// Output:
// User-agent: GPTBot
// User-agent: ChatGPT-User
// User-agent: Bytespider
// User-agent: CCBot
// ...
// Disallow: /

const app = new Hono()

app.use('/robots.txt', (c) => {
  const robotsTxt = AI_ROBOTS_TXT + '\nUser-agent: GoogleBot\nAllow: /'
  return c.text(robotsTxt, 200)
  // Output:
  // User-agent: GPTBot
  // User-agent: ChatGPT-User
  // User-agent: Bytespider
  // User-agent: CCBot
  // ...
  // Disallow: /
  // User-agent: GoogleBot
  // Allow: /
})
```

## API

### `@hono/ua-blocker`

#### `uaBlocker(options)`

Middleware that blocks requests based on their User-Agent header.

**Parameters:**

- `options.blocklist` (`string[] | RegExp`, default: `[]`) - The list of user agents to block.
  If a RegExp is passed, it should match UPPERCASE User-Agent strings (see the example below).

**Returns:** Hono middleware function
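For example, because the middleware matches the pattern against the uppercased User-Agent header, a hand-written RegExp needs uppercase alternatives. A minimal sketch (the bot names here are placeholders, not real crawlers):

```ts
import { uaBlocker } from '@hono/ua-blocker'
import { Hono } from 'hono'

const app = new Hono()

// Blocks e.g. "SomeScraper/1.0" or "AnnoyingBot", since the incoming
// User-Agent is uppercased before the pattern is tested.
app.use(
  '*',
  uaBlocker({
    blocklist: /(SOMESCRAPER|ANNOYINGBOT)/,
  })
)
app.get('/', (c) => c.text('Hello World'))

export default app
```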

### `@hono/ua-blocker/ai-bots`

#### `aiBots`

Pre-made list of AI bot user agents sourced from [ai.robots.txt](https://github.com/ai-robots-txt/ai.robots.txt),
ready to be passed to `uaBlocker()`.

#### `nonRespectingAiBots`

Subset of the [`aiBots`](#aibots) list that omits bots known to respect `robots.txt` directives, so those bots are allowed through.

#### `AI_ROBOTS_TXT`

Generated robots.txt content that disallows all known AI bots.

#### `useAiRobotsTxt()`

Middleware that serves the generated robots.txt content for known AI bots.

**Returns:** Hono middleware function

## Author

finxol <https://github.com/finxol>

## License

MIT
@ -0,0 +1,64 @@
{
  "name": "@hono/ua-blocker",
  "version": "0.0.0",
  "description": "User agent-based blocker for Hono",
  "type": "module",
  "module": "dist/index.js",
  "types": "dist/index.d.ts",
  "files": [
    "dist"
  ],
  "scripts": {
    "prebuild": "node --experimental-strip-types ./script/prebuild.ts",
    "build": "tsup ./src/index.ts ./src/ai-bots.ts",
    "prepack": "yarn build",
    "publint": "attw --pack --profile node16 && publint",
    "typecheck": "tsc -b tsconfig.json",
    "test": "vitest",
    "getrobotstxt": "node --experimental-strip-types ./script/get-robots-txt.ts"
  },
  "exports": {
    ".": {
      "import": {
        "types": "./dist/index.d.ts",
        "default": "./dist/index.js"
      },
      "require": {
        "types": "./dist/index.d.cts",
        "default": "./dist/index.cjs"
      }
    },
    "./ai-bots": {
      "import": {
        "types": "./dist/ai-bots.d.ts",
        "default": "./dist/ai-bots.js"
      },
      "require": {
        "types": "./dist/ai-bots.d.cts",
        "default": "./dist/ai-bots.cjs"
      }
    }
  },
  "license": "MIT",
  "publishConfig": {
    "registry": "https://registry.npmjs.org",
    "access": "public"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/honojs/middleware.git",
    "directory": "packages/ua-blocker"
  },
  "homepage": "https://github.com/honojs/middleware",
  "peerDependencies": {
    "hono": "*"
  },
  "devDependencies": {
    "@arethetypeswrong/cli": "^0.17.4",
    "@types/node": "^22.15.24",
    "publint": "^0.3.9",
    "tsup": "^8.4.0",
    "typescript": "^5.8.2",
    "vitest": "^3.0.8"
  }
}
@ -0,0 +1,14 @@
import { mkdir, writeFile } from 'node:fs/promises'

const data = await fetch(
  'https://raw.githubusercontent.com/ai-robots-txt/ai.robots.txt/refs/heads/main/robots.json'
).then((res) => res.json())

// ensure the data directory exists
const dataDir = 'src/data'
await mkdir(dataDir, { recursive: true })

// write the json file
await writeFile(`${dataDir}/robots.json`, JSON.stringify(data, null, 2))

console.log('☑︎ Fetched robots.json data successfully')
@ -0,0 +1,98 @@
import ts from 'typescript'
import { writeFile } from 'node:fs/promises'
import robotsJson from '../src/data/robots.json' with { type: 'json' }
import { escape } from '../src/escape.ts'

interface Bot {
  operator?: string
  respect?: string
  function?: string
  frequency?: string
  description?: string
}

const bots = new Map<string, Bot>(Object.entries(robotsJson))

// robots.txt User-agent directives
const userAgentDirectives = []

// Bot names and AST expressions used to build the regexes and array literals
const allBots = []
const nonRespectingBots = []
const allBotsExpressions = []
const nonRespectingBotsExpressions = []

for (const [key, value] of bots) {
  userAgentDirectives.push(`User-agent: ${key}`)
  const expression = ts.factory.createStringLiteral(key)
  const item = escape(key.toUpperCase())
  allBots.push(item)
  allBotsExpressions.push(expression)

  if (!value?.respect?.includes('Yes')) {
    nonRespectingBots.push(item)
    nonRespectingBotsExpressions.push(expression)
  }
}

const statements = [
  // export const ROBOTS_TXT = '...'
  createExportConstStatement(
    'ROBOTS_TXT',
    ts.factory.createNoSubstitutionTemplateLiteral(
      `${userAgentDirectives.join('\n')}\nDisallow: /\n`
    )
  ),
  // export const ALL_BOTS = [...]
  createExportConstStatement(
    'ALL_BOTS',
    ts.factory.createArrayLiteralExpression(allBotsExpressions)
  ),
  // export const NON_RESPECTING_BOTS = [...]
  createExportConstStatement(
    'NON_RESPECTING_BOTS',
    ts.factory.createArrayLiteralExpression(nonRespectingBotsExpressions)
  ),
  // export const ALL_BOTS_REGEX = /(...)/;
  createExportConstStatement(
    'ALL_BOTS_REGEX',
    ts.factory.createRegularExpressionLiteral(`/(${allBots.join('|')})/`)
  ),
  // export const NON_RESPECTING_BOTS_REGEX = /(...)/;
  createExportConstStatement(
    'NON_RESPECTING_BOTS_REGEX',
    ts.factory.createRegularExpressionLiteral(`/(${nonRespectingBots.join('|')})/`)
  ),
]

ts.addSyntheticLeadingComment(
  statements[0],
  ts.SyntaxKind.SingleLineCommentTrivia,
  ' This file is generated by script/prebuild.ts. Do not edit manually.'
)

const generatedFile = ts.factory.createSourceFile(
  statements,
  ts.factory.createToken(ts.SyntaxKind.EndOfFileToken),
  ts.NodeFlags.None
)

const printer = ts.createPrinter({ newLine: ts.NewLineKind.LineFeed })

await writeFile(
  new URL('../src/generated.ts', import.meta.url),
  printer.printFile(generatedFile),
  'utf-8'
)

function createExportConstStatement(name: string, initializer: ts.Expression) {
  return ts.factory.createVariableStatement(
    [ts.factory.createModifier(ts.SyntaxKind.ExportKeyword)],
    ts.factory.createVariableDeclarationList(
      [ts.factory.createVariableDeclaration(name, undefined, undefined, initializer)],
      ts.NodeFlags.Const
    )
  )
}

console.log('☑︎ Generated bots file successfully.')
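For reference, the prebuild script emits a `src/generated.ts` roughly shaped like the sketch below; the bot names are illustrative placeholders, not the full generated list, and exact formatting depends on the TypeScript printer:

```ts
// This file is generated by script/prebuild.ts. Do not edit manually.
export const ROBOTS_TXT = `User-agent: GPTBot
User-agent: Bytespider
Disallow: /
`
export const ALL_BOTS = ['GPTBot', 'Bytespider']
export const NON_RESPECTING_BOTS = ['Bytespider']
export const ALL_BOTS_REGEX = /(GPTBOT|BYTESPIDER)/
export const NON_RESPECTING_BOTS_REGEX = /(BYTESPIDER)/
```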
@ -0,0 +1,270 @@
import { Hono } from 'hono'
import { aiBots, nonRespectingAiBots, AI_ROBOTS_TXT, useAiRobotsTxt } from './ai-bots'
import {
  ALL_BOTS,
  NON_RESPECTING_BOTS,
  ROBOTS_TXT,
  ALL_BOTS_REGEX,
  NON_RESPECTING_BOTS_REGEX,
} from './generated'

describe('AI Bots module', () => {
  describe('aiBots export', () => {
    it('Should export ALL_BOTS_REGEX from generated', () => {
      expect(aiBots).toBe(ALL_BOTS_REGEX)
    })

    it('Should be a RegExp object', () => {
      expect(aiBots instanceof RegExp).toBe(true)
      expect(aiBots.source.length).toBeGreaterThan(0)
      expect(aiBots.toString()).toMatch(/^\/.*\/$/)
    })

    it('Should include known AI bots', () => {
      expect(aiBots.test('GPTBOT')).toBe(true)
      expect(aiBots.test('CLAUDEBOT')).toBe(true)
      expect(aiBots.test('BYTESPIDER')).toBe(true)
      expect(aiBots.test('CHATGPT-USER')).toBe(true)
    })

    it('Should be properly formatted as a regex', () => {
      expect(aiBots.source).toContain('|')
    })
  })

  describe('nonRespectingAiBots export', () => {
    it('Should export NON_RESPECTING_BOTS_REGEX from generated', () => {
      expect(nonRespectingAiBots).toBe(NON_RESPECTING_BOTS_REGEX)
    })

    it('Should be a RegExp object', () => {
      expect(nonRespectingAiBots instanceof RegExp).toBe(true)
      expect(nonRespectingAiBots.source.length).toBeGreaterThan(0)
      expect(nonRespectingAiBots.toString()).toMatch(/^\/.*\/$/)
    })

    it('Should be a subset of aiBots', () => {
      // Check if all non-respecting bots are included in the aiBots pattern
      // by testing the original string array against both regexes
      NON_RESPECTING_BOTS.forEach((bot) => {
        expect(aiBots.test(bot.toUpperCase())).toBe(true)
      })
    })

    it('Should include known non-respecting bots', () => {
      expect(nonRespectingAiBots.test('BYTESPIDER')).toBe(true)
      expect(nonRespectingAiBots.test('IASKSPIDER/2.0')).toBe(true)
    })

    it('Should not include known respecting bots', () => {
      expect(nonRespectingAiBots.test('GPTBOT')).toBe(false)
      expect(nonRespectingAiBots.test('CHATGPT-USER')).toBe(false)
    })

    it('Should have a pattern that is shorter than aiBots pattern', () => {
      expect(nonRespectingAiBots.source.length).toBeLessThan(aiBots.source.length)
    })
  })

  describe('AI_ROBOTS_TXT export', () => {
    it('Should export ROBOTS_TXT from generated', () => {
      expect(AI_ROBOTS_TXT).toBe(ROBOTS_TXT)
    })

    it('Should be a non-empty string', () => {
      expect(typeof AI_ROBOTS_TXT).toBe('string')
      expect(AI_ROBOTS_TXT.length).toBeGreaterThan(0)
    })

    it('Should contain User-agent directives', () => {
      expect(AI_ROBOTS_TXT).toContain('User-agent:')
    })

    it('Should contain Disallow directive', () => {
      expect(AI_ROBOTS_TXT).toContain('Disallow: /')
    })

    it('Should have proper robots.txt format', () => {
      const lines = AI_ROBOTS_TXT.split('\n')
      const userAgentLines = lines.filter((line) => line.startsWith('User-agent:'))
      const disallowIndex = lines.findIndex((line) => line === 'Disallow: /')

      expect(userAgentLines.length).toBeGreaterThan(0)
      expect(disallowIndex).toBeGreaterThan(0)
      expect(lines[lines.length - 1]).toBe('')
    })

    it('Should include all AI bots', () => {
      ALL_BOTS.forEach((bot) => {
        expect(AI_ROBOTS_TXT).toContain(`User-agent: ${bot}`)
      })
    })
  })

  describe('useAiRobotsTxt function', () => {
    it('Should return a function', () => {
      const middleware = useAiRobotsTxt()
      expect(typeof middleware).toBe('function')
    })

    it('Should create working Hono middleware', async () => {
      const app = new Hono()
      app.use('/robots.txt', useAiRobotsTxt())

      const res = await app.request('/robots.txt')
      expect(res.status).toBe(200)
    })

    it('Should serve the correct robots.txt content', async () => {
      const app = new Hono()
      app.use('/robots.txt', useAiRobotsTxt())

      const res = await app.request('/robots.txt')
      const content = await res.text()

      expect(content).toBe(AI_ROBOTS_TXT)
    })

    it('Should set correct content type', async () => {
      const app = new Hono()
      app.use('/robots.txt', useAiRobotsTxt())

      const res = await app.request('/robots.txt')
      expect(res.headers.get('Content-Type')).toBe('text/plain; charset=UTF-8')
    })

    it('Should work with different paths', async () => {
      const app = new Hono()
      app.use('/custom-robots.txt', useAiRobotsTxt())
      app.use('/api/robots.txt', useAiRobotsTxt())

      const res1 = await app.request('/custom-robots.txt')
      const res2 = await app.request('/api/robots.txt')

      expect(res1.status).toBe(200)
      expect(res2.status).toBe(200)
      expect(await res1.text()).toBe(AI_ROBOTS_TXT)
      expect(await res2.text()).toBe(AI_ROBOTS_TXT)
    })

    it('Should not interfere with other routes', async () => {
      const app = new Hono()
      app.use('/robots.txt', useAiRobotsTxt())
      app.get('/other', (c) => c.text('other content'))
      app.get('/api/data', (c) => c.json({ data: 'test' }))

      const robotsRes = await app.request('/robots.txt')
      const otherRes = await app.request('/other')
      const apiRes = await app.request('/api/data')

      expect(robotsRes.status).toBe(200)
      expect(await robotsRes.text()).toBe(AI_ROBOTS_TXT)

      expect(otherRes.status).toBe(200)
      expect(await otherRes.text()).toBe('other content')

      expect(apiRes.status).toBe(200)
      expect(await apiRes.json()).toEqual({ data: 'test' })
    })

    it('Should handle requests with different methods', async () => {
      const app = new Hono()
      app.use('/robots.txt', useAiRobotsTxt())

      // GET request
      const getRes = await app.request('/robots.txt', { method: 'GET' })
      expect(getRes.status).toBe(200)
      expect(await getRes.text()).toBe(AI_ROBOTS_TXT)

      // HEAD request
      const headRes = await app.request('/robots.txt', { method: 'HEAD' })
      expect(headRes.status).toBe(200)
      expect(headRes.headers.get('Content-Type')).toBe('text/plain; charset=UTF-8')

      // POST request should also work since it's middleware
      const postRes = await app.request('/robots.txt', { method: 'POST' })
      expect(postRes.status).toBe(200)
      expect(await postRes.text()).toBe(AI_ROBOTS_TXT)
    })

    it('Should serve consistent content across multiple requests', async () => {
      const app = new Hono()
      app.use('/robots.txt', useAiRobotsTxt())

      const requests = Array.from({ length: 5 }, () => app.request('/robots.txt'))
      const responses = await Promise.all(requests)
      const contents = await Promise.all(responses.map((res) => res.text()))

      responses.forEach((res) => {
        expect(res.status).toBe(200)
        expect(res.headers.get('Content-Type')).toBe('text/plain; charset=UTF-8')
      })

      contents.forEach((content) => {
        expect(content).toBe(AI_ROBOTS_TXT)
      })

      // All contents should be identical
      const uniqueContents = [...new Set(contents)]
      expect(uniqueContents.length).toBe(1)
    })
  })

  describe('Integration tests', () => {
    it('Should work together with uaBlocker middleware', async () => {
      // This test ensures the ai-bots module integrates well with the main uaBlocker
      const { uaBlocker } = await import('./index')

      const app = new Hono()
      app.use('/robots.txt', useAiRobotsTxt())
      app.use('*', uaBlocker({ blocklist: nonRespectingAiBots }))
      app.get('/', (c) => c.text('Hello World'))

      // Should serve robots.txt
      const robotsRes = await app.request('/robots.txt')
      expect(robotsRes.status).toBe(200)
      expect(await robotsRes.text()).toBe(AI_ROBOTS_TXT)

      // Should block non-respecting bots
      const blockedRes = await app.request('/', {
        headers: { 'User-Agent': 'Bytespider' },
      })
      expect(blockedRes.status).toBe(403)

      // Should allow respecting bots
      const allowedRes = await app.request('/', {
        headers: { 'User-Agent': 'GPTBot' },
      })
      expect(allowedRes.status).toBe(200)
      expect(await allowedRes.text()).toBe('Hello World')
    })

    it('Should work with demo pattern', async () => {
      const { uaBlocker } = await import('./index')

      const app = new Hono()
      app.use('*', uaBlocker({ blocklist: nonRespectingAiBots }))
      app.use('/robots.txt', useAiRobotsTxt())
      app.get('/', (c) => c.text('Hello World'))

      // Test the same pattern as shown in demo.ts
      const robotsRes = await app.request('/robots.txt')
      expect(robotsRes.status).toBe(200)
      expect(robotsRes.headers.get('Content-Type')).toBe('text/plain; charset=UTF-8')

      const homeRes = await app.request('/')
      expect(homeRes.status).toBe(200)
      expect(await homeRes.text()).toBe('Hello World')
    })
  })

  describe('Data consistency validation', () => {
    it('Should have robots.txt that matches bot lists', () => {
      const userAgentLines = AI_ROBOTS_TXT.split('\n')
        .filter((line) => line.startsWith('User-agent:'))
        .map((line) => line.replace('User-agent: ', ''))

      expect(userAgentLines.sort()).toEqual(ALL_BOTS.sort())
    })
  })
})
@ -0,0 +1,14 @@
import { createMiddleware } from 'hono/factory'
import { ROBOTS_TXT } from './generated'

export {
  ALL_BOTS_REGEX as aiBots,
  NON_RESPECTING_BOTS_REGEX as nonRespectingAiBots,
  ROBOTS_TXT as AI_ROBOTS_TXT,
} from './generated'

export function useAiRobotsTxt() {
  return createMiddleware(async (c) => {
    return c.text(ROBOTS_TXT, 200)
  })
}
@ -0,0 +1,541 @@
{
  "AI2Bot": {
    "operator": "[Ai2](https://allenai.org/crawler)",
    "respect": "Yes",
    "function": "Content is used to train open language models.",
    "frequency": "No information provided.",
    "description": "Explores 'certain domains' to find web content."
  },
  "Ai2Bot-Dolma": {
    "description": "Explores 'certain domains' to find web content.",
    "frequency": "No information provided.",
    "function": "Content is used to train open language models.",
    "operator": "[Ai2](https://allenai.org/crawler)",
    "respect": "Yes"
  },
  "aiHitBot": {
    "operator": "[aiHit](https://www.aihitdata.com/about)",
    "respect": "Yes",
    "function": "A massive, artificial intelligence/machine learning, automated system.",
    "frequency": "No information provided.",
    "description": "Scrapes data for AI systems."
  },
  "Amazonbot": {
    "operator": "Amazon",
    "respect": "Yes",
    "function": "Service improvement and enabling answers for Alexa users.",
    "frequency": "No information provided.",
    "description": "Includes references to crawled website when surfacing answers via Alexa; does not clearly outline other uses."
  },
  "Andibot": {
    "operator": "[Andi](https://andisearch.com/)",
    "respect": "Unclear at this time",
    "function": "Search engine using generative AI, AI Search Assistant",
    "frequency": "No information provided.",
    "description": "Scrapes website and provides AI summary."
  },
  "anthropic-ai": {
    "operator": "[Anthropic](https://www.anthropic.com)",
    "respect": "Unclear at this time.",
    "function": "Scrapes data to train Anthropic's AI products.",
    "frequency": "No information provided.",
    "description": "Scrapes data to train LLMs and AI products offered by Anthropic."
  },
  "Applebot": {
    "operator": "Unclear at this time.",
    "respect": "Unclear at this time.",
    "function": "AI Search Crawlers",
    "frequency": "Unclear at this time.",
    "description": "Applebot is a web crawler used by Apple to index search results that allow the Siri AI Assistant to answer user questions. Siri's answers normally contain references to the website. More info can be found at https://darkvisitors.com/agents/agents/applebot"
  },
  "Applebot-Extended": {
    "operator": "[Apple](https://support.apple.com/en-us/119829#datausage)",
    "respect": "Yes",
    "function": "Powers features in Siri, Spotlight, Safari, Apple Intelligence, and others.",
    "frequency": "Unclear at this time.",
    "description": "Apple has a secondary user agent, Applebot-Extended ... [that is] used to train Apple's foundation models powering generative AI features across Apple products, including Apple Intelligence, Services, and Developer Tools."
  },
  "bedrockbot": {
    "operator": "[Amazon](https://amazon.com)",
    "respect": "[Yes](https://docs.aws.amazon.com/bedrock/latest/userguide/webcrawl-data-source-connector.html#configuration-webcrawl-connector)",
    "function": "Data scraping for custom AI applications.",
    "frequency": "Unclear at this time.",
    "description": "Connects to and crawls URLs that have been selected for use in a user's AWS bedrock application."
  },
  "Brightbot 1.0": {
    "operator": "Browsing.ai",
    "respect": "Unclear at this time.",
    "function": "LLM/AI training.",
    "frequency": "Unclear at this time.",
    "description": "Scrapes data to train LLMs and AI products focused on website customer support."
  },
  "Bytespider": {
    "operator": "ByteDance",
    "respect": "No",
    "function": "LLM training.",
    "frequency": "Unclear at this time.",
    "description": "Downloads data to train LLMS, including ChatGPT competitors."
  },
  "CCBot": {
    "operator": "[Common Crawl Foundation](https://commoncrawl.org)",
    "respect": "[Yes](https://commoncrawl.org/ccbot)",
    "function": "Provides open crawl dataset, used for many purposes, including Machine Learning/AI.",
    "frequency": "Monthly at present.",
    "description": "Web archive going back to 2008. [Cited in thousands of research papers per year](https://commoncrawl.org/research-papers)."
  },
  "ChatGPT-User": {
    "operator": "[OpenAI](https://openai.com)",
    "respect": "Yes",
    "function": "Takes action based on user prompts.",
    "frequency": "Only when prompted by a user.",
    "description": "Used by plugins in ChatGPT to answer queries based on user input."
  },
  "Claude-SearchBot": {
    "operator": "[Anthropic](https://www.anthropic.com)",
    "respect": "[Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler)",
    "function": "Claude-SearchBot navigates the web to improve search result quality for users. It analyzes online content specifically to enhance the relevance and accuracy of search responses.",
    "frequency": "No information provided.",
    "description": "Claude-SearchBot navigates the web to improve search result quality for users. It analyzes online content specifically to enhance the relevance and accuracy of search responses."
  },
  "Claude-User": {
    "operator": "[Anthropic](https://www.anthropic.com)",
    "respect": "[Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler)",
    "function": "Claude-User supports Claude AI users. When individuals ask questions to Claude, it may access websites using a Claude-User agent.",
    "frequency": "No information provided.",
    "description": "Claude-User supports Claude AI users. When individuals ask questions to Claude, it may access websites using a Claude-User agent."
  },
  "Claude-Web": {
    "operator": "Anthropic",
    "respect": "Unclear at this time.",
    "function": "Undocumented AI Agents",
    "frequency": "Unclear at this time.",
    "description": "Claude-Web is an AI-related agent operated by Anthropic. It's currently unclear exactly what it's used for, since there's no official documentation. If you can provide more detail, please contact us. More info can be found at https://darkvisitors.com/agents/agents/claude-web"
  },
  "ClaudeBot": {
    "operator": "[Anthropic](https://www.anthropic.com)",
    "respect": "[Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler)",
    "function": "Scrapes data to train Anthropic's AI products.",
    "frequency": "No information provided.",
    "description": "Scrapes data to train LLMs and AI products offered by Anthropic."
  },
  "cohere-ai": {
    "operator": "[Cohere](https://cohere.com)",
    "respect": "Unclear at this time.",
    "function": "Retrieves data to provide responses to user-initiated prompts.",
    "frequency": "Takes action based on user prompts.",
    "description": "Retrieves data based on user prompts."
  },
  "cohere-training-data-crawler": {
    "operator": "Cohere to download training data for its LLMs (Large Language Models) that power its enterprise AI products",
    "respect": "Unclear at this time.",
    "function": "AI Data Scrapers",
    "frequency": "Unclear at this time.",
    "description": "cohere-training-data-crawler is a web crawler operated by Cohere to download training data for its LLMs (Large Language Models) that power its enterprise AI products. More info can be found at https://darkvisitors.com/agents/agents/cohere-training-data-crawler"
  },
  "Cotoyogi": {
    "operator": "[ROIS](https://ds.rois.ac.jp/en_center8/en_crawler/)",
    "respect": "Yes",
    "function": "AI LLM Scraper.",
    "frequency": "No information provided.",
    "description": "Scrapes data for AI training in Japanese language."
  },
  "Crawlspace": {
    "operator": "[Crawlspace](https://crawlspace.dev)",
    "respect": "[Yes](https://news.ycombinator.com/item?id=42756654)",
    "function": "Scrapes data",
    "frequency": "Unclear at this time.",
    "description": "Provides crawling services for any purpose, probably including AI model training."
  },
  "Diffbot": {
    "operator": "[Diffbot](https://www.diffbot.com/)",
    "respect": "At the discretion of Diffbot users.",
    "function": "Aggregates structured web data for monitoring and AI model training.",
    "frequency": "Unclear at this time.",
    "description": "Diffbot is an application used to parse web pages into structured data; this data is used for monitoring or AI model training."
  },
  "DuckAssistBot": {
    "operator": "Unclear at this time.",
    "respect": "Unclear at this time.",
    "function": "AI Assistants",
    "frequency": "Unclear at this time.",
    "description": "DuckAssistBot is used by DuckDuckGo's DuckAssist feature to fetch content and generate realtime AI answers to user searches. More info can be found at https://darkvisitors.com/agents/agents/duckassistbot"
  },
  "EchoboxBot": {
    "operator": "[Echobox](https://echobox.com)",
    "respect": "Unclear at this time.",
    "function": "Data collection to support AI-powered products.",
    "frequency": "Unclear at this time.",
    "description": "Supports company's AI-powered social and email management products."
  },
  "FacebookBot": {
    "operator": "Meta/Facebook",
    "respect": "[Yes](https://developers.facebook.com/docs/sharing/bot/)",
    "function": "Training language models",
    "frequency": "Up to 1 page per second",
    "description": "Officially used for training Meta \"speech recognition technology,\" unknown if used to train Meta AI specifically."
  },
  "Factset_spyderbot": {
    "operator": "[Factset](https://www.factset.com/ai)",
    "respect": "Unclear at this time.",
    "function": "AI model training.",
    "frequency": "No information provided.",
    "description": "Scrapes data for AI training."
  },
  "FirecrawlAgent": {
    "operator": "[Firecrawl](https://www.firecrawl.dev/)",
    "respect": "Yes",
    "function": "AI scraper and LLM training",
    "frequency": "No information provided.",
    "description": "Scrapes data for AI systems and LLM training."
  },
  "FriendlyCrawler": {
    "description": "Unclear who the operator is; but data is used for training/machine learning.",
    "frequency": "Unclear at this time.",
    "function": "We are using the data from the crawler to build datasets for machine learning experiments.",
    "operator": "Unknown",
    "respect": "[Yes](https://imho.alex-kunz.com/2024/01/25/an-update-on-friendly-crawler)"
  },
  "Google-CloudVertexBot": {
    "operator": "Google",
    "respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)",
    "function": "Build and manage AI models for businesses employing Vertex AI",
    "frequency": "No information.",
    "description": "Google-CloudVertexBot crawls sites on the site owners' request when building Vertex AI Agents."
  },
  "Google-Extended": {
    "operator": "Google",
    "respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)",
    "function": "LLM training.",
    "frequency": "No information.",
    "description": "Used to train Gemini and Vertex AI generative APIs. Does not impact a site's inclusion or ranking in Google Search."
  },
  "GoogleOther": {
    "description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\"",
    "frequency": "No information.",
    "function": "Scrapes data.",
    "operator": "Google",
    "respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)"
  },
  "GoogleOther-Image": {
    "description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\"",
    "frequency": "No information.",
    "function": "Scrapes data.",
    "operator": "Google",
    "respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)"
  },
  "GoogleOther-Video": {
    "description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\"",
    "frequency": "No information.",
    "function": "Scrapes data.",
    "operator": "Google",
    "respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)"
  },
  "GPTBot": {
    "operator": "[OpenAI](https://openai.com)",
    "respect": "Yes",
    "function": "Scrapes data to train OpenAI's products.",
    "frequency": "No information.",
    "description": "Data is used to train current and future models, removed paywalled data, PII and data that violates the company's policies."
  },
  "iaskspider/2.0": {
    "description": "Used to provide answers to user queries.",
    "frequency": "Unclear at this time.",
    "function": "Crawls sites to provide answers to user queries.",
    "operator": "iAsk",
    "respect": "No"
  },
  "ICC-Crawler": {
    "description": "Use the collected data for artificial intelligence technologies; provide data to third parties, including commercial companies; those companies can use the data for their own business.",
    "frequency": "No information.",
    "function": "Scrapes data to train and support AI technologies.",
    "operator": "[NICT](https://nict.go.jp)",
    "respect": "Yes"
  },
  "ImagesiftBot": {
    "description": "Once images and text are downloaded from a webpage, ImageSift analyzes this data from the page and stores the information in an index. Our web intelligence products use this index to enable search and retrieval of similar images.",
    "frequency": "No information.",
    "function": "ImageSiftBot is a web crawler that scrapes the internet for publicly available images to support our suite of web intelligence products",
    "operator": "[ImageSift](https://imagesift.com)",
    "respect": "[Yes](https://imagesift.com/about)"
  },
  "img2dataset": {
    "description": "Downloads large sets of images into datasets for LLM training or other purposes.",
    "frequency": "At the discretion of img2dataset users.",
    "function": "Scrapes images for use in LLMs.",
    "operator": "[img2dataset](https://github.com/rom1504/img2dataset)",
    "respect": "Unclear at this time."
  },
  "ISSCyberRiskCrawler": {
    "description": "Used to train machine learning based models to quantify cyber risk.",
    "frequency": "No information.",
    "function": "Scrapes data to train machine learning models.",
    "operator": "[ISS-Corporate](https://iss-cyber.com)",
    "respect": "No"
  },
  "Kangaroo Bot": {
    "operator": "Unclear at this time.",
    "respect": "Unclear at this time.",
    "function": "AI Data Scrapers",
    "frequency": "Unclear at this time.",
    "description": "Kangaroo Bot is used by the company Kangaroo LLM to download data to train AI models tailored to Australian language and culture. More info can be found at https://darkvisitors.com/agents/agents/kangaroo-bot"
  },
  "meta-externalagent": {
    "operator": "[Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers)",
    "respect": "Yes",
    "function": "Used to train models and improve products.",
    "frequency": "No information.",
    "description": "\"The Meta-ExternalAgent crawler crawls the web for use cases such as training AI models or improving products by indexing content directly.\""
  },
  "Meta-ExternalAgent": {
    "operator": "Unclear at this time.",
    "respect": "Unclear at this time.",
    "function": "AI Data Scrapers",
    "frequency": "Unclear at this time.",
    "description": "Meta-ExternalAgent is a web crawler used by Meta to download training data for its AI models and improve its products by indexing content directly. More info can be found at https://darkvisitors.com/agents/agents/meta-externalagent"
  },
  "meta-externalfetcher": {
    "operator": "Unclear at this time.",
    "respect": "Unclear at this time.",
    "function": "AI Assistants",
    "frequency": "Unclear at this time.",
    "description": "Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch an individual links. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher"
  },
  "Meta-ExternalFetcher": {
    "operator": "Unclear at this time.",
    "respect": "Unclear at this time.",
    "function": "AI Assistants",
    "frequency": "Unclear at this time.",
    "description": "Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch an individual links. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher"
  },
  "MistralAI-User/1.0": {
    "operator": "Mistral AI",
    "function": "Takes action based on user prompts.",
    "frequency": "Only when prompted by a user.",
    "description": "MistralAI-User is for user actions in LeChat. When users ask LeChat a question, it may visit a web page to help answer and include a link to the source in its response.",
    "respect": "Yes"
  },
  "NovaAct": {
    "operator": "Unclear at this time.",
    "respect": "Unclear at this time.",
    "function": "AI Agents",
    "frequency": "Unclear at this time.",
    "description": "Nova Act is an AI agent created by Amazon that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/novaact"
  },
  "OAI-SearchBot": {
    "operator": "[OpenAI](https://openai.com)",
    "respect": "[Yes](https://platform.openai.com/docs/bots)",
    "function": "Search result generation.",
    "frequency": "No information.",
    "description": "Crawls sites to surface as results in SearchGPT."
  },
  "omgili": {
    "operator": "[Webz.io](https://webz.io/)",
    "respect": "[Yes](https://webz.io/blog/web-data/what-is-the-omgili-bot-and-why-is-it-crawling-your-website/)",
    "function": "Data is sold.",
    "frequency": "No information.",
    "description": "Crawls sites for APIs used by Hootsuite, Sprinklr, NetBase, and other companies. Data also sold for research purposes or LLM training."
  },
  "omgilibot": {
    "description": "Legacy user agent initially used for Omgili search engine. Unknown if still used, `omgili` agent still used by Webz.io.",
    "frequency": "No information.",
    "function": "Data is sold.",
    "operator": "[Webz.io](https://webz.io/)",
    "respect": "[Yes](https://web.archive.org/web/20170704003301/http://omgili.com/Crawler.html)"
  },
  "Operator": {
    "operator": "Unclear at this time.",
    "respect": "Unclear at this time.",
    "function": "AI Agents",
    "frequency": "Unclear at this time.",
    "description": "Operator is an AI agent created by OpenAI that can use a web browser. It can intelligently navigate and interact with websites to complete multi-step tasks on behalf of a human user. More info can be found at https://darkvisitors.com/agents/agents/operator"
  },
  "PanguBot": {
    "operator": "the Chinese company Huawei",
    "respect": "Unclear at this time.",
    "function": "AI Data Scrapers",
    "frequency": "Unclear at this time.",
    "description": "PanguBot is a web crawler operated by the Chinese company Huawei. It's used to download training data for its multimodal LLM (Large Language Model) called PanGu. More info can be found at https://darkvisitors.com/agents/agents/pangubot"
  },
  "Panscient": {
    "operator": "[Panscient](https://panscient.com)",
    "respect": "[Yes](https://panscient.com/faq.htm)",
    "function": "Data collection and analysis using machine learning and AI.",
    "frequency": "The Panscient web crawler will request a page at most once every second from the same domain name or the same IP address.",
    "description": "Compiles data on businesses and business professionals that is structured using AI and machine learning."
  },
  "panscient.com": {
    "operator": "[Panscient](https://panscient.com)",
    "respect": "[Yes](https://panscient.com/faq.htm)",
    "function": "Data collection and analysis using machine learning and AI.",
    "frequency": "The Panscient web crawler will request a page at most once every second from the same domain name or the same IP address.",
    "description": "Compiles data on businesses and business professionals that is structured using AI and machine learning."
  },
  "Perplexity-User": {
    "operator": "[Perplexity](https://www.perplexity.ai/)",
    "respect": "[No](https://docs.perplexity.ai/guides/bots)",
    "function": "Used to answer queries at the request of users.",
    "frequency": "Only when prompted by a user.",
    "description": "Visit web pages to help provide an accurate answer and include links to the page in Perplexity response."
  },
  "PerplexityBot": {
    "operator": "[Perplexity](https://www.perplexity.ai/)",
    "respect": "[Yes](https://docs.perplexity.ai/guides/bots)",
    "function": "Search result generation.",
    "frequency": "No information.",
    "description": "Crawls sites to surface as results in Perplexity."
  },
  "PetalBot": {
    "description": "Operated by Huawei to provide search and AI assistant services.",
    "frequency": "No explicit frequency provided.",
    "function": "Used to provide recommendations in Hauwei assistant and AI search services.",
    "operator": "[Huawei](https://huawei.com/)",
    "respect": "Yes"
  },
  "PhindBot": {
    "description": "Company offers an AI agent that uses AI and generate extra web query on the fly",
    "frequency": "No explicit frequency provided.",
    "function": "AI-enhanced search engine.",
    "operator": "[phind](https://www.phind.com/)",
    "respect": "Unclear at this time."
  },
  "QualifiedBot": {
    "description": "Operated by Qualified as part of their suite of AI product offerings.",
    "frequency": "No explicit frequency provided.",
    "function": "Company offers AI agents and other related products; usage can be assumed to support said products.",
    "operator": "[Qualified](https://www.qualified.com)",
    "respect": "Unclear at this time."
  },
  "QuillBot": {
    "description": "Operated by QuillBot as part of their suite of AI product offerings.",
    "frequency": "No explicit frequency provided.",
    "function": "Company offers AI detection, writing tools and other services.",
    "operator": "[Quillbot](https://quillbot.com)",
    "respect": "Unclear at this time."
  },
  "quillbot.com": {
    "description": "Operated by QuillBot as part of their suite of AI product offerings.",
    "frequency": "No explicit frequency provided.",
    "function": "Company offers AI detection, writing tools and other services.",
    "operator": "[Quillbot](https://quillbot.com)",
    "respect": "Unclear at this time."
  },
  "SBIntuitionsBot": {
    "description": "AI development and information analysis",
    "respect": "[Yes](https://www.sbintuitions.co.jp/en/bot/)",
    "frequency": "No information.",
    "function": "Uses data gathered in AI development and information analysis.",
    "operator": "[SB Intuitions](https://www.sbintuitions.co.jp/en/)"
  },
  "Scrapy": {
    "description": "\"AI and machine learning applications often need large amounts of quality data, and web data extraction is a fast, efficient way to build structured data sets.\"",
    "frequency": "No information.",
    "function": "Scrapes data for a variety of uses including training AI.",
    "operator": "[Zyte](https://www.zyte.com)",
    "respect": "Unclear at this time."
  },
  "SemrushBot": {
    "operator": "[Semrush](https://www.semrush.com/)",
    "respect": "[Yes](https://www.semrush.com/bot/)",
    "function": "Crawls your site for ContentShake AI tool.",
    "frequency": "Roughly once every 10 seconds.",
    "description": "You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL)."
  },
  "SemrushBot-BA": {
    "operator": "[Semrush](https://www.semrush.com/)",
    "respect": "[Yes](https://www.semrush.com/bot/)",
    "function": "Crawls your site for ContentShake AI tool.",
    "frequency": "Roughly once every 10 seconds.",
    "description": "You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL)."
  },
  "SemrushBot-CT": {
    "operator": "[Semrush](https://www.semrush.com/)",
    "respect": "[Yes](https://www.semrush.com/bot/)",
    "function": "Crawls your site for ContentShake AI tool.",
    "frequency": "Roughly once every 10 seconds.",
    "description": "You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL)."
  },
  "SemrushBot-OCOB": {
    "operator": "[Semrush](https://www.semrush.com/)",
    "respect": "[Yes](https://www.semrush.com/bot/)",
    "function": "Crawls your site for ContentShake AI tool.",
    "frequency": "Roughly once every 10 seconds.",
    "description": "You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL)."
  },
  "SemrushBot-SI": {
    "operator": "[Semrush](https://www.semrush.com/)",
    "respect": "[Yes](https://www.semrush.com/bot/)",
    "function": "Crawls your site for ContentShake AI tool.",
    "frequency": "Roughly once every 10 seconds.",
    "description": "You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL)."
  },
  "SemrushBot-SWA": {
    "operator": "[Semrush](https://www.semrush.com/)",
    "respect": "[Yes](https://www.semrush.com/bot/)",
    "function": "Checks URLs on your site for SWA tool.",
    "frequency": "Roughly once every 10 seconds.",
    "description": "You enter one text (on-demand) and we will make suggestions on it (the tool uses AI but we are not actively crawling the web, you need to manually enter one text/URL)."
  },
  "Sidetrade indexer bot": {
    "description": "AI product training.",
    "frequency": "No information.",
    "function": "Extracts data for a variety of uses including training AI.",
    "operator": "[Sidetrade](https://www.sidetrade.com)",
    "respect": "Unclear at this time."
  },
  "TikTokSpider": {
    "operator": "ByteDance",
    "respect": "Unclear at this time.",
    "function": "LLM training.",
    "frequency": "Unclear at this time.",
    "description": "Downloads data to train LLMS, as per Bytespider."
  },
  "Timpibot": {
    "operator": "[Timpi](https://timpi.io)",
    "respect": "Unclear at this time.",
    "function": "Scrapes data for use in training LLMs.",
    "frequency": "No information.",
    "description": "Makes data available for training AI models."
  },
  "VelenPublicWebCrawler": {
    "description": "\"Our goal with this crawler is to build business datasets and machine learning models to better understand the web.\"",
    "frequency": "No information.",
    "function": "Scrapes data for business data sets and machine learning models.",
    "operator": "[Velen Crawler](https://velen.io)",
    "respect": "[Yes](https://velen.io)"
  },
  "Webzio-Extended": {
    "operator": "Unclear at this time.",
    "respect": "Unclear at this time.",
    "function": "AI Data Scrapers",
    "frequency": "Unclear at this time.",
    "description": "Webzio-Extended is a web crawler used by Webz.io to maintain a repository of web crawl data that it sells to other companies, including those using it to train AI models. More info can be found at https://darkvisitors.com/agents/agents/webzio-extended"
  },
  "wpbot": {
    "operator": "[QuantumCloud](https://www.quantumcloud.com)",
    "respect": "Unclear at this time; opt out provided via [Google Form](https://forms.gle/ajBaxygz9jSR8p8G9)",
    "function": "Live chat support and lead generation.",
    "frequency": "Unclear at this time.",
    "description": "wpbot is a used to support the functionality of the AI Chatbot for WordPress plugin. It supports the use of customer models, data collection and customer support."
  },
  "YandexAdditional": {
    "operator": "[Yandex](https://yandex.ru)",
    "respect": "[Yes](https://yandex.ru/support/webmaster/en/search-appearance/fast.html?lang=en)",
    "function": "Scrapes/analyzes data for the YandexGPT LLM.",
    "frequency": "No information.",
    "description": "Retrieves data used for YandexGPT quick answers features."
  },
  "YandexAdditionalBot": {
    "operator": "[Yandex](https://yandex.ru)",
    "respect": "[Yes](https://yandex.ru/support/webmaster/en/search-appearance/fast.html?lang=en)",
    "function": "Scrapes/analyzes data for the YandexGPT LLM.",
    "frequency": "No information.",
    "description": "Retrieves data used for YandexGPT quick answers features."
  },
  "YouBot": {
    "operator": "[You](https://about.you.com/youchat/)",
    "respect": "[Yes](https://about.you.com/youbot/)",
    "function": "Scrapes data for search engine and LLMs.",
    "frequency": "No information.",
    "description": "Retrieves data used for You.com web search engine and LLMs."
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,47 @@
import bots from './robots.json' with { type: 'json' }

describe('Robots data module', () => {
  describe('bots object', () => {
    it('Should be an object', () => {
      expect(typeof bots).toBe('object')
      expect(bots).not.toBeNull()
      expect(Array.isArray(bots)).toBe(false)
    })

    it('Should have bot entries', () => {
      const keys = Object.keys(bots)
      expect(keys.length).toBeGreaterThan(0)
    })

    it('Should have proper structure for each bot entry', () => {
      Object.entries(bots).forEach(([name, entry]) => {
        expect(typeof name).toBe('string')
        expect(name.length).toBeGreaterThan(0)

        expect(typeof entry).toBe('object')
        expect(entry).not.toBeNull()

        // Check required properties
        expect(entry).toHaveProperty('operator')
        expect(entry).toHaveProperty('respect')
        expect(entry).toHaveProperty('function')
        expect(entry).toHaveProperty('frequency')
        expect(entry).toHaveProperty('description')

        // Check property types
        expect(typeof entry.operator).toBe('string')
        expect(typeof entry.respect).toBe('string')
        expect(typeof entry.function).toBe('string')
        expect(typeof entry.frequency).toBe('string')
        expect(typeof entry.description).toBe('string')
      })
    })

    it('Should have valid respect values', () => {
      Object.entries(bots).forEach(([, entry]) => {
        expect(entry.respect).toBeDefined()
        expect(entry.respect.length).toBeGreaterThan(0)
      })
    })
  })
})
@ -0,0 +1,17 @@
import { uaBlocker } from '@hono/ua-blocker'
import { nonRespectingAiBots, useAiRobotsTxt } from '@hono/ua-blocker/ai-bots'
import { Hono } from 'hono'

const app = new Hono()

app.use(
  '*',
  uaBlocker({
    blocklist: nonRespectingAiBots,
  })
)
// serve robots.txt
app.use('/robots.txt', useAiRobotsTxt())
app.get('/', (c) => c.text('Hello World'))

export default app
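The example above uses the preset list shipped with the package; the middleware also accepts a hand-rolled list of user agents. A minimal sketch (the bot names here are only for illustration, matching is case-insensitive because both the list entries and the incoming header are uppercased before comparison):

```ts
import { uaBlocker } from '@hono/ua-blocker'
import { Hono } from 'hono'

const app = new Hono()

// Block a custom set of user agents instead of the bundled AI-bot lists.
app.use(
  '*',
  uaBlocker({
    blocklist: ['BadBot', 'EvilCrawler'],
  })
)
app.get('/', (c) => c.text('Hello World'))

export default app
```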
@ -0,0 +1,99 @@
// Constants derived from typical core-js patterns for string escaping
// (e.g., related to JS_STRING_ESCAPE or similar proposals)
const FIRST_DIGIT_OR_ASCII = /^[0-9A-Za-z]/
const SYNTAX_SOLIDUS = /[/]/
const OTHER_PUNCTUATORS_AND_WHITESPACES = /[!"#$&'()*+,./:;<=>?@[\]^`{|}~\s]/

// Control characters to escape, mapping to their escape sequence character
const ControlEscape: { [key: string]: string } = {
  '\0': '0', // Null character
  '\b': 'b', // Backspace
  '\t': 't', // Horizontal tab
  '\n': 'n', // Line feed (new line)
  '\v': 'v', // Vertical tab
  '\f': 'f', // Form feed
  '\r': 'r', // Carriage return
  '"': '"', // Double quote
  "'": "'", // Single quote
  '\\': '\\', // Backslash
}

/**
 * Escapes a character for use in a string literal.
 * Primarily handles:
 * - Control characters (0x00-0x1F) -> \u00XX
 * - Unpaired surrogates -> \uDXXX
 * Other characters are returned as is, as their escaping is handled by
 * ControlEscape, SYNTAX_SOLIDUS, or they are passed through.
 */
function _escapeChar(char: string): string {
  const charCode = char.charCodeAt(0)

  if (charCode < 0x20) {
    // ASCII Control characters
    return '\\u' + charCode.toString(16).padStart(4, '0')
  }
  if (charCode >= 0xd800 && charCode <= 0xdfff) {
    // Surrogates (typically unpaired if passed to this function directly)
    return '\\u' + charCode.toString(16).padStart(4, '0')
  }
  // For other characters (printable ASCII, other Unicode symbols) that might
  // be passed via FIRST_DIGIT_OR_ASCII or OTHER_PUNCTUATORS_AND_WHITESPACES,
  // they should remain themselves if they don't require \uXXXX escaping.
  return char
}

/**
 * `RegExp.escape` polyfill inspired by core-js
 */
function $escape(S: string): string {
  const length = S.length
  if (length === 0) {
    return ''
  }
  // Pre-allocate array; join is efficient for this.
  const result = new Array<string>(length)

  for (let i = 0; i < length; i++) {
    const chr = S.charAt(i)

    if (i === 0 && FIRST_DIGIT_OR_ASCII.exec(chr)) {
      result[i] = _escapeChar(chr)
    } else if (Object.prototype.hasOwnProperty.call(ControlEscape, chr)) {
      result[i] = '\\' + ControlEscape[chr]
    } else if (SYNTAX_SOLIDUS.exec(chr)) {
      result[i] = '\\' + chr
    } else if (OTHER_PUNCTUATORS_AND_WHITESPACES.exec(chr)) {
      result[i] = _escapeChar(chr)
    } else {
      const charCode = chr.charCodeAt(0)
      // Single UTF-16 code unit (non-surrogate)
      if ((charCode & 0xf800) !== 0xd800) {
        result[i] = chr
      }
      // Unpaired surrogate
      else if (
        charCode >= 0xdc00 || // Low surrogate (implies it's unpaired if we are here)
        i + 1 >= length || // High surrogate at end of string
        (S.charCodeAt(i + 1) & 0xfc00) !== 0xdc00 // High surrogate not followed by low surrogate
      ) {
        result[i] = _escapeChar(chr)
      }
      // Surrogate pair
      else {
        result[i] = chr // High surrogate
        i++ // Advance to include the low surrogate
        result[i] = S.charAt(i) // Low surrogate
      }
    }
  }

  return result.join('')
}

interface RegExpConstructor {
  escape(str: string): string
}

//@ts-expect-error: RegExp.escape is only included in Node.js v24+ and Deno v2.3+
export const escape = (RegExp.escape as RegExpConstructor['escape']) || $escape
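A quick illustration of what the helper produces, a sketch consistent with the prebuilt patterns in `generated.ts`: a solidus in a bot token is escaped so the name can be embedded in a RegExp source, while letters, digits, dots and spaces pass through unchanged.

```ts
import { escape } from './escape'

// Bot names are uppercased before escaping in listToRegex (see index.ts).
console.log(escape('IASKSPIDER/2.0')) // -> IASKSPIDER\/2.0
console.log(escape('KANGAROO BOT')) // -> KANGAROO BOT (space is left as-is)
```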
@ -0,0 +1,135 @@
import { ROBOTS_TXT, ALL_BOTS, NON_RESPECTING_BOTS } from './generated'

describe('Generated constants', () => {
  describe('ROBOTS_TXT', () => {
    it('Should be a non-empty string', () => {
      expect(typeof ROBOTS_TXT).toBe('string')
      expect(ROBOTS_TXT.length).toBeGreaterThan(0)
    })

    it('Should contain User-agent directives', () => {
      const userAgentLines = ROBOTS_TXT.split('\n').filter((line) => line.startsWith('User-agent:'))
      expect(userAgentLines.length).toBeGreaterThan(0)
    })

    it('Should contain Disallow directive', () => {
      expect(ROBOTS_TXT).toContain('Disallow: /')
    })

    it('Should end with proper format', () => {
      const lines = ROBOTS_TXT.split('\n')
      expect(lines.at(-2)).toBe('Disallow: /')
      expect(lines.at(-1)).toBe('')
    })

    it('Should have User-agent lines before Disallow line', () => {
      const lines = ROBOTS_TXT.split('\n')
      const userAgentLines = lines.filter((line) => line.startsWith('User-agent:'))
      const disallowLineIndex = lines.findIndex((line) => line === 'Disallow: /')

      expect(userAgentLines.length).toBeGreaterThan(0)
      expect(disallowLineIndex).toBeGreaterThan(0)

      // All User-agent lines should come before Disallow
      lines.forEach((line, index) => {
        if (line.startsWith('User-agent:')) {
          expect(index).toBeLessThan(disallowLineIndex)
        }
      })
    })

    it('Should match ALL_BOTS list', () => {
      const userAgentLines = ROBOTS_TXT.split('\n').filter((line) => line.startsWith('User-agent:'))
      const botsInRobotsTxt = userAgentLines.map((line) => line.replace('User-agent: ', ''))

      expect(botsInRobotsTxt.sort()).toEqual(ALL_BOTS.sort())
    })

    it('Should match expected robots.txt conventions', () => {
      // Should not have any empty User-agent directives
      const userAgentLines = ROBOTS_TXT.split('\n').filter((line) => line.startsWith('User-agent:'))
      userAgentLines.forEach((line) => {
        expect(line.length).toBeGreaterThan('User-agent: '.length)
        expect(line.replace('User-agent: ', '').trim().length).toBeGreaterThan(0)
      })

      // Should have exactly one Disallow: / directive
      const disallowLines = ROBOTS_TXT.split('\n').filter((line) => line === 'Disallow: /')
      expect(disallowLines.length).toBe(1)
    })
  })

  describe('ALL_BOTS', () => {
    it('Should be an array', () => {
      expect(Array.isArray(ALL_BOTS)).toBe(true)
    })

    it('Should contain bot names', () => {
      expect(ALL_BOTS.length).toBeGreaterThan(0)
      ALL_BOTS.forEach((bot) => {
        expect(typeof bot).toBe('string')
        expect(bot.length).toBeGreaterThan(0)
      })
    })

    it('Should not contain duplicates', () => {
      const uniqueBots = [...new Set(ALL_BOTS)]
      expect(uniqueBots.length).toBe(ALL_BOTS.length)
    })

    it('Should contain only non-empty strings', () => {
      ALL_BOTS.forEach((bot) => {
        expect(typeof bot).toBe('string')
        expect(bot.trim().length).toBeGreaterThan(0)
        expect(bot).toBe(bot.trim()) // No leading/trailing whitespace
      })
    })
  })

  describe('NON_RESPECTING_BOTS', () => {
    it('Should be an array', () => {
      expect(Array.isArray(NON_RESPECTING_BOTS)).toBe(true)
    })

    it('Should contain bot names', () => {
      expect(NON_RESPECTING_BOTS.length).toBeGreaterThan(0)
      NON_RESPECTING_BOTS.forEach((bot) => {
        expect(typeof bot).toBe('string')
        expect(bot.length).toBeGreaterThan(0)
      })
    })

    it('Should be a subset of ALL_BOTS', () => {
      NON_RESPECTING_BOTS.forEach((bot) => {
        expect(ALL_BOTS).toContain(bot)
      })
    })

    it('Should be smaller than or equal to ALL_BOTS', () => {
      expect(NON_RESPECTING_BOTS.length).toBeLessThanOrEqual(ALL_BOTS.length)
    })

    it('Should include expected non-respecting bots', () => {
      expect(NON_RESPECTING_BOTS).toContain('Bytespider')
      expect(NON_RESPECTING_BOTS).toContain('iaskspider/2.0')
    })

    it('Should not include known respecting bots', () => {
      expect(NON_RESPECTING_BOTS).not.toContain('GPTBot')
      expect(NON_RESPECTING_BOTS).not.toContain('ChatGPT-User')
    })

    it('Should not contain duplicates', () => {
      const uniqueBots = [...new Set(NON_RESPECTING_BOTS)]
      expect(uniqueBots.length).toBe(NON_RESPECTING_BOTS.length)
    })

    it('Should contain only non-empty strings', () => {
      NON_RESPECTING_BOTS.forEach((bot) => {
        expect(typeof bot).toBe('string')
        expect(bot.trim().length).toBeGreaterThan(0)
        expect(bot).toBe(bot.trim()) // No leading/trailing whitespace
      })
    })
  })
})
@ -0,0 +1,84 @@
// This file is generated by scripts/get-robots-txt.ts. Do not edit manually.
export const ROBOTS_TXT = `User-agent: AI2Bot
User-agent: Ai2Bot-Dolma
User-agent: aiHitBot
User-agent: Amazonbot
User-agent: Andibot
User-agent: anthropic-ai
User-agent: Applebot
User-agent: Applebot-Extended
User-agent: bedrockbot
User-agent: Brightbot 1.0
User-agent: Bytespider
User-agent: CCBot
User-agent: ChatGPT-User
User-agent: Claude-SearchBot
User-agent: Claude-User
User-agent: Claude-Web
User-agent: ClaudeBot
User-agent: cohere-ai
User-agent: cohere-training-data-crawler
User-agent: Cotoyogi
User-agent: Crawlspace
User-agent: Diffbot
User-agent: DuckAssistBot
User-agent: EchoboxBot
User-agent: FacebookBot
User-agent: Factset_spyderbot
User-agent: FirecrawlAgent
User-agent: FriendlyCrawler
User-agent: Google-CloudVertexBot
User-agent: Google-Extended
User-agent: GoogleOther
User-agent: GoogleOther-Image
User-agent: GoogleOther-Video
User-agent: GPTBot
User-agent: iaskspider/2.0
User-agent: ICC-Crawler
User-agent: ImagesiftBot
User-agent: img2dataset
User-agent: ISSCyberRiskCrawler
User-agent: Kangaroo Bot
User-agent: meta-externalagent
User-agent: Meta-ExternalAgent
User-agent: meta-externalfetcher
User-agent: Meta-ExternalFetcher
User-agent: MistralAI-User/1.0
User-agent: NovaAct
User-agent: OAI-SearchBot
User-agent: omgili
User-agent: omgilibot
User-agent: Operator
User-agent: PanguBot
User-agent: Panscient
User-agent: panscient.com
User-agent: Perplexity-User
User-agent: PerplexityBot
User-agent: PetalBot
User-agent: PhindBot
User-agent: QualifiedBot
User-agent: QuillBot
User-agent: quillbot.com
User-agent: SBIntuitionsBot
User-agent: Scrapy
User-agent: SemrushBot
User-agent: SemrushBot-BA
User-agent: SemrushBot-CT
User-agent: SemrushBot-OCOB
User-agent: SemrushBot-SI
User-agent: SemrushBot-SWA
User-agent: Sidetrade indexer bot
User-agent: TikTokSpider
User-agent: Timpibot
User-agent: VelenPublicWebCrawler
User-agent: Webzio-Extended
User-agent: wpbot
User-agent: YandexAdditional
User-agent: YandexAdditionalBot
User-agent: YouBot
Disallow: /
`;
export const ALL_BOTS = ["AI2Bot", "Ai2Bot-Dolma", "aiHitBot", "Amazonbot", "Andibot", "anthropic-ai", "Applebot", "Applebot-Extended", "bedrockbot", "Brightbot 1.0", "Bytespider", "CCBot", "ChatGPT-User", "Claude-SearchBot", "Claude-User", "Claude-Web", "ClaudeBot", "cohere-ai", "cohere-training-data-crawler", "Cotoyogi", "Crawlspace", "Diffbot", "DuckAssistBot", "EchoboxBot", "FacebookBot", "Factset_spyderbot", "FirecrawlAgent", "FriendlyCrawler", "Google-CloudVertexBot", "Google-Extended", "GoogleOther", "GoogleOther-Image", "GoogleOther-Video", "GPTBot", "iaskspider/2.0", "ICC-Crawler", "ImagesiftBot", "img2dataset", "ISSCyberRiskCrawler", "Kangaroo Bot", "meta-externalagent", "Meta-ExternalAgent", "meta-externalfetcher", "Meta-ExternalFetcher", "MistralAI-User/1.0", "NovaAct", "OAI-SearchBot", "omgili", "omgilibot", "Operator", "PanguBot", "Panscient", "panscient.com", "Perplexity-User", "PerplexityBot", "PetalBot", "PhindBot", "QualifiedBot", "QuillBot", "quillbot.com", "SBIntuitionsBot", "Scrapy", "SemrushBot", "SemrushBot-BA", "SemrushBot-CT", "SemrushBot-OCOB", "SemrushBot-SI", "SemrushBot-SWA", "Sidetrade indexer bot", "TikTokSpider", "Timpibot", "VelenPublicWebCrawler", "Webzio-Extended", "wpbot", "YandexAdditional", "YandexAdditionalBot", "YouBot"];
export const NON_RESPECTING_BOTS = ["Andibot", "anthropic-ai", "Applebot", "Brightbot 1.0", "Bytespider", "Claude-Web", "cohere-ai", "cohere-training-data-crawler", "Diffbot", "DuckAssistBot", "EchoboxBot", "Factset_spyderbot", "iaskspider/2.0", "img2dataset", "ISSCyberRiskCrawler", "Kangaroo Bot", "Meta-ExternalAgent", "meta-externalfetcher", "Meta-ExternalFetcher", "NovaAct", "Operator", "PanguBot", "Perplexity-User", "PhindBot", "QualifiedBot", "QuillBot", "quillbot.com", "Scrapy", "Sidetrade indexer bot", "TikTokSpider", "Timpibot", "Webzio-Extended", "wpbot"];
export const ALL_BOTS_REGEX = /(AI2BOT|AI2BOT-DOLMA|AIHITBOT|AMAZONBOT|ANDIBOT|ANTHROPIC-AI|APPLEBOT|APPLEBOT-EXTENDED|BEDROCKBOT|BRIGHTBOT 1.0|BYTESPIDER|CCBOT|CHATGPT-USER|CLAUDE-SEARCHBOT|CLAUDE-USER|CLAUDE-WEB|CLAUDEBOT|COHERE-AI|COHERE-TRAINING-DATA-CRAWLER|COTOYOGI|CRAWLSPACE|DIFFBOT|DUCKASSISTBOT|ECHOBOXBOT|FACEBOOKBOT|FACTSET_SPYDERBOT|FIRECRAWLAGENT|FRIENDLYCRAWLER|GOOGLE-CLOUDVERTEXBOT|GOOGLE-EXTENDED|GOOGLEOTHER|GOOGLEOTHER-IMAGE|GOOGLEOTHER-VIDEO|GPTBOT|IASKSPIDER\/2.0|ICC-CRAWLER|IMAGESIFTBOT|IMG2DATASET|ISSCYBERRISKCRAWLER|KANGAROO BOT|META-EXTERNALAGENT|META-EXTERNALAGENT|META-EXTERNALFETCHER|META-EXTERNALFETCHER|MISTRALAI-USER\/1.0|NOVAACT|OAI-SEARCHBOT|OMGILI|OMGILIBOT|OPERATOR|PANGUBOT|PANSCIENT|PANSCIENT.COM|PERPLEXITY-USER|PERPLEXITYBOT|PETALBOT|PHINDBOT|QUALIFIEDBOT|QUILLBOT|QUILLBOT.COM|SBINTUITIONSBOT|SCRAPY|SEMRUSHBOT|SEMRUSHBOT-BA|SEMRUSHBOT-CT|SEMRUSHBOT-OCOB|SEMRUSHBOT-SI|SEMRUSHBOT-SWA|SIDETRADE INDEXER BOT|TIKTOKSPIDER|TIMPIBOT|VELENPUBLICWEBCRAWLER|WEBZIO-EXTENDED|WPBOT|YANDEXADDITIONAL|YANDEXADDITIONALBOT|YOUBOT)/;
export const NON_RESPECTING_BOTS_REGEX = /(ANDIBOT|ANTHROPIC-AI|APPLEBOT|BRIGHTBOT 1.0|BYTESPIDER|CLAUDE-WEB|COHERE-AI|COHERE-TRAINING-DATA-CRAWLER|DIFFBOT|DUCKASSISTBOT|ECHOBOXBOT|FACTSET_SPYDERBOT|IASKSPIDER\/2.0|IMG2DATASET|ISSCYBERRISKCRAWLER|KANGAROO BOT|META-EXTERNALAGENT|META-EXTERNALFETCHER|META-EXTERNALFETCHER|NOVAACT|OPERATOR|PANGUBOT|PERPLEXITY-USER|PHINDBOT|QUALIFIEDBOT|QUILLBOT|QUILLBOT.COM|SCRAPY|SIDETRADE INDEXER BOT|TIKTOKSPIDER|TIMPIBOT|WEBZIO-EXTENDED|WPBOT)/;
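A rough sketch of how the prebuilt `ROBOTS_TXT` payload could be served directly from a route, assuming you want to return it yourself rather than through the `useAiRobotsTxt` helper shown in the example app:

```ts
import { Hono } from 'hono'
import { ROBOTS_TXT } from './generated'

const app = new Hono()

// Serve the prebuilt robots.txt that disallows every listed AI crawler.
app.get('/robots.txt', (c) => c.text(ROBOTS_TXT))

export default app
```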
@ -0,0 +1,323 @@
import { Hono } from 'hono'
import { aiBots, nonRespectingAiBots } from './ai-bots'
import { uaBlocker } from './index'

describe('uaBlocker middleware', () => {
  const app = new Hono()

  // Custom blocklist test
  app.use(
    '/custom/*',
    uaBlocker({
      blocklist: ['BadBot', 'EvilCrawler', 'SpamBot'],
    })
  )
  app.get('/custom/test', (c) => c.text('custom'))

  // AI bots blocklist test
  app.use(
    '/ai/*',
    uaBlocker({
      blocklist: aiBots,
    })
  )
  app.get('/ai/test', (c) => c.text('ai'))

  // Non-respecting AI bots blocklist test
  app.use(
    '/non-respecting/*',
    uaBlocker({
      blocklist: nonRespectingAiBots,
    })
  )
  app.get('/non-respecting/test', (c) => c.text('non-respecting'))

  // Empty blocklist test
  app.use(
    '/empty/*',
    uaBlocker({
      blocklist: [],
    })
  )
  app.get('/empty/test', (c) => c.text('empty'))

  // Default parameters test (empty blocklist)
  app.use('/default/*', uaBlocker())
  app.get('/default/test', (c) => c.text('default'))

  // Custom RegExp test
  app.use(
    '/custom-regex/*',
    uaBlocker({
      blocklist: /BADREGEXBOT|EVILREGEXBOT/,
    })
  )
  app.get('/custom-regex/test', (c) => c.text('custom-regex'))

  describe('Custom blocklist', () => {
    it('Should block user agents in custom blocklist', async () => {
      const res = await app.request('http://localhost/custom/test', {
        headers: {
          'User-Agent': 'BadBot/1.0',
        },
      })
      expect(res.status).toBe(403)
      expect(await res.text()).toBe('Forbidden')
    })

    it('Should block user agents with case insensitive matching', async () => {
      const res = await app.request('http://localhost/custom/test', {
        headers: {
          'User-Agent': 'badbot/2.0',
        },
      })
      expect(res.status).toBe(403)
      expect(await res.text()).toBe('Forbidden')
    })

    it('Should block user agents with version info', async () => {
      const res = await app.request('http://localhost/custom/test', {
        headers: {
          'User-Agent': 'EvilCrawler/3.0 (compatible; MSIE 6.0)',
        },
      })
      expect(res.status).toBe(403)
      expect(await res.text()).toBe('Forbidden')
    })

    it('Should allow user agents not in blocklist', async () => {
      const res = await app.request('http://localhost/custom/test', {
        headers: {
          'User-Agent': 'GoodBot/1.0',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('custom')
    })

    it('Should allow regular browser user agents', async () => {
      const res = await app.request('http://localhost/custom/test', {
        headers: {
          'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('custom')
    })
  })

  describe('AI bots blocklist', () => {
    it('Should block known AI bots from the list', async () => {
      const res = await app.request('http://localhost/ai/test', {
        headers: {
          'User-Agent': 'GPTBot/1.0',
        },
      })
      expect(res.status).toBe(403)
      expect(await res.text()).toBe('Forbidden')
    })

    it('Should block Bytespider (non-respecting bot)', async () => {
      const res = await app.request('http://localhost/ai/test', {
        headers: {
          'User-Agent': 'Bytespider',
        },
      })
      expect(res.status).toBe(403)
      expect(await res.text()).toBe('Forbidden')
    })

    it('Should block ClaudeBot', async () => {
      const res = await app.request('http://localhost/ai/test', {
        headers: {
          'User-Agent': 'ClaudeBot/1.0',
        },
      })
      expect(res.status).toBe(403)
      expect(await res.text()).toBe('Forbidden')
    })

    it('Should allow unknown bots not in the AI list', async () => {
      const res = await app.request('http://localhost/ai/test', {
        headers: {
          'User-Agent': 'UnknownBot/1.0',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('ai')
    })
  })

  describe('Non-respecting AI bots blocklist', () => {
    it('Should block non-respecting bots like Bytespider', async () => {
      const res = await app.request('http://localhost/non-respecting/test', {
        headers: {
          'User-Agent': 'Bytespider',
        },
      })
      expect(res.status).toBe(403)
      expect(await res.text()).toBe('Forbidden')
    })

    it('Should allow respecting bots like GPTBot', async () => {
      const res = await app.request('http://localhost/non-respecting/test', {
        headers: {
          'User-Agent': 'GPTBot',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('non-respecting')
    })

    it('Should allow ChatGPT-User (respecting bot)', async () => {
      const res = await app.request('http://localhost/non-respecting/test', {
        headers: {
          'User-Agent': 'ChatGPT-User',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('non-respecting')
    })
  })

  describe('Edge cases', () => {
    it('Should allow requests with no User-Agent header', async () => {
      const res = await app.request('http://localhost/custom/test')
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('custom')
    })

    it('Should allow requests with empty User-Agent header', async () => {
      const res = await app.request('http://localhost/custom/test', {
        headers: {
          'User-Agent': '',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('custom')
    })

    it('Should allow all requests with empty blocklist', async () => {
      const res = await app.request('http://localhost/empty/test', {
        headers: {
          'User-Agent': 'AnyBot/1.0',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('empty')
    })

    it('Should allow all requests with explicit empty array blocklist', async () => {
      const emptyBlocklist: string[] = []
      const testApp = new Hono()
      testApp.use(uaBlocker({ blocklist: emptyBlocklist }))
      testApp.get('/', (c) => c.text('success'))

      const res = await testApp.request('/', {
        headers: {
          'User-Agent': 'BadBot/1.0',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('success')
    })

    it('Should allow all requests with default parameters (empty blocklist)', async () => {
      const res = await app.request('http://localhost/default/test', {
        headers: {
          'User-Agent': 'AnyBot/1.0',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('default')
    })
  })

  describe('Empty blocklist behavior tests', () => {
    it('Should handle empty blocklist without error', () => {
      const testApp = new Hono()
      const emptyMiddleware = uaBlocker({ blocklist: [] })
      testApp.use(emptyMiddleware)
      testApp.get('/', (c) => c.text('success'))

      // Implementation should not error when creating middleware with empty blocklist
      expect(emptyMiddleware).toBeDefined()
    })

    it('Should allow all user agents when blocklist is empty', async () => {
      const testApp = new Hono()
      testApp.use(uaBlocker({ blocklist: [] }))
      testApp.get('/', (c) => c.text('success'))

      const testBots = ['TestBot', 'CrawlerBot', 'BadBot']

      for (const bot of testBots) {
        const res = await testApp.request('/', {
          headers: { 'User-Agent': bot },
        })
        expect(res.status).toBe(200)
        expect(await res.text()).toBe('success')
      }
    })
  })

  describe('Custom RegExp blocklist', () => {
    it('Should block user agents matching the RegExp pattern', async () => {
      const res = await app.request('http://localhost/custom-regex/test', {
        headers: {
          'User-Agent': 'BadRegexBot/1.0',
        },
      })
      expect(res.status).toBe(403)
      expect(await res.text()).toBe('Forbidden')
    })

    it('Should block user agents with case insensitive matching', async () => {
      const res = await app.request('http://localhost/custom-regex/test', {
        headers: {
          'User-Agent': 'badregexbot/2.0',
        },
      })
      expect(res.status).toBe(403)
      expect(await res.text()).toBe('Forbidden')
    })

    it('Should allow user agents not matching the RegExp pattern', async () => {
      const res = await app.request('http://localhost/custom-regex/test', {
        headers: {
          'User-Agent': 'GoodBot/1.0',
        },
      })
      expect(res.status).toBe(200)
      expect(await res.text()).toBe('custom-regex')
    })
  })

  describe('Multiple bots validation', () => {
    const testCases = [
      { agent: 'AI2Bot', isNonRespecting: false },
      { agent: 'Bytespider', isNonRespecting: true },
      { agent: 'ChatGPT-User', isNonRespecting: false },
      { agent: 'ClaudeBot', isNonRespecting: false },
      { agent: 'GPTBot', isNonRespecting: false },
      { agent: 'CCBot', isNonRespecting: false },
      { agent: 'iaskspider/2.0', isNonRespecting: true },
    ]

    testCases.forEach(({ agent, isNonRespecting }) => {
      it(`Should handle ${agent} correctly (isNonRespecting: ${isNonRespecting})`, async () => {
        // Test with all AI bots
        const allRes = await app.request('http://localhost/ai/test', {
          headers: { 'User-Agent': agent },
        })
        expect(allRes.status).toBe(403)

        // Test with non-respecting bots only
        const nonRespectingRes = await app.request('http://localhost/non-respecting/test', {
          headers: { 'User-Agent': agent },
        })
        expect(nonRespectingRes.status).toBe(isNonRespecting ? 403 : 200)
      })
    })
  })
})
@ -0,0 +1,49 @@
import { createMiddleware } from 'hono/factory'
import { escape } from './escape'

/**
 * Converts a list of strings into a regular expression group.
 * Each string in the list is escaped using `RegExp.escape()` or polyfill
 * and then joined by a '|' (OR) operator. The entire result is wrapped in
 * parentheses to form a capturing group.
 *
 * @param list An array of strings to include in the regex.
 * @returns A RegExp matching any of the strings in the capture group.
 */
function listToRegex(list: string[]): RegExp | undefined {
  let regex

  if (list.length > 0) {
    const formatted = list.map((item) => escape(item.toUpperCase())).join('|')
    regex = new RegExp(`(${formatted})`)
  }

  return regex
}

/**
 *
 * @param params - `blocklist`: An array of user-agents to block, or a RegExp to match against. NOTE: If passing a RegExp, it should match on UPPERCASE User Agents.
 * @returns the Hono middleware to block requests based on User-Agent header.
 */
export function uaBlocker(params = { blocklist: [] as string[] | RegExp }) {
  const regex = Array.isArray(params.blocklist) ? listToRegex(params.blocklist) : params.blocklist

  return createMiddleware(async (c, next) => {
    const userAgent = c.req.header('User-Agent')?.toUpperCase()

    if (userAgent && regex && userAgent.match(regex)) {
      return c.text('Forbidden', 403)
    }

    await next()
    return
  })
}

export default uaBlocker

// Export for testing purposes
export const __test = {
  listToRegex,
}
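Because the middleware uppercases the incoming `User-Agent` before testing it, a RegExp passed directly must be written against uppercase input, as the JSDoc above notes. A minimal sketch with made-up bot names:

```ts
import { Hono } from 'hono'
import { uaBlocker } from '@hono/ua-blocker'

const app = new Hono()

// The pattern is uppercase because the header is uppercased before matching;
// 'MyBadBot/1.0', 'mybadbot' and 'MYBADBOT' would all be rejected with 403.
app.use('*', uaBlocker({ blocklist: /(MYBADBOT|OTHERBADBOT)/ }))
app.get('/', (c) => c.text('Hello World'))

export default app
```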
@ -0,0 +1,12 @@
{
  "extends": "../../tsconfig.base.json",
  "compilerOptions": {
    "rootDir": "src",
    "outDir": "dist",
    "tsBuildInfoFile": "dist/tsconfig.build.tsbuildinfo",
    "emitDeclarationOnly": false
  },
  "include": ["src/**/*.ts", "src/**/*.json"],
  "exclude": ["**/*.test.ts"],
  "references": []
}
@ -0,0 +1,16 @@
{
  "extends": "../../tsconfig.base.json",
  "files": [],
  "include": [],
  "references": [
    {
      "path": "./tsconfig.build.json"
    },
    {
      "path": "./tsconfig.script.json"
    },
    {
      "path": "./tsconfig.spec.json"
    }
  ]
}
@ -0,0 +1,13 @@
{
  "extends": "../../tsconfig.base.json",
  "compilerOptions": {
    "outDir": "dist",
    "tsBuildInfoFile": "dist/tsconfig.script.tsbuildinfo",
    "allowImportingTsExtensions": true,
    "noEmit": true,
    "types": ["node"]
  },
  "include": ["**/*.ts", "src/data/*.json"],
  "exclude": ["**/*.test.ts"],
  "references": []
}
@ -0,0 +1,13 @@
{
  "extends": "../../tsconfig.base.json",
  "compilerOptions": {
    "outDir": "./dist/out-tsc/packages/ua-blocker",
    "types": ["vitest/globals"]
  },
  "include": ["**/*.test.ts", "vitest.config.ts"],
  "references": [
    {
      "path": "./tsconfig.build.json"
    }
  ]
}
@ -0,0 +1,7 @@
import { defineProject } from 'vitest/config'

export default defineProject({
  test: {
    globals: true,
  },
})
@ -38,6 +38,7 @@
    { "path": "packages/tsyringe" },
    { "path": "packages/typebox-validator" },
    { "path": "packages/typia-validator" },
    { "path": "packages/ua-blocker" },
    { "path": "packages/valibot-validator" },
    { "path": "packages/zod-openapi" },
    { "path": "packages/zod-validator" },
24
yarn.lock
24
yarn.lock
|
@ -2374,6 +2374,21 @@ __metadata:
  languageName: unknown
  linkType: soft

"@hono/ua-blocker@workspace:packages/ua-blocker":
  version: 0.0.0-use.local
  resolution: "@hono/ua-blocker@workspace:packages/ua-blocker"
  dependencies:
    "@arethetypeswrong/cli": "npm:^0.17.4"
    "@types/node": "npm:^22.15.24"
    publint: "npm:^0.3.9"
    tsup: "npm:^8.4.0"
    typescript: "npm:^5.8.2"
    vitest: "npm:^3.0.8"
  peerDependencies:
    hono: "*"
  languageName: unknown
  linkType: soft

"@hono/valibot-validator@workspace:packages/valibot-validator":
  version: 0.0.0-use.local
  resolution: "@hono/valibot-validator@workspace:packages/valibot-validator"
@ -3967,6 +3982,15 @@ __metadata:
  languageName: node
  linkType: hard

"@types/node@npm:^22.15.24":
  version: 22.15.24
  resolution: "@types/node@npm:22.15.24"
  dependencies:
    undici-types: "npm:~6.21.0"
  checksum: 16c38e98168fa6c3d2f2b6e95f14f80878d969b39093bc5384385a884d73a7fe361c563b36f14bc27536b337f5baad74321f717b31d2c061b9c48074567eb8c6
  languageName: node
  linkType: hard

"@types/prop-types@npm:*":
  version: 15.7.14
  resolution: "@types/prop-types@npm:15.7.14"