mirror of https://github.com/neocities/neocities.git
Add robots.txt for new sites, with instructions for blocking AI crawlers
commit db35971217 (parent d63467c4ca)
4 changed files with 56 additions and 2 deletions
@@ -494,6 +494,11 @@ class Site < Sequel::Model
     FileUtils.cp template_file_path('neocities.png'), tmpfile.path
     files << {filename: 'neocities.png', tempfile: tmpfile}
 
+    tmpfile = Tempfile.new 'robots.txt'
+    tmpfile.close
+    FileUtils.cp template_file_path('robots.txt'), tmpfile.path
+    files << {filename: 'robots.txt', tempfile: tmpfile}
+
     store_files files, new_install: true
   end
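Why the template is staged through a Tempfile: Tempfile.new creates and opens a real file on disk, closing it releases the handle so FileUtils.cp can overwrite the backing path with the template's contents, and the Tempfile object itself stays alive so it can be queued for store_files. A minimal standalone sketch of that pattern in Ruby, assuming template_file_path simply resolves names against views/templates (the real helper lives in the Site model):

    require 'tempfile'
    require 'fileutils'

    # Stand-in for the model's template_file_path helper (an assumption
    # for this sketch; the real helper is defined on the Site model).
    def template_file_path(name)
      File.join('views', 'templates', name)
    end

    # Create a real temp file, close the handle, then overwrite its
    # backing path with the template. The Tempfile object survives the
    # close, so its path and contents remain available for upload.
    tmpfile = Tempfile.new 'robots.txt'
    tmpfile.close
    FileUtils.cp template_file_path('robots.txt'), tmpfile.path

    files = [{filename: 'robots.txt', tempfile: tmpfile}]
    puts File.read(files.first[:tempfile].path)  # => the template's contents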
@@ -44,7 +44,7 @@ describe 'signup' do
     _(File.exist?(index_file_path)).must_equal true
 
     site = Site[username: @site[:username]]
-    _(site.site_files.length).must_equal 4
+    _(site.site_files.length).must_equal 5
     _(site.site_changed).must_equal false
     _(site.site_updated_at).must_be_nil
     _(site.is_education).must_equal true
@@ -55,7 +55,7 @@ describe 'signup' do
     _(File.exist?(index_file_path)).must_equal true
 
     site = Site[username: @site[:username]]
-    _(site.site_files.length).must_equal 4
+    _(site.site_files.length).must_equal 5
     _(site.site_changed).must_equal false
     _(site.site_updated_at).must_be_nil
     _(site.is_education).must_equal false
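(Both signup test cases bump the expected site_files count from 4 to 5: robots.txt is now part of the default file set stored for every new site.)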
views/templates/robots.txt (new file, 49 lines)
@@ -0,0 +1,49 @@
+# This file tells search engines and bots what they are allowed to see on your site.
+
+# This is the default rule, which allows search engines to crawl your site (recommended).
+User-agent: *
+Allow: /
+
+# If you do not want AI bots to crawl your site, remove the # from the following lines:
+#User-agent: AI2Bot
+#User-agent: Ai2Bot-Dolma
+#User-agent: Amazonbot
+#User-agent: anthropic-ai
+#User-agent: Applebot
+#User-agent: Applebot-Extended
+#User-agent: Bytespider
+#User-agent: CCBot
+#User-agent: ChatGPT-User
+#User-agent: Claude-Web
+#User-agent: ClaudeBot
+#User-agent: cohere-ai
+#User-agent: Diffbot
+#User-agent: DuckAssistBot
+#User-agent: FacebookBot
+#User-agent: FriendlyCrawler
+#User-agent: Google-Extended
+#User-agent: GoogleOther
+#User-agent: GoogleOther-Image
+#User-agent: GoogleOther-Video
+#User-agent: GPTBot
+#User-agent: iaskspider/2.0
+#User-agent: ICC-Crawler
+#User-agent: ImagesiftBot
+#User-agent: img2dataset
+#User-agent: ISSCyberRiskCrawler
+#User-agent: Kangaroo Bot
+#User-agent: Meta-ExternalAgent
+#User-agent: Meta-ExternalFetcher
+#User-agent: OAI-SearchBot
+#User-agent: omgili
+#User-agent: omgilibot
+#User-agent: PanguBot
+#User-agent: PerplexityBot
+#User-agent: PetalBot
+#User-agent: Scrapy
+#User-agent: Sidetrade indexer bot
+#User-agent: Timpibot
+#User-agent: VelenPublicWebCrawler
+#User-agent: Webzio-Extended
+#User-agent: YouBot
+#Disallow: /
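A note on how the commented block behaves once enabled: under the Robots Exclusion Protocol (RFC 9309), consecutive User-agent lines form a single group that shares the rules following them. Uncommenting any subset of the lines together with the final Disallow: / therefore blocks exactly those crawlers from the entire site, while the User-agent: * group above still allows every other crawler. For example, opting out only the OpenAI and Anthropic crawlers from the list would leave:

    User-agent: GPTBot
    User-agent: ChatGPT-User
    User-agent: anthropic-ai
    User-agent: ClaudeBot
    Disallow: /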