[Python-checkins] cpython (merge 3.4 -> default): merge
raymond.hettinger
python-checkins at python.org
Tue May 13 06:57:28 CEST 2014
http://hg.python.org/cpython/rev/ce6187f97a51
changeset: 90679:ce6187f97a51
parent: 90677:7e1b3f804279
parent: 90678:4ea86cd87f95
user: Raymond Hettinger <python at rcn.com>
date: Mon May 12 21:57:19 2014 -0700
summary:
merge
files:
Lib/urllib/robotparser.py | 11 +++++++++--
1 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/Lib/urllib/robotparser.py b/Lib/urllib/robotparser.py
--- a/Lib/urllib/robotparser.py
+++ b/Lib/urllib/robotparser.py
@@ -7,7 +7,7 @@
     2) PSF license for Python 2.2
 
     The robots.txt Exclusion Protocol is implemented as specified in
-    http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
+    http://www.robotstxt.org/norobots-rfc.txt
 """
 
 import urllib.parse, urllib.request
@@ -57,7 +57,7 @@
         except urllib.error.HTTPError as err:
             if err.code in (401, 403):
                 self.disallow_all = True
-            elif err.code >= 400:
+            elif err.code >= 400 and err.code < 500:
                 self.allow_all = True
         else:
             raw = f.read()
@@ -85,6 +85,7 @@
         state = 0
         entry = Entry()
 
+        self.modified()
         for line in lines:
             if not line:
                 if state == 1:
@@ -129,6 +130,12 @@
             return False
         if self.allow_all:
             return True
+        # Until the robots.txt file has been read or found not
+        # to exist, we must assume that no url is allowable.
+        # This prevents false positives when a user erroneously
+        # calls can_fetch() before calling read().
+        if not self.last_checked:
+            return False
         # search for given user agent matches
         # the first match counts
         parsed_url = urllib.parse.urlparse(urllib.parse.unquote(url))
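
The narrowed error handling in the read() hunk is easiest to see end to
end. The sketch below is not part of the changeset; it probes read()
against a throwaway local server, and the _StatusHandler class and the
probe() helper are illustrative names, not library API:

import http.server
import threading
import urllib.robotparser

class _StatusHandler(http.server.BaseHTTPRequestHandler):
    status = 404
    def do_GET(self):
        self.send_error(self.status)
    def log_message(self, format, *args):  # silence request logging
        pass

def probe(status):
    """Fetch robots.txt from a server that answers with `status`."""
    _StatusHandler.status = status
    server = http.server.HTTPServer(("127.0.0.1", 0), _StatusHandler)
    threading.Thread(target=server.handle_request, daemon=True).start()
    rp = urllib.robotparser.RobotFileParser(
        "http://127.0.0.1:%d/robots.txt" % server.server_port)
    rp.read()
    server.server_close()
    return rp.disallow_all, rp.allow_all

print(probe(403))  # (True, False): 401/403 still disallow everything
print(probe(404))  # (False, True): other 4xx still allow everything
print(probe(503))  # (False, False): 5xx no longer implies allow_all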
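
The new last_checked guard in can_fetch() can be demonstrated without
any network traffic at all. A minimal sketch (the user agent and URLs
are illustrative placeholders):

import urllib.robotparser

rp = urllib.robotparser.RobotFileParser()
rp.set_url("http://example.com/robots.txt")

# Before read()/parse(), last_checked is still 0, so the new guard
# reports the url as not fetchable instead of falling through to True.
print(rp.can_fetch("MyCrawler", "http://example.com/page.html"))  # False

# parse() now calls self.modified(), which stamps last_checked, so a
# parsed rule set behaves as before.
rp.parse(["User-agent: *", "Allow: /"])
print(rp.mtime() > 0)                                             # True
print(rp.can_fetch("MyCrawler", "http://example.com/page.html"))  # True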
--
Repository URL: http://hg.python.org/cpython