/[gentoo-x86]/dev-python/beautifulsoup/files/beautifulsoup-3.1.0.1-python-3.patch
Gentoo

Contents of /dev-python/beautifulsoup/files/beautifulsoup-3.1.0.1-python-3.patch

Parent Directory | Revision Log


Revision 1.3 - (show annotations) (download) (as text)
Wed May 22 20:41:46 2013 UTC (7 years, 2 months ago) by idella4
Branch: MAIN
CVS Tags: HEAD
Changes since 1.2: +0 -0 lines
File MIME type: text/x-diff
Revert of -3.1.0.1-r1 ebuild with patch, added ~amd-fbsd

(Portage version: 2.1.11.62/cvs/Linux x86_64, signed Manifest commit with key 0xB8072B0D)

1 --- BeautifulSoup.py
2 +++ BeautifulSoup.py
3 @@ -76,7 +76,7 @@
4 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
5
6 """
7 -from __future__ import generators
8 +
9
10 __author__ = "Leonard Richardson (leonardr@segfault.org)"
11 __version__ = "3.1.0.1"
12 @@ -84,12 +84,12 @@
13 __license__ = "New-style BSD"
14
15 import codecs
16 -import markupbase
17 +import _markupbase
18 import types
19 import re
20 -from HTMLParser import HTMLParser, HTMLParseError
21 +from html.parser import HTMLParser, HTMLParseError
22 try:
23 - from htmlentitydefs import name2codepoint
24 + from html.entities import name2codepoint
25 except ImportError:
26 name2codepoint = {}
27 try:
28 @@ -98,18 +98,18 @@
29 from sets import Set as set
30
31 #These hacks make Beautiful Soup able to parse XML with namespaces
32 -markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
33 +_markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
34
35 DEFAULT_OUTPUT_ENCODING = "utf-8"
36
37 # First, the classes that represent markup elements.
38
39 -def sob(unicode, encoding):
40 +def sob(str, encoding):
41 """Returns either the given Unicode string or its encoding."""
42 if encoding is None:
43 - return unicode
44 + return str
45 else:
46 - return unicode.encode(encoding)
47 + return str.encode(encoding)
48
49 class PageElement:
50 """Contains the navigational information for some part of the page
51 @@ -178,8 +178,8 @@
52 return lastChild
53
54 def insert(self, position, newChild):
55 - if (isinstance(newChild, basestring)
56 - or isinstance(newChild, unicode)) \
57 + if (isinstance(newChild, str)
58 + or isinstance(newChild, str)) \
59 and not isinstance(newChild, NavigableString):
60 newChild = NavigableString(newChild)
61
62 @@ -334,7 +334,7 @@
63 g = generator()
64 while True:
65 try:
66 - i = g.next()
67 + i = g.__next__()
68 except StopIteration:
69 break
70 if i:
71 @@ -385,22 +385,22 @@
72 def toEncoding(self, s, encoding=None):
73 """Encodes an object to a string in some encoding, or to Unicode.
74 ."""
75 - if isinstance(s, unicode):
76 + if isinstance(s, str):
77 if encoding:
78 s = s.encode(encoding)
79 elif isinstance(s, str):
80 if encoding:
81 s = s.encode(encoding)
82 else:
83 - s = unicode(s)
84 + s = str(s)
85 else:
86 if encoding:
87 s = self.toEncoding(str(s), encoding)
88 else:
89 - s = unicode(s)
90 + s = str(s)
91 return s
92
93 -class NavigableString(unicode, PageElement):
94 +class NavigableString(str, PageElement):
95
96 def __new__(cls, value):
97 """Create a new NavigableString.
98 @@ -410,12 +410,12 @@
99 passed in to the superclass's __new__ or the superclass won't know
100 how to handle non-ASCII characters.
101 """
102 - if isinstance(value, unicode):
103 - return unicode.__new__(cls, value)
104 - return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
105 + if isinstance(value, str):
106 + return str.__new__(cls, value)
107 + return str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
108
109 def __getnewargs__(self):
110 - return (unicode(self),)
111 + return (str(self),)
112
113 def __getattr__(self, attr):
114 """text.string gives you text. This is for backwards
115 @@ -424,7 +424,7 @@
116 if attr == 'string':
117 return self
118 else:
119 - raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
120 + raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr))
121
122 def encode(self, encoding=DEFAULT_OUTPUT_ENCODING):
123 return self.decode().encode(encoding)
124 @@ -435,23 +435,23 @@
125 class CData(NavigableString):
126
127 def decodeGivenEventualEncoding(self, eventualEncoding):
128 - return u'<![CDATA[' + self + u']]>'
129 + return '<![CDATA[' + self + ']]>'
130
131 class ProcessingInstruction(NavigableString):
132
133 def decodeGivenEventualEncoding(self, eventualEncoding):
134 output = self
135 - if u'%SOUP-ENCODING%' in output:
136 + if '%SOUP-ENCODING%' in output:
137 output = self.substituteEncoding(output, eventualEncoding)
138 - return u'<?' + output + u'?>'
139 + return '<?' + output + '?>'
140
141 class Comment(NavigableString):
142 def decodeGivenEventualEncoding(self, eventualEncoding):
143 - return u'<!--' + self + u'-->'
144 + return '<!--' + self + '-->'
145
146 class Declaration(NavigableString):
147 def decodeGivenEventualEncoding(self, eventualEncoding):
148 - return u'<!' + self + u'>'
149 + return '<!' + self + '>'
150
151 class Tag(PageElement):
152
153 @@ -460,7 +460,7 @@
154 def _invert(h):
155 "Cheap function to invert a hash."
156 i = {}
157 - for k,v in h.items():
158 + for k,v in list(h.items()):
159 i[v] = k
160 return i
161
162 @@ -479,23 +479,23 @@
163 escaped."""
164 x = match.group(1)
165 if self.convertHTMLEntities and x in name2codepoint:
166 - return unichr(name2codepoint[x])
167 + return chr(name2codepoint[x])
168 elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
169 if self.convertXMLEntities:
170 return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
171 else:
172 - return u'&%s;' % x
173 + return '&%s;' % x
174 elif len(x) > 0 and x[0] == '#':
175 # Handle numeric entities
176 if len(x) > 1 and x[1] == 'x':
177 - return unichr(int(x[2:], 16))
178 + return chr(int(x[2:], 16))
179 else:
180 - return unichr(int(x[1:]))
181 + return chr(int(x[1:]))
182
183 elif self.escapeUnrecognizedEntities:
184 - return u'&amp;%s;' % x
185 + return '&amp;%s;' % x
186 else:
187 - return u'&%s;' % x
188 + return '&%s;' % x
189
190 def __init__(self, parser, name, attrs=None, parent=None,
191 previous=None):
192 @@ -524,7 +524,7 @@
193 return kval
194 return (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
195 self._convertEntities, val))
196 - self.attrs = map(convert, self.attrs)
197 + self.attrs = list(map(convert, self.attrs))
198
199 def get(self, key, default=None):
200 """Returns the value of the 'key' attribute for the tag, or
201 @@ -533,7 +533,7 @@
202 return self._getAttrMap().get(key, default)
203
204 def has_key(self, key):
205 - return self._getAttrMap().has_key(key)
206 + return key in self._getAttrMap()
207
208 def __getitem__(self, key):
209 """tag[key] returns the value of the 'key' attribute for the tag,
210 @@ -551,7 +551,7 @@
211 def __contains__(self, x):
212 return x in self.contents
213
214 - def __nonzero__(self):
215 + def __bool__(self):
216 "A tag is non-None even if it has no contents."
217 return True
218
219 @@ -577,14 +577,14 @@
220 #We don't break because bad HTML can define the same
221 #attribute multiple times.
222 self._getAttrMap()
223 - if self.attrMap.has_key(key):
224 + if key in self.attrMap:
225 del self.attrMap[key]
226
227 def __call__(self, *args, **kwargs):
228 """Calling a tag like a function is the same as calling its
229 findAll() method. Eg. tag('a') returns a list of all the A tags
230 found within this tag."""
231 - return apply(self.findAll, args, kwargs)
232 + return self.findAll(*args, **kwargs)
233
234 def __getattr__(self, tag):
235 #print "Getattr %s.%s" % (self.__class__, tag)
236 @@ -592,7 +592,7 @@
237 return self.find(tag[:-3])
238 elif tag.find('__') != 0:
239 return self.find(tag)
240 - raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
241 + raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__, tag))
242
243 def __eq__(self, other):
244 """Returns true iff this tag has the same name, the same attributes,
245 @@ -868,7 +868,7 @@
246 if isinstance(markupName, Tag):
247 markup = markupName
248 markupAttrs = markup
249 - callFunctionWithTagData = callable(self.name) \
250 + callFunctionWithTagData = hasattr(self.name, '__call__') \
251 and not isinstance(markupName, Tag)
252
253 if (not self.name) \
254 @@ -880,7 +880,7 @@
255 else:
256 match = True
257 markupAttrMap = None
258 - for attr, matchAgainst in self.attrs.items():
259 + for attr, matchAgainst in list(self.attrs.items()):
260 if not markupAttrMap:
261 if hasattr(markupAttrs, 'get'):
262 markupAttrMap = markupAttrs
263 @@ -921,16 +921,16 @@
264 if self._matches(markup, self.text):
265 found = markup
266 else:
267 - raise Exception, "I don't know how to match against a %s" \
268 - % markup.__class__
269 + raise Exception("I don't know how to match against a %s" \
270 + % markup.__class__)
271 return found
272
273 def _matches(self, markup, matchAgainst):
274 #print "Matching %s against %s" % (markup, matchAgainst)
275 result = False
276 - if matchAgainst == True and type(matchAgainst) == types.BooleanType:
277 + if matchAgainst == True and type(matchAgainst) == bool:
278 result = markup != None
279 - elif callable(matchAgainst):
280 + elif hasattr(matchAgainst, '__call__'):
281 result = matchAgainst(markup)
282 else:
283 #Custom match methods take the tag as an argument, but all
284 @@ -938,7 +938,7 @@
285 if isinstance(markup, Tag):
286 markup = markup.name
287 if markup is not None and not isString(markup):
288 - markup = unicode(markup)
289 + markup = str(markup)
290 #Now we know that chunk is either a string, or None.
291 if hasattr(matchAgainst, 'match'):
292 # It's a regexp object.
293 @@ -947,10 +947,10 @@
294 and (markup is not None or not isString(matchAgainst))):
295 result = markup in matchAgainst
296 elif hasattr(matchAgainst, 'items'):
297 - result = markup.has_key(matchAgainst)
298 + result = matchAgainst in markup
299 elif matchAgainst and isString(markup):
300 - if isinstance(markup, unicode):
301 - matchAgainst = unicode(matchAgainst)
302 + if isinstance(markup, str):
303 + matchAgainst = str(matchAgainst)
304 else:
305 matchAgainst = str(matchAgainst)
306
307 @@ -971,13 +971,13 @@
308 """Convenience method that works with all 2.x versions of Python
309 to determine whether or not something is listlike."""
310 return ((hasattr(l, '__iter__') and not isString(l))
311 - or (type(l) in (types.ListType, types.TupleType)))
312 + or (type(l) in (list, tuple)))
313
314 def isString(s):
315 """Convenience method that works with all 2.x versions of Python
316 to determine whether or not something is stringlike."""
317 try:
318 - return isinstance(s, unicode) or isinstance(s, basestring)
319 + return isinstance(s, str) or isinstance(s, str)
320 except NameError:
321 return isinstance(s, str)
322
323 @@ -989,7 +989,7 @@
324 for portion in args:
325 if hasattr(portion, 'items'):
326 #It's a map. Merge it.
327 - for k,v in portion.items():
328 + for k,v in list(portion.items()):
329 built[k] = v
330 elif isList(portion) and not isString(portion):
331 #It's a list. Map each item to the default.
332 @@ -1034,7 +1034,7 @@
333 object, possibly one with a %SOUP-ENCODING% slot into which an
334 encoding will be plugged later."""
335 if text[:3] == "xml":
336 - text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
337 + text = "xml version='1.0' encoding='%SOUP-ENCODING%'"
338 self._toStringSubclass(text, ProcessingInstruction)
339
340 def handle_comment(self, text):
341 @@ -1044,7 +1044,7 @@
342 def handle_charref(self, ref):
343 "Handle character references as data."
344 if self.soup.convertEntities:
345 - data = unichr(int(ref))
346 + data = chr(int(ref))
347 else:
348 data = '&#%s;' % ref
349 self.handle_data(data)
350 @@ -1056,7 +1056,7 @@
351 data = None
352 if self.soup.convertHTMLEntities:
353 try:
354 - data = unichr(name2codepoint[ref])
355 + data = chr(name2codepoint[ref])
356 except KeyError:
357 pass
358
359 @@ -1147,7 +1147,7 @@
360 lambda x: '<!' + x.group(1) + '>')
361 ]
362
363 - ROOT_TAG_NAME = u'[document]'
364 + ROOT_TAG_NAME = '[document]'
365
366 HTML_ENTITIES = "html"
367 XML_ENTITIES = "xml"
368 @@ -1236,14 +1236,14 @@
369 def _feed(self, inDocumentEncoding=None, isHTML=False):
370 # Convert the document to Unicode.
371 markup = self.markup
372 - if isinstance(markup, unicode):
373 + if isinstance(markup, str):
374 if not hasattr(self, 'originalEncoding'):
375 self.originalEncoding = None
376 else:
377 dammit = UnicodeDammit\
378 (markup, [self.fromEncoding, inDocumentEncoding],
379 smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
380 - markup = dammit.unicode
381 + markup = dammit.str
382 self.originalEncoding = dammit.originalEncoding
383 self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
384 if markup:
385 @@ -1269,8 +1269,8 @@
386 def isSelfClosingTag(self, name):
387 """Returns true iff the given string is the name of a
388 self-closing tag according to this parser."""
389 - return self.SELF_CLOSING_TAGS.has_key(name) \
390 - or self.instanceSelfClosingTags.has_key(name)
391 + return name in self.SELF_CLOSING_TAGS \
392 + or name in self.instanceSelfClosingTags
393
394 def reset(self):
395 Tag.__init__(self, self, self.ROOT_TAG_NAME)
396 @@ -1305,7 +1305,7 @@
397
398 def endData(self, containerClass=NavigableString):
399 if self.currentData:
400 - currentData = u''.join(self.currentData)
401 + currentData = ''.join(self.currentData)
402 if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
403 not set([tag.name for tag in self.tagStack]).intersection(
404 self.PRESERVE_WHITESPACE_TAGS)):
405 @@ -1368,7 +1368,7 @@
406
407 nestingResetTriggers = self.NESTABLE_TAGS.get(name)
408 isNestable = nestingResetTriggers != None
409 - isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
410 + isResetNesting = name in self.RESET_NESTING_TAGS
411 popTo = None
412 inclusive = True
413 for i in range(len(self.tagStack)-1, 0, -1):
414 @@ -1381,7 +1381,7 @@
415 if (nestingResetTriggers != None
416 and p.name in nestingResetTriggers) \
417 or (nestingResetTriggers == None and isResetNesting
418 - and self.RESET_NESTING_TAGS.has_key(p.name)):
419 + and p.name in self.RESET_NESTING_TAGS):
420
421 #If we encounter one of the nesting reset triggers
422 #peculiar to this tag, or we encounter another tag
423 @@ -1399,7 +1399,7 @@
424 if self.quoteStack:
425 #This is not a real tag.
426 #print "<%s> is not real!" % name
427 - attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
428 + attrs = ''.join([' %s="%s"' % (x_y[0], x_y[1]) for x_y in attrs])
429 self.handle_data('<%s%s>' % (name, attrs))
430 return
431 self.endData()
432 @@ -1493,7 +1493,7 @@
433 BeautifulStoneSoup before writing your own subclass."""
434
435 def __init__(self, *args, **kwargs):
436 - if not kwargs.has_key('smartQuotesTo'):
437 + if 'smartQuotesTo' not in kwargs:
438 kwargs['smartQuotesTo'] = self.HTML_ENTITIES
439 kwargs['isHTML'] = True
440 BeautifulStoneSoup.__init__(self, *args, **kwargs)
441 @@ -1677,7 +1677,7 @@
442 parent._getAttrMap()
443 if (isinstance(tag, Tag) and len(tag.contents) == 1 and
444 isinstance(tag.contents[0], NavigableString) and
445 - not parent.attrMap.has_key(tag.name)):
446 + tag.name not in parent.attrMap):
447 parent[tag.name] = tag.contents[0]
448 BeautifulStoneSoup.popTag(self)
449
450 @@ -1751,9 +1751,9 @@
451 self._detectEncoding(markup, isHTML)
452 self.smartQuotesTo = smartQuotesTo
453 self.triedEncodings = []
454 - if markup == '' or isinstance(markup, unicode):
455 + if markup == '' or isinstance(markup, str):
456 self.originalEncoding = None
457 - self.unicode = unicode(markup)
458 + self.str = str(markup)
459 return
460
461 u = None
462 @@ -1766,7 +1766,7 @@
463 if u: break
464
465 # If no luck and we have auto-detection library, try that:
466 - if not u and chardet and not isinstance(self.markup, unicode):
467 + if not u and chardet and not isinstance(self.markup, str):
468 u = self._convertFrom(chardet.detect(self.markup)['encoding'])
469
470 # As a last resort, try utf-8 and windows-1252:
471 @@ -1775,7 +1775,7 @@
472 u = self._convertFrom(proposed_encoding)
473 if u: break
474
475 - self.unicode = u
476 + self.str = u
477 if not u: self.originalEncoding = None
478
479 def _subMSChar(self, match):
480 @@ -1783,7 +1783,7 @@
481 entity."""
482 orig = match.group(1)
483 sub = self.MS_CHARS.get(orig)
484 - if type(sub) == types.TupleType:
485 + if type(sub) == tuple:
486 if self.smartQuotesTo == 'xml':
487 sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
488 else:
489 @@ -1804,7 +1804,7 @@
490 if self.smartQuotesTo and proposed.lower() in("windows-1252",
491 "iso-8859-1",
492 "iso-8859-2"):
493 - smart_quotes_re = "([\x80-\x9f])"
494 + smart_quotes_re = b"([\x80-\x9f])"
495 smart_quotes_compiled = re.compile(smart_quotes_re)
496 markup = smart_quotes_compiled.sub(self._subMSChar, markup)
497
498 @@ -1813,7 +1813,7 @@
499 u = self._toUnicode(markup, proposed)
500 self.markup = u
501 self.originalEncoding = proposed
502 - except Exception, e:
503 + except Exception as e:
504 # print "That didn't work!"
505 # print e
506 return None
507 @@ -1842,7 +1842,7 @@
508 elif data[:4] == '\xff\xfe\x00\x00':
509 encoding = 'utf-32le'
510 data = data[4:]
511 - newdata = unicode(data, encoding)
512 + newdata = str(data, encoding)
513 return newdata
514
515 def _detectEncoding(self, xml_data, isHTML=False):
516 @@ -1855,41 +1855,41 @@
517 elif xml_data[:4] == '\x00\x3c\x00\x3f':
518 # UTF-16BE
519 sniffed_xml_encoding = 'utf-16be'
520 - xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
521 + xml_data = str(xml_data, 'utf-16be').encode('utf-8')
522 elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
523 and (xml_data[2:4] != '\x00\x00'):
524 # UTF-16BE with BOM
525 sniffed_xml_encoding = 'utf-16be'
526 - xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
527 + xml_data = str(xml_data[2:], 'utf-16be').encode('utf-8')
528 elif xml_data[:4] == '\x3c\x00\x3f\x00':
529 # UTF-16LE
530 sniffed_xml_encoding = 'utf-16le'
531 - xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
532 + xml_data = str(xml_data, 'utf-16le').encode('utf-8')
533 elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
534 (xml_data[2:4] != '\x00\x00'):
535 # UTF-16LE with BOM
536 sniffed_xml_encoding = 'utf-16le'
537 - xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
538 + xml_data = str(xml_data[2:], 'utf-16le').encode('utf-8')
539 elif xml_data[:4] == '\x00\x00\x00\x3c':
540 # UTF-32BE
541 sniffed_xml_encoding = 'utf-32be'
542 - xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
543 + xml_data = str(xml_data, 'utf-32be').encode('utf-8')
544 elif xml_data[:4] == '\x3c\x00\x00\x00':
545 # UTF-32LE
546 sniffed_xml_encoding = 'utf-32le'
547 - xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
548 + xml_data = str(xml_data, 'utf-32le').encode('utf-8')
549 elif xml_data[:4] == '\x00\x00\xfe\xff':
550 # UTF-32BE with BOM
551 sniffed_xml_encoding = 'utf-32be'
552 - xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
553 + xml_data = str(xml_data[4:], 'utf-32be').encode('utf-8')
554 elif xml_data[:4] == '\xff\xfe\x00\x00':
555 # UTF-32LE with BOM
556 sniffed_xml_encoding = 'utf-32le'
557 - xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
558 + xml_data = str(xml_data[4:], 'utf-32le').encode('utf-8')
559 elif xml_data[:3] == '\xef\xbb\xbf':
560 # UTF-8 with BOM
561 sniffed_xml_encoding = 'utf-8'
562 - xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
563 + xml_data = str(xml_data[3:], 'utf-8').encode('utf-8')
564 else:
565 sniffed_xml_encoding = 'ascii'
566 pass
567 @@ -1954,41 +1954,41 @@
568 250,251,252,253,254,255)
569 import string
570 c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
571 - ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
572 + ''.join(map(chr, list(range(256)))), ''.join(map(chr, emap)))
573 return s.translate(c.EBCDIC_TO_ASCII_MAP)
574
575 - MS_CHARS = { '\x80' : ('euro', '20AC'),
576 - '\x81' : ' ',
577 - '\x82' : ('sbquo', '201A'),
578 - '\x83' : ('fnof', '192'),
579 - '\x84' : ('bdquo', '201E'),
580 - '\x85' : ('hellip', '2026'),
581 - '\x86' : ('dagger', '2020'),
582 - '\x87' : ('Dagger', '2021'),
583 - '\x88' : ('circ', '2C6'),
584 - '\x89' : ('permil', '2030'),
585 - '\x8A' : ('Scaron', '160'),
586 - '\x8B' : ('lsaquo', '2039'),
587 - '\x8C' : ('OElig', '152'),
588 - '\x8D' : '?',
589 - '\x8E' : ('#x17D', '17D'),
590 - '\x8F' : '?',
591 - '\x90' : '?',
592 - '\x91' : ('lsquo', '2018'),
593 - '\x92' : ('rsquo', '2019'),
594 - '\x93' : ('ldquo', '201C'),
595 - '\x94' : ('rdquo', '201D'),
596 - '\x95' : ('bull', '2022'),
597 - '\x96' : ('ndash', '2013'),
598 - '\x97' : ('mdash', '2014'),
599 - '\x98' : ('tilde', '2DC'),
600 - '\x99' : ('trade', '2122'),
601 - '\x9a' : ('scaron', '161'),
602 - '\x9b' : ('rsaquo', '203A'),
603 - '\x9c' : ('oelig', '153'),
604 - '\x9d' : '?',
605 - '\x9e' : ('#x17E', '17E'),
606 - '\x9f' : ('Yuml', ''),}
607 + MS_CHARS = { b'\x80' : ('euro', '20AC'),
608 + b'\x81' : ' ',
609 + b'\x82' : ('sbquo', '201A'),
610 + b'\x83' : ('fnof', '192'),
611 + b'\x84' : ('bdquo', '201E'),
612 + b'\x85' : ('hellip', '2026'),
613 + b'\x86' : ('dagger', '2020'),
614 + b'\x87' : ('Dagger', '2021'),
615 + b'\x88' : ('circ', '2C6'),
616 + b'\x89' : ('permil', '2030'),
617 + b'\x8A' : ('Scaron', '160'),
618 + b'\x8B' : ('lsaquo', '2039'),
619 + b'\x8C' : ('OElig', '152'),
620 + b'\x8D' : '?',
621 + b'\x8E' : ('#x17D', '17D'),
622 + b'\x8F' : '?',
623 + b'\x90' : '?',
624 + b'\x91' : ('lsquo', '2018'),
625 + b'\x92' : ('rsquo', '2019'),
626 + b'\x93' : ('ldquo', '201C'),
627 + b'\x94' : ('rdquo', '201D'),
628 + b'\x95' : ('bull', '2022'),
629 + b'\x96' : ('ndash', '2013'),
630 + b'\x97' : ('mdash', '2014'),
631 + b'\x98' : ('tilde', '2DC'),
632 + b'\x99' : ('trade', '2122'),
633 + b'\x9a' : ('scaron', '161'),
634 + b'\x9b' : ('rsaquo', '203A'),
635 + b'\x9c' : ('oelig', '153'),
636 + b'\x9d' : '?',
637 + b'\x9e' : ('#x17E', '17E'),
638 + b'\x9f' : ('Yuml', ''),}
639
640 #######################################################################
641
642 @@ -1997,4 +1997,4 @@
643 if __name__ == '__main__':
644 import sys
645 soup = BeautifulSoup(sys.stdin)
646 - print soup.prettify()
647 + print(soup.prettify())
648 --- BeautifulSoupTests.py
649 +++ BeautifulSoupTests.py
650 @@ -82,7 +82,7 @@
651 def testFindAllText(self):
652 soup = BeautifulSoup("<html>\xbb</html>")
653 self.assertEqual(soup.findAll(text=re.compile('.*')),
654 - [u'\xbb'])
655 + ['\xbb'])
656
657 def testFindAllByRE(self):
658 import re
659 @@ -215,7 +215,7 @@
660 soup = BeautifulSoup(self.x, parseOnlyThese=strainer)
661 self.assertEquals(len(soup), 10)
662
663 - strainer = SoupStrainer(text=lambda(x):x[8]=='3')
664 + strainer = SoupStrainer(text=lambda x:x[8]=='3')
665 soup = BeautifulSoup(self.x, parseOnlyThese=strainer)
666 self.assertEquals(len(soup), 3)
667
668 @@ -256,7 +256,7 @@
669 self.assertEqual(copied.decode(), self.soup.decode())
670
671 def testUnicodePickle(self):
672 - import cPickle as pickle
673 + import pickle as pickle
674 html = "<b>" + chr(0xc3) + "</b>"
675 soup = BeautifulSoup(html)
676 dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
677 @@ -586,23 +586,23 @@
678 self.assertEquals(soup.decode(), "<<sacr&eacute; bleu!>>")
679
680 soup = BeautifulStoneSoup(text, convertEntities=htmlEnt)
681 - self.assertEquals(soup.decode(), u"<<sacr\xe9 bleu!>>")
682 + self.assertEquals(soup.decode(), "<<sacr\xe9 bleu!>>")
683
684 # Make sure the "XML", "HTML", and "XHTML" settings work.
685 text = "&lt;&trade;&apos;"
686 soup = BeautifulStoneSoup(text, convertEntities=xmlEnt)
687 - self.assertEquals(soup.decode(), u"<&trade;'")
688 + self.assertEquals(soup.decode(), "<&trade;'")
689
690 soup = BeautifulStoneSoup(text, convertEntities=htmlEnt)
691 - self.assertEquals(soup.decode(), u"<\u2122&apos;")
692 + self.assertEquals(soup.decode(), "<\u2122&apos;")
693
694 soup = BeautifulStoneSoup(text, convertEntities=xhtmlEnt)
695 - self.assertEquals(soup.decode(), u"<\u2122'")
696 + self.assertEquals(soup.decode(), "<\u2122'")
697
698 def testNonBreakingSpaces(self):
699 soup = BeautifulSoup("<a>&nbsp;&nbsp;</a>",
700 convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
701 - self.assertEquals(soup.decode(), u"<a>\xa0\xa0</a>")
702 + self.assertEquals(soup.decode(), "<a>\xa0\xa0</a>")
703
704 def testWhitespaceInDeclaration(self):
705 self.assertSoupEquals('<! DOCTYPE>', '<!DOCTYPE>')
706 @@ -617,27 +617,27 @@
707 self.assertSoupEquals('<b>hello&nbsp;there</b>')
708
709 def testEntitiesInAttributeValues(self):
710 - self.assertSoupEquals('<x t="x&#241;">', '<x t="x\xc3\xb1"></x>',
711 + self.assertSoupEquals('<x t="x&#241;">', b'<x t="x\xc3\xb1"></x>',
712 encoding='utf-8')
713 - self.assertSoupEquals('<x t="x&#xf1;">', '<x t="x\xc3\xb1"></x>',
714 + self.assertSoupEquals('<x t="x&#xf1;">', b'<x t="x\xc3\xb1"></x>',
715 encoding='utf-8')
716
717 soup = BeautifulSoup('<x t="&gt;&trade;">',
718 convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
719 - self.assertEquals(soup.decode(), u'<x t="&gt;\u2122"></x>')
720 + self.assertEquals(soup.decode(), '<x t="&gt;\u2122"></x>')
721
722 uri = "http://crummy.com?sacr&eacute;&amp;bleu"
723 link = '<a href="%s"></a>' % uri
724
725 soup = BeautifulSoup(link, convertEntities=BeautifulSoup.HTML_ENTITIES)
726 self.assertEquals(soup.decode(),
727 - link.replace("&eacute;", u"\xe9"))
728 + link.replace("&eacute;", "\xe9"))
729
730 uri = "http://crummy.com?sacr&eacute;&bleu"
731 link = '<a href="%s"></a>' % uri
732 soup = BeautifulSoup(link, convertEntities=BeautifulSoup.HTML_ENTITIES)
733 self.assertEquals(soup.a['href'],
734 - uri.replace("&eacute;", u"\xe9"))
735 + uri.replace("&eacute;", "\xe9"))
736
737 def testNakedAmpersands(self):
738 html = {'convertEntities':BeautifulStoneSoup.HTML_ENTITIES}
739 @@ -663,13 +663,13 @@
740 smart quote fixes."""
741
742 def testUnicodeDammitStandalone(self):
743 - markup = "<foo>\x92</foo>"
744 + markup = b"<foo>\x92</foo>"
745 dammit = UnicodeDammit(markup)
746 - self.assertEquals(dammit.unicode, "<foo>&#x2019;</foo>")
747 + self.assertEquals(dammit.str, "<foo>&#x2019;</foo>")
748
749 - hebrew = "\xed\xe5\xec\xf9"
750 + hebrew = b"\xed\xe5\xec\xf9"
751 dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
752 - self.assertEquals(dammit.unicode, u'\u05dd\u05d5\u05dc\u05e9')
753 + self.assertEquals(dammit.str, '\u05dd\u05d5\u05dc\u05e9')
754 self.assertEquals(dammit.originalEncoding, 'iso-8859-8')
755
756 def testGarbageInGarbageOut(self):
757 @@ -677,13 +677,13 @@
758 asciiSoup = BeautifulStoneSoup(ascii)
759 self.assertEquals(ascii, asciiSoup.decode())
760
761 - unicodeData = u"<foo>\u00FC</foo>"
762 + unicodeData = "<foo>\u00FC</foo>"
763 utf8 = unicodeData.encode("utf-8")
764 - self.assertEquals(utf8, '<foo>\xc3\xbc</foo>')
765 + self.assertEquals(utf8, b'<foo>\xc3\xbc</foo>')
766
767 unicodeSoup = BeautifulStoneSoup(unicodeData)
768 self.assertEquals(unicodeData, unicodeSoup.decode())
769 - self.assertEquals(unicodeSoup.foo.string, u'\u00FC')
770 + self.assertEquals(unicodeSoup.foo.string, '\u00FC')
771
772 utf8Soup = BeautifulStoneSoup(utf8, fromEncoding='utf-8')
773 self.assertEquals(utf8, utf8Soup.encode('utf-8'))
774 @@ -696,18 +696,18 @@
775
776 def testHandleInvalidCodec(self):
777 for bad_encoding in ['.utf8', '...', 'utF---16.!']:
778 - soup = BeautifulSoup(u"Räksmörgås".encode("utf-8"),
779 + soup = BeautifulSoup("Räksmörgås".encode("utf-8"),
780 fromEncoding=bad_encoding)
781 self.assertEquals(soup.originalEncoding, 'utf-8')
782
783 def testUnicodeSearch(self):
784 - html = u'<html><body><h1>Räksmörgås</h1></body></html>'
785 + html = '<html><body><h1>Räksmörgås</h1></body></html>'
786 soup = BeautifulSoup(html)
787 - self.assertEqual(soup.find(text=u'Räksmörgås'),u'Räksmörgås')
788 + self.assertEqual(soup.find(text='Räksmörgås'),'Räksmörgås')
789
790 def testRewrittenXMLHeader(self):
791 - euc_jp = '<?xml version="1.0 encoding="euc-jp"?>\n<foo>\n\xa4\xb3\xa4\xec\xa4\xcfEUC-JP\xa4\xc7\xa5\xb3\xa1\xbc\xa5\xc7\xa5\xa3\xa5\xf3\xa5\xb0\xa4\xb5\xa4\xec\xa4\xbf\xc6\xfc\xcb\xdc\xb8\xec\xa4\xce\xa5\xd5\xa5\xa1\xa5\xa4\xa5\xeb\xa4\xc7\xa4\xb9\xa1\xa3\n</foo>\n'
792 - utf8 = "<?xml version='1.0' encoding='utf-8'?>\n<foo>\n\xe3\x81\x93\xe3\x82\x8c\xe3\x81\xafEUC-JP\xe3\x81\xa7\xe3\x82\xb3\xe3\x83\xbc\xe3\x83\x87\xe3\x82\xa3\xe3\x83\xb3\xe3\x82\xb0\xe3\x81\x95\xe3\x82\x8c\xe3\x81\x9f\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe3\x81\xae\xe3\x83\x95\xe3\x82\xa1\xe3\x82\xa4\xe3\x83\xab\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\n</foo>\n"
793 + euc_jp = b'<?xml version="1.0 encoding="euc-jp"?>\n<foo>\n\xa4\xb3\xa4\xec\xa4\xcfEUC-JP\xa4\xc7\xa5\xb3\xa1\xbc\xa5\xc7\xa5\xa3\xa5\xf3\xa5\xb0\xa4\xb5\xa4\xec\xa4\xbf\xc6\xfc\xcb\xdc\xb8\xec\xa4\xce\xa5\xd5\xa5\xa1\xa5\xa4\xa5\xeb\xa4\xc7\xa4\xb9\xa1\xa3\n</foo>\n'
794 + utf8 = b"<?xml version='1.0' encoding='utf-8'?>\n<foo>\n\xe3\x81\x93\xe3\x82\x8c\xe3\x81\xafEUC-JP\xe3\x81\xa7\xe3\x82\xb3\xe3\x83\xbc\xe3\x83\x87\xe3\x82\xa3\xe3\x83\xb3\xe3\x82\xb0\xe3\x81\x95\xe3\x82\x8c\xe3\x81\x9f\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe3\x81\xae\xe3\x83\x95\xe3\x82\xa1\xe3\x82\xa4\xe3\x83\xab\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\n</foo>\n"
795 soup = BeautifulStoneSoup(euc_jp)
796 if soup.originalEncoding != "euc-jp":
797 raise Exception("Test failed when parsing euc-jp document. "
798 @@ -718,12 +718,12 @@
799 self.assertEquals(soup.originalEncoding, "euc-jp")
800 self.assertEquals(soup.renderContents('utf-8'), utf8)
801
802 - old_text = "<?xml encoding='windows-1252'><foo>\x92</foo>"
803 + old_text = b"<?xml encoding='windows-1252'><foo>\x92</foo>"
804 new_text = "<?xml version='1.0' encoding='utf-8'?><foo>&rsquo;</foo>"
805 self.assertSoupEquals(old_text, new_text)
806
807 def testRewrittenMetaTag(self):
808 - no_shift_jis_html = '''<html><head>\n<meta http-equiv="Content-language" content="ja" /></head><body><pre>\n\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B\n</pre></body></html>'''
809 + no_shift_jis_html = b'''<html><head>\n<meta http-equiv="Content-language" content="ja" /></head><body><pre>\n\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B\n</pre></body></html>'''
810 soup = BeautifulSoup(no_shift_jis_html)
811
812 # Beautiful Soup used to try to rewrite the meta tag even if the
813 @@ -733,16 +733,16 @@
814 soup = BeautifulSoup(no_shift_jis_html, parseOnlyThese=strainer)
815 self.assertEquals(soup.contents[0].name, 'pre')
816
817 - meta_tag = ('<meta content="text/html; charset=x-sjis" '
818 - 'http-equiv="Content-type" />')
819 + meta_tag = (b'<meta content="text/html; charset=x-sjis" '
820 + b'http-equiv="Content-type" />')
821 shift_jis_html = (
822 - '<html><head>\n%s\n'
823 - '<meta http-equiv="Content-language" content="ja" />'
824 - '</head><body><pre>\n'
825 - '\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
826 - '\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
827 - '\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B\n'
828 - '</pre></body></html>') % meta_tag
829 + b'<html><head>\n' + meta_tag + b'\n'
830 + b'<meta http-equiv="Content-language" content="ja" />'
831 + b'</head><body><pre>\n'
832 + b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
833 + b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
834 + b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B\n'
835 + b'</pre></body></html>')
836 soup = BeautifulSoup(shift_jis_html)
837 if soup.originalEncoding != "shift-jis":
838 raise Exception("Test failed when parsing shift-jis document "
839 @@ -755,59 +755,59 @@
840 content_type_tag = soup.meta['content']
841 self.assertEquals(content_type_tag[content_type_tag.find('charset='):],
842 'charset=%SOUP-ENCODING%')
843 - content_type = str(soup.meta)
844 + content_type = soup.meta.decode()
845 index = content_type.find('charset=')
846 self.assertEqual(content_type[index:index+len('charset=utf8')+1],
847 'charset=utf-8')
848 content_type = soup.meta.encode('shift-jis')
849 - index = content_type.find('charset=')
850 + index = content_type.find(b'charset=')
851 self.assertEqual(content_type[index:index+len('charset=shift-jis')],
852 'charset=shift-jis'.encode())
853
854 self.assertEquals(soup.encode('utf-8'), (
855 - '<html><head>\n'
856 - '<meta content="text/html; charset=utf-8" '
857 - 'http-equiv="Content-type" />\n'
858 - '<meta http-equiv="Content-language" content="ja" />'
859 - '</head><body><pre>\n'
860 - '\xe3\x81\x93\xe3\x82\x8c\xe3\x81\xafShift-JIS\xe3\x81\xa7\xe3'
861 - '\x82\xb3\xe3\x83\xbc\xe3\x83\x87\xe3\x82\xa3\xe3\x83\xb3\xe3'
862 - '\x82\xb0\xe3\x81\x95\xe3\x82\x8c\xe3\x81\x9f\xe6\x97\xa5\xe6'
863 - '\x9c\xac\xe8\xaa\x9e\xe3\x81\xae\xe3\x83\x95\xe3\x82\xa1\xe3'
864 - '\x82\xa4\xe3\x83\xab\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\n'
865 - '</pre></body></html>'))
866 + b'<html><head>\n'
867 + b'<meta content="text/html; charset=utf-8" '
868 + b'http-equiv="Content-type" />\n'
869 + b'<meta http-equiv="Content-language" content="ja" />'
870 + b'</head><body><pre>\n'
871 + b'\xe3\x81\x93\xe3\x82\x8c\xe3\x81\xafShift-JIS\xe3\x81\xa7\xe3'
872 + b'\x82\xb3\xe3\x83\xbc\xe3\x83\x87\xe3\x82\xa3\xe3\x83\xb3\xe3'
873 + b'\x82\xb0\xe3\x81\x95\xe3\x82\x8c\xe3\x81\x9f\xe6\x97\xa5\xe6'
874 + b'\x9c\xac\xe8\xaa\x9e\xe3\x81\xae\xe3\x83\x95\xe3\x82\xa1\xe3'
875 + b'\x82\xa4\xe3\x83\xab\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\n'
876 + b'</pre></body></html>'))
877 self.assertEquals(soup.encode("shift-jis"),
878 shift_jis_html.replace('x-sjis'.encode(),
879 'shift-jis'.encode()))
880
881 - isolatin = """<html><meta http-equiv="Content-type" content="text/html; charset=ISO-Latin-1" />Sacr\xe9 bleu!</html>"""
882 + isolatin = b"""<html><meta http-equiv="Content-type" content="text/html; charset=ISO-Latin-1" />Sacr\xe9 bleu!</html>"""
883 soup = BeautifulSoup(isolatin)
884
885 utf8 = isolatin.replace("ISO-Latin-1".encode(), "utf-8".encode())
886 - utf8 = utf8.replace("\xe9", "\xc3\xa9")
887 + utf8 = utf8.replace(b"\xe9", b"\xc3\xa9")
888 self.assertSoupEquals(soup.encode("utf-8"), utf8, encoding='utf-8')
889
890 def testHebrew(self):
891 - iso_8859_8= '<HEAD>\n<TITLE>Hebrew (ISO 8859-8) in Visual Directionality</TITLE>\n\n\n\n</HEAD>\n<BODY>\n<H1>Hebrew (ISO 8859-8) in Visual Directionality</H1>\n\xed\xe5\xec\xf9\n</BODY>\n'
892 - utf8 = '<head>\n<title>Hebrew (ISO 8859-8) in Visual Directionality</title>\n</head>\n<body>\n<h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\n\xd7\x9d\xd7\x95\xd7\x9c\xd7\xa9\n</body>\n'
893 + iso_8859_8= b'<HEAD>\n<TITLE>Hebrew (ISO 8859-8) in Visual Directionality</TITLE>\n\n\n\n</HEAD>\n<BODY>\n<H1>Hebrew (ISO 8859-8) in Visual Directionality</H1>\n\xed\xe5\xec\xf9\n</BODY>\n'
894 + utf8 = b'<head>\n<title>Hebrew (ISO 8859-8) in Visual Directionality</title>\n</head>\n<body>\n<h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\n\xd7\x9d\xd7\x95\xd7\x9c\xd7\xa9\n</body>\n'
895 soup = BeautifulStoneSoup(iso_8859_8, fromEncoding="iso-8859-8")
896 self.assertEquals(soup.encode('utf-8'), utf8)
897
898 def testSmartQuotesNotSoSmartAnymore(self):
899 - self.assertSoupEquals("\x91Foo\x92 <!--blah-->",
900 + self.assertSoupEquals(b"\x91Foo\x92 <!--blah-->",
901 '&lsquo;Foo&rsquo; <!--blah-->')
902
903 def testDontConvertSmartQuotesWhenAlsoConvertingEntities(self):
904 - smartQuotes = "Il a dit, \x8BSacr&eacute; bl&#101;u!\x9b"
905 + smartQuotes = b"Il a dit, \x8BSacr&eacute; bl&#101;u!\x9b"
906 soup = BeautifulSoup(smartQuotes)
907 self.assertEquals(soup.decode(),
908 'Il a dit, &lsaquo;Sacr&eacute; bl&#101;u!&rsaquo;')
909 soup = BeautifulSoup(smartQuotes, convertEntities="html")
910 self.assertEquals(soup.encode('utf-8'),
911 - 'Il a dit, \xe2\x80\xb9Sacr\xc3\xa9 bleu!\xe2\x80\xba')
912 + b'Il a dit, \xe2\x80\xb9Sacr\xc3\xa9 bleu!\xe2\x80\xba')
913
914 def testDontSeeSmartQuotesWhereThereAreNone(self):
915 - utf_8 = "\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
916 + utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
917 self.assertSoupEquals(utf_8, encoding='utf-8')
918
919
920 --- setup.py
921 +++ setup.py
922 @@ -19,19 +19,19 @@
923 suite = loader.loadTestsFromModule(BeautifulSoupTests)
924 suite.run(result)
925 if not result.wasSuccessful():
926 - print "Unit tests have failed!"
927 + print("Unit tests have failed!")
928 for l in result.errors, result.failures:
929 for case, error in l:
930 - print "-" * 80
931 + print("-" * 80)
932 desc = case.shortDescription()
933 if desc:
934 - print desc
935 - print error
936 - print '''If you see an error like: "'ascii' codec can't encode character...", see\nthe Beautiful Soup documentation:\n http://www.crummy.com/software/BeautifulSoup/documentation.html#Why%20can't%20Beautiful%20Soup%20print%20out%20the%20non-ASCII%20characters%20I%20gave%20it?'''
937 - print "This might or might not be a problem depending on what you plan to do with\nBeautiful Soup."
938 + print(desc)
939 + print(error)
940 + print('''If you see an error like: "'ascii' codec can't encode character...", see\nthe Beautiful Soup documentation:\n http://www.crummy.com/software/BeautifulSoup/documentation.html#Why%20can't%20Beautiful%20Soup%20print%20out%20the%20non-ASCII%20characters%20I%20gave%20it?''')
941 + print("This might or might not be a problem depending on what you plan to do with\nBeautiful Soup.")
942 if sys.argv[1] == 'sdist':
943 - print
944 - print "I'm not going to make a source distribution since the tests don't pass."
945 + print()
946 + print("I'm not going to make a source distribution since the tests don't pass.")
947 sys.exit(1)
948
949 setup(name="BeautifulSoup",

  ViewVC Help
Powered by ViewVC 1.1.20