from mechanize import Browser

br = Browser()
br.open('http://somewebpage')
html = br.response().readlines()
for line in html:
    print(line)
When printing a line from an HTML file, I'm trying to find a way to show only the contents of each HTML element, not the formatting itself. If it finds '<a href="whatever.example">some text</a>', it should print only 'some text'; '<b>hello</b>' should print 'hello'; and so on. How would one go about doing this?
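The answers below all build on Eloff's widely cited HTMLParser-based answer. For context, here is a minimal sketch of that basic approach (Python 3's html.parser; the class name TagStripper and the sample string are mine):

from html.parser import HTMLParser

class TagStripper(HTMLParser):
    def __init__(self):
        super().__init__(convert_charrefs=True)
        self.fed = []
    def handle_data(self, d):
        # Collect only the text between tags; the tags themselves are skipped.
        self.fed.append(d)
    def get_data(self):
        return ''.join(self.fed)

def strip_tags(html):
    s = TagStripper()
    s.feed(html)
    return s.get_data()

print(strip_tags('<b>hello</b>'))  # -> hello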
For a project, I needed to strip HTML this way, but also CSS and JS. Thus, I made this variation of Eloff's answer:
from html.parser import HTMLParser

class MLStripper(HTMLParser):
    def __init__(self):
        super().__init__(convert_charrefs=True)
        self.fed = []
        self.css = False  # True while inside a <style> or <script> block

    def handle_starttag(self, tag, attrs):
        if tag in ("style", "script"):
            self.css = True

    def handle_endtag(self, tag):
        if tag in ("style", "script"):
            self.css = False

    def handle_data(self, d):
        # Keep text only when we are not inside a style/script block.
        if not self.css:
            self.fed.append(d)

    def get_data(self):
        return ''.join(self.fed)

def strip_tags(html):
    s = MLStripper()
    s.feed(html)
    return s.get_data()
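A quick sanity check of the behavior (the sample strings are mine):

print(strip_tags('<style>p { color: red; }</style><p>some text</p>'))  # -> some text
print(strip_tags('<script>alert(1)</script><b>hello</b>'))             # -> hello

The CSS inside <style> and the JS inside <script> are dropped along with the tags, while ordinary element content is kept.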
The solutions based on an HTML parser are all breakable if they run only once:

strip_tags('<<b>script>alert("hacked")<</b>/script>')

results in:

<script>alert("hacked")</script>

which is exactly what you want to prevent. If you use an HTMLParser-based approach, feed the text repeatedly and count the tags until none remain:
from html.parser import HTMLParser

class MLStripper(HTMLParser):
    def __init__(self):
        # convert_charrefs=False so &lt;/&gt; entities are not turned back
        # into real angle brackets between passes; unhandled entities are
        # simply dropped, as in the old Python 2 HTMLParser.
        super().__init__(convert_charrefs=False)
        self.fed = []
        self.containstags = False

    def handle_starttag(self, tag, attrs):
        # Remember that at least one tag was seen in this pass.
        self.containstags = True

    def handle_data(self, d):
        self.fed.append(d)

    def has_tags(self):
        return self.containstags

    def get_data(self):
        return ''.join(self.fed)

def strip_tags(html):
    must_filter = True
    while must_filter:
        s = MLStripper()
        s.feed(html)
        html = s.get_data()
        # Repeat until a pass finds no tags at all.
        must_filter = s.has_tags()
    return html
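Applied to the payload above, the looping version leaves nothing executable behind: the first pass removes <b> and </b>, the second pass removes the reassembled <script> tags, and the third pass finds no tags, so the loop stops:

print(strip_tags('<<b>script>alert("hacked")<</b>/script>'))
# -> alert("hacked")  -- no markup survives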
I was parsing GitHub readmes and found that the following works really well:
import re
import lxml.html

def strip_markdown(x):
    # Inline links: [text](url) -> text
    links_sub = re.sub(r'\[(.+)\]\([^\)]+\)', r'\1', x)
    # Bold: **text** -> text
    bold_sub = re.sub(r'\*\*([^*]+)\*\*', r'\1', links_sub)
    # Emphasis: *text* -> text
    emph_sub = re.sub(r'\*([^*]+)\*', r'\1', bold_sub)
    return emph_sub

def strip_html(x):
    return lxml.html.fromstring(x).text_content() if x else ''
Then:
readme = """<img src="https://raw.githubusercontent.com/kootenpv/sky/master/resources/skylogo.png" />
sky is a web scraping framework, implemented with the latest python versions in mind (3.4+).
It uses the asynchronous `asyncio` framework, as well as many popular modules
and extensions.
Most importantly, it aims for **next generation** web crawling where machine intelligence
is used to speed up the development/maintenance/reliability of crawling.
It mainly does this by considering the user to be interested in content
from *domains*, not just a collection of *single pages*
([templating approach](#templating-approach))."""
strip_markdown(strip_html(readme))
correctly removes all the markdown and HTML.
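For reference, the call above returns the plain prose of the readme, roughly as below: the <img> element contributes no text, the link target and the ** and * markers are dropped, while the backticks around `asyncio` are untouched, since the regexes above don't handle inline code.

sky is a web scraping framework, implemented with the latest python versions in mind (3.4+).
It uses the asynchronous `asyncio` framework, as well as many popular modules
and extensions.
Most importantly, it aims for next generation web crawling where machine intelligence
is used to speed up the development/maintenance/reliability of crawling.
It mainly does this by considering the user to be interested in content
from domains, not just a collection of single pages
(templating approach).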
Here is a quick fix that could be optimized even further, but it will work fine. This code replaces every non-empty tag (anything between '<' and '>' other than a bare '<>') with '' and thus strips all HTML tags from the given input text. You can run it with ./file.py inputfile outputfile.
#!/usr/bin/python
import sys

def replace(strng, replaceText):
    # Remove every occurrence of replaceText from strng.
    rpl = 0
    while rpl > -1:
        rpl = strng.find(replaceText)
        if rpl != -1:
            strng = strng[0:rpl] + strng[rpl + len(replaceText):]
    return strng

lessThanPos = -1
count = 0
listOf = []
try:
    # output file
    writeto = open(sys.argv[2], 'w')
    # read the input file and store its lines in a list
    f = open(sys.argv[1], 'r')
    for readLine in f.readlines():
        listOf.append(readLine)
    f.close()
    # remove all tags
    for line in listOf:
        count = 0
        lessThanPos = -1
        lineTemp = line
        for char in line:
            if char == "<":
                lessThanPos = count
            if char == ">":
                if lessThanPos > -1:
                    if line[lessThanPos:count + 1] != '<>':
                        # delete the tag text, from '<' through '>'
                        lineTemp = replace(lineTemp, line[lessThanPos:count + 1])
                lessThanPos = -1
            count = count + 1
        # decode escaped angle brackets back into literal characters
        lineTemp = lineTemp.replace("&lt;", "<")
        lineTemp = lineTemp.replace("&gt;", ">")
        writeto.write(lineTemp)
    writeto.close()
    print("Write To --- >", sys.argv[2])
except Exception:
    print("Help: invalid arguments or exception")
    print("Usage :", sys.argv[0], "inputfile outputfile")