j***@gmail.com
2018-04-16 09:55:23 UTC
Recientemente, los Estados Unidos atacaron militarmente a Siria en represalia por el uso de armas químicas. Rusia está desinformando a sus ciudadanos y se espera una escalada de las hostilidades. Con la prueba que hice, creo que se debe usar este código para avisar a los ciudadanos de Rusia en Facebook sobre estos ataques. El código es este:
from bs4 import BeautifulSoup
import urllib2
import urlparse
import sys
import string
reload(sys)
sys.setdefaultencoding('utf8')
rr = open('rr2')
rr = rr.read()
rr = rr.split(',')
u = []
d = open('f3.txt')
d2 = d.read()
d.close()
d3 = d2.split('\n')
o = {}
g = 0
for x in d3:
try:
r = urllib2.urlopen(x, timeout=30).read()
soup = BeautifulSoup(r, "html.parser")
if x:
e = urlparse.urlparse(x)
else:
continue
for link in soup.findAll('a'):
print link
if link!=None:
v = link.text
y = link.get('href')
if y:
t = urlparse.urlparse(y)
else:
g = 0
for x in d3:
try:
r = urllib2.urlopen(x, timeout=30).read()
soup = BeautifulSoup(r, "html.parser")
if x:
e = urlparse.urlparse(x)
else:
continue
for link in soup.findAll('a'):
print link
if link!=None:
v = link.text
y = link.get('href')
if y:
t = urlparse.urlparse(y)
else:
continue
if t[0]=='':
y = e[0]+'://'+e[1]+y
v = v.rstrip(' \n\t\0')
v = v.lstrip(' \n\t\0')
for x in rr:
if string.find(v, x)>0:
if o.has_key(v)==False:
o[v] = y
except:
g=g+1
continue
r = ''
s = open('rr7', 'w')
for x in o.keys():
r = r+'<a href="'+o[x]+'">'+x+'</a><br>'
y = e[0]+'://'+e[1]+y
v = v.rstrip(' \n\t\0')
v = v.lstrip(' \n\t\0')
for x in rr:
if string.find(v, x)>0:
if o.has_key(v)==False:
o[v] = y
except:
g=g+1
continue
r = ''
for x in o.keys():
r = r+'<a href="'+o[x]+'">'+x+'</a><br>''
#print g
s.write(r)
s.close()
import urllib2
import urlparse
import sys
import string
reload(sys)
sys.setdefaultencoding('utf8')
rr = open('rr2')
rr = rr.read()
rr = rr.split(',')
u = []
d = open('f3.txt')
d2 = d.read()
d.close()
d3 = d2.split('\n')
o = {}
g = 0
for x in d3:
try:
r = urllib2.urlopen(x, timeout=30).read()
soup = BeautifulSoup(r, "html.parser")
if x:
e = urlparse.urlparse(x)
else:
continue
for link in soup.findAll('a'):
print link
if link!=None:
v = link.text
y = link.get('href')
if y:
t = urlparse.urlparse(y)
else:
g = 0
for x in d3:
try:
r = urllib2.urlopen(x, timeout=30).read()
soup = BeautifulSoup(r, "html.parser")
if x:
e = urlparse.urlparse(x)
else:
continue
for link in soup.findAll('a'):
print link
if link!=None:
v = link.text
y = link.get('href')
if y:
t = urlparse.urlparse(y)
else:
continue
if t[0]=='':
y = e[0]+'://'+e[1]+y
v = v.rstrip(' \n\t\0')
v = v.lstrip(' \n\t\0')
for x in rr:
if string.find(v, x)>0:
if o.has_key(v)==False:
o[v] = y
except:
g=g+1
continue
r = ''
s = open('rr7', 'w')
for x in o.keys():
r = r+'<a href="'+o[x]+'">'+x+'</a><br>'
y = e[0]+'://'+e[1]+y
v = v.rstrip(' \n\t\0')
v = v.lstrip(' \n\t\0')
for x in rr:
if string.find(v, x)>0:
if o.has_key(v)==False:
o[v] = y
except:
g=g+1
continue
r = ''
for x in o.keys():
r = r+'<a href="'+o[x]+'">'+x+'</a><br>''
#print g
s.write(r)
s.close()