"""Random-walk crawler over Baidu Baike.

Starts from the page for "网络爬虫" (web crawler) and follows 20 random
internal `/item/...` links, printing each visited page title.  When a page
has no acceptable outgoing link, it backtracks one step (pops the history).

Hrefs on Baike pages come in two flavours: already percent-encoded
(``/item/%E7%BD%91...``) or containing raw Chinese characters
(``/item/网络爬虫``).  ``urlopen`` rejects non-ASCII URLs, so raw Chinese
paths must be percent-encoded before fetching.
"""
from urllib.request import urlopen
from urllib.parse import quote
import re
import random

from bs4 import BeautifulSoup  # third-party: pip install beautifulsoup4 lxml

base_url = "https://baike.baidu.com"

# Visited-path history; the seed is the percent-encoded page for "网络爬虫".
his = ["/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711"]

# Accept only hrefs whose path after /item/ is EITHER fully percent-encoded
# OR fully Chinese.  Hoisted out of the loop so it compiles once.
ITEM_HREF = re.compile(r"^/item/((?:%.{2})+|[\u4e00-\u9fa5]+)$")

for i in range(20):
    url = base_url + his[-1]
    html = urlopen(url).read().decode('utf-8')
    # NOTE(review): this line is outside the patch hunks; the tutorial this
    # file belongs to builds the soup with the lxml parser — confirm.
    soup = BeautifulSoup(html, features='lxml')
    print(i, soup.find('h1').get_text(), ' url: ', his[-1])

    # find valid urls
    sub_urls = soup.find_all("a", {"target": "_blank", "href": ITEM_HREF})

    if sub_urls:
        href = random.choice(sub_urls)['href']
        # quote() with '%' (and '/') marked safe leaves already-encoded
        # paths byte-identical and percent-encodes raw Chinese characters,
        # so one expression replaces the original's two-branch re.findall
        # dance (which relied on a fragile single-element unpacking).
        his.append("/item/" + quote(href[len("/item/"):], safe="/%"))
    else:
        # no valid sub link found — backtrack one step
        his.pop()