
知乎盐选 Novel Downloader

I've recently been working on both short dramas and novels. The short-drama side is already up and running, but the novel side stayed unsolved for a long time. I went through a lot of tutorials from various experts online and sank plenty of hours into it; I'll skip the process and share the result directly. This is the only script I've found so far that actually works. Note that this method requires you to convert the link yourself; if anyone knows how to convert a 知乎盐选 share link into the required reader-page link, please leave a comment or PM me. Many thanks!

You'll need:

1. A Zhihu membership

2. To know how to capture cookies ("ck" below)

3. To know basic Python

Tutorial:

1. Copy the code below into a Python script and run it with your Python interpreter

import os
import re
import sys
import time
import base64
import tempfile

import requests
import ddddocr
from bs4 import BeautifulSoup
from fontTools.ttLib import TTFont
from PIL import ImageFont, Image, ImageDraw
from tkinter import Tk
from tkinter.filedialog import askdirectory


class FontDecoder:
    """Fetches 盐选 reader pages and undoes their font-based obfuscation with OCR."""

    def __init__(self, headers, cookies_raw):
        self.headers = headers
        self.cookies_dict = self._parse_cookies(cookies_raw)
        self.ocr_engine = ddddocr.DdddOcr()
        self.session = requests.Session()
        self.session.headers.update(headers)
        self.session.cookies.update(self.cookies_dict)

    @staticmethod
    def _parse_cookies(cookies_raw):
        # Turn a raw Cookie header ("a=1; b=2") into a dict; the '='.join
        # keeps values that themselves contain '=' intact.
        return {cookie.split('=')[0]: '='.join(cookie.split('=')[1:])
                for cookie in cookies_raw.split('; ')}

    def fetch_content(self, url):
        response = self.session.get(url)
        response.raise_for_status()
        time.sleep(2)  # brief pause to go easy on Zhihu's rate limiting
        soup = BeautifulSoup(response.text, 'html.parser')
        return soup, response.text

    def save_content(self, soup, title, folder_path, file_type='txt'):
        filename = f"{title}.{file_type}"
        full_path = os.path.join(folder_path, filename)
        if file_type == 'html':
            content = str(soup)
        else:
            # Plain-text mode keeps only the <p> paragraphs of the reader page.
            content = '\n'.join(tag.get_text() for tag in soup.find_all('p'))
        with open(full_path, 'w', encoding='utf-8') as file:
            file.write(content)
        print(f"Saved file to: {full_path}")

    def recognize_font(self, font_path):
        # Collect every codepoint the obfuscated font defines.
        with open(font_path, 'rb') as f:
            ttf = TTFont(f)
            cmap = ttf.getBestCmap()
            unicode_list = list(cmap.keys())

        recognition_dict = {}
        failed_recognitions = []

        img_size = 128
        font_size = int(img_size * 0.7)
        # Load the render font once instead of once per glyph.
        pil_font = ImageFont.truetype(font_path, font_size)

        for unicode_code in unicode_list:
            char = chr(unicode_code)
            img = Image.new('RGB', (img_size, img_size), 'white')
            draw = ImageDraw.Draw(img)

            # textbbox gives the glyph's bounding box; subtract its offsets
            # so the glyph is actually centered on the canvas.
            bbox = draw.textbbox((0, 0), char, font=pil_font)
            text_width = bbox[2] - bbox[0]
            text_height = bbox[3] - bbox[1]
            draw.text(((img_size - text_width) / 2 - bbox[0],
                       (img_size - text_height) / 2 - bbox[1]),
                      char, fill='black', font=pil_font)

            try:
                recognized_text = self.ocr_engine.classification(img)
                if recognized_text:
                    # Keep only the first character of the OCR result.
                    recognition_dict[char] = recognized_text[0]
                else:
                    failed_recognitions.append(char)
            except Exception as e:
                print(f"Error while recognizing character {char}: {e}")
                failed_recognitions.append(char)

        if failed_recognitions:
            print(f"The following characters could not be recognized: {failed_recognitions}")
        else:
            print("All characters recognized; mapping dictionary built.")

        print("Font mapping dictionary:", recognition_dict)

        return recognition_dict

    def replace_string_matches(self, input_str, mapping_dict):
        # One combined regex of all obfuscated characters; each match is
        # swapped for the character the OCR step recognized.
        pattern = re.compile("|".join(re.escape(key) for key in mapping_dict.keys()))

        def replace_callback(match):
            return mapping_dict[match.group(0)]

        return pattern.sub(replace_callback, input_str)

    def my_replace_text(self, input_file, output_file, replace_dict, folder_path):
        input_path = os.path.join(folder_path, input_file)
        output_path = os.path.join(folder_path, output_file)
        with open(input_path, 'r', encoding='utf-8') as f:
            content = self.replace_string_matches(f.read(), replace_dict)
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(content)
        print("Text replacement finished; result saved to:", output_path)
        # The raw (still obfuscated) file is no longer needed.
        os.remove(input_path)
        print(f"Deleted file: {input_path}")


def get_firstsession(url, i, folder_path, decoder):
    try:
        soup, text_response = decoder.fetch_content(url)
    except requests.exceptions.HTTPError as err:
        print(f"HTTP error occurred: {err}")
        return None
    except requests.exceptions.RequestException as err:
        print(f"Error occurred: {err}")
        return None

    title_tag = soup.find('h1')
    title = title_tag.text if title_tag else "Untitled"
    # Strip characters that are illegal in Windows filenames.
    title = re.sub(r'[\\/:*?"<>|]', '_', title)

    decoder.save_content(soup, title, folder_path, file_type='txt')

    # The obfuscated fonts are embedded in the page as base64 @font-face data.
    pattern = r"@font-face\s*\{[^\}]*?src:\s*url\(data:font/ttf;charset=utf-8;base64,([A-Za-z0-9+/=]+)\)"
    matches = re.findall(pattern, text_response)
    if len(matches) > 2:
        # Per the original script, the third embedded font (index 2) is the
        # one that obfuscates the body text.
        decoded_font_data = base64.b64decode(matches[2])
        # Use the system temp directory rather than a hard-coded /tmp,
        # which does not exist on Windows.
        font_file_path = os.path.join(tempfile.gettempdir(), "font_file.ttf")
        with open(font_file_path, "wb") as font_file:
            font_file.write(decoded_font_data)
        print(f"Font file saved to: {font_file_path}")

        mapping_dict = decoder.recognize_font(font_file_path)
        input_file = f'{title}.txt'
        output_file = f'第{i}节{title}.txt'  # "第{i}节" = "Section {i}"
        decoder.my_replace_text(input_file, output_file, mapping_dict, folder_path)
        os.remove(font_file_path)

    # The page's embedded JSON carries the URL of the next section, if any.
    url_pattern = re.compile(r'"next_section":{[^}]*"url":"(https?://[^"]+)"')
    match = url_pattern.search(text_response)
    if match:
        next_url = match.group(1)
        print("Next section URL: " + next_url)
        return next_url
    else:
        print("No next-section URL found.")
        return None


if __name__ == '__main__':
    root = Tk()
    root.withdraw()  # hide the empty root window
    folder_path = askdirectory(title="Choose a download folder")
    root.destroy()

    if not folder_path:
        print("No download folder selected; exiting.")
        sys.exit()

    firstsession_url = input(
        "Enter the URL of the first section to download "
        "(e.g. https://www.zhihu.com/market/paid_column/1634930379587784704/section/1634931949033426944): ")

    try:
        with open("ck.txt", "r", encoding="utf-8") as file:
            cookies = file.read().strip()
    except FileNotFoundError:
        print("ck.txt not found; make sure the file exists and contains your cookies.")
        sys.exit()

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
        'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8'
    }

    decoder = FontDecoder(headers, cookies)

    try:
        os.makedirs(folder_path, exist_ok=True)
        print(f"Download folder ready: {folder_path}")
    except Exception as e:
        print(f"Error creating folder {folder_path}: {e}")

    # Walk the sections one by one until no next-section link is found.
    i = 1
    next_url = get_firstsession(firstsession_url, i, folder_path, decoder)
    while next_url:
        i += 1
        time.sleep(5)  # pause between sections to avoid hammering the server
        next_url = get_firstsession(next_url, i, folder_path, decoder)

2. Create ck.txt in the same directory as the script and paste the cookies captured from Zhihu into it; no special format, just paste everything as a single line. An example of the expected format follows below.
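For reference, the script expects the raw Cookie header exactly as your browser sends it: name=value pairs separated by "; ", all on one line. The names and values below are made-up placeholders, not working credentials; a real Zhihu cookie string is much longer and must come from a logged-in member account:

_xsrf=PLACEHOLDER; d_c0=PLACEHOLDER; z_c0=PLACEHOLDER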

3. Run the script and enter the requested information at the prompts
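Based on the prompts and messages in the script above, a run looks roughly like this (the folder, section title, and mapping shown are illustrative, not real output):

Enter the URL of the first section to download (e.g. ...): https://www.zhihu.com/market/paid_column/1634930379587784704/section/1634931949033426944
Saved file to: D:/novels/某章节.txt
Font file saved to: C:/Users/me/AppData/Local/Temp/font_file.ttf
Font mapping dictionary: {'身': '我', '可': '的'}
Text replacement finished; result saved to: D:/novels/第1节某章节.txt
Next section URL: https://www.zhihu.com/market/paid_column/1634930379587784704/section/...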

PS: only links of the following form can be crawled:

https://www.zhihu.com/market/paid_column/1634930379587784704/section/1634931949033426944

Because of Zhihu's restrictions, you can get such a link by bookmarking the section in the mobile app and then opening your bookmarks in the web browser. Note that the link is the page URL you see once the novel text is on screen; compare it against the sample above.
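If you are unsure whether a link is in the supported form, a quick standalone check like the sketch below (my own addition, not part of the original script) can verify it before you start the downloader:

import re

# Matches the reader-page form: .../market/paid_column/<id>/section/<id>
SECTION_URL = re.compile(r"^https://www\.zhihu\.com/market/paid_column/\d+/section/\d+/?$")

def is_supported(url):
    return SECTION_URL.match(url) is not None

# The sample link above passes the check:
print(is_supported("https://www.zhihu.com/market/paid_column/1634930379587784704/section/1634931949033426944"))  # True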

The script comes from 吾爱 user @jing99.

Features
1. Downloads content from a given 知乎盐选 URL.
2. Handles the special obfuscated fonts used on the page, recognizing and replacing the characters with OCR (a condensed sketch follows this list).
3. Saves the processed content as text files.
4. Automatically detects and downloads the next section until there is none.
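The trick behind item 2 is that each page ships a scrambled @font-face font, so the codepoints in the HTML are not the characters you actually see rendered. OCR on each drawn glyph recovers the true character, and the resulting dictionary drives a plain regex substitution. Here is a condensed standalone sketch of that substitution step, with a made-up two-entry mapping (on real pages, recognize_font builds the dict automatically):

import re

# Made-up example: suppose the scrambled font renders codepoint '身' as '我'
# and '可' as '的'.
mapping = {'身': '我', '可': '的'}

pattern = re.compile("|".join(re.escape(k) for k in mapping))
print(pattern.sub(lambda m: mapping[m.group(0)], "身可小说"))  # -> 我的小说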

 

Because the recognition relies on OCR, there will be some garbled characters, but they basically don't affect normal reading.

This post is for learning and reference only. If it raises any infringement or related issues, please contact the forum owner 人皇 to have it removed. Thank you.

 