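"""Crawl with HttpCrawler, querying pages with XPath 2.0+ via Saxon.

lxml only implements XPath 1.0, so this example parses the HTML with lxml,
serializes it to XML, and hands it to Saxon (the saxonche package) for
XPath 2.0 evaluation. Requires the crawlee, lxml and saxonche packages.
"""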
import asyncio

from lxml import html
from pydantic import ValidationError
from saxonche import PySaxonProcessor

from crawlee import Request
from crawlee.crawlers import HttpCrawler, HttpCrawlingContext


async def main() -> None:
    crawler = HttpCrawler(
        max_request_retries=1,
        max_requests_per_crawl=10,
    )

    # Create the (expensive) Saxon processor once and reuse it across requests.
    saxon_proc = PySaxonProcessor(license=False)
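    # Note: PySaxonProcessor can also be used as a context manager
    # (`with PySaxonProcessor(license=False) as proc:`) to release its
    # native resources deterministically once the crawl finishes.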

    @crawler.router.default_handler
    async def request_handler(context: HttpCrawlingContext) -> None:
        context.log.info(f'Processing {context.request.url} ...')

        # Create a fresh XPath processor per request: handlers run
        # concurrently, so a shared processor's context document could be
        # replaced by another request mid-handler (e.g. across an await).
        xpath_proc = saxon_proc.new_xpath_processor()

        # Parse HTML with lxml.
        parsed_html = html.fromstring(await context.http_response.read())
        # Convert relative URLs to absolute before extracting links.
        parsed_html.make_links_absolute(context.request.url, resolve_base_href=True)
        # Serialize the parsed HTML to well-formed XML for Saxon processing.
        xml = html.tostring(parsed_html, encoding='unicode', method='xml')
        # Parse the XML with Saxon.
        parsed_xml = saxon_proc.parse_xml(xml_text=xml)
        # Set the parsed document as the context for XPath evaluation.
        xpath_proc.set_context(xdm_item=parsed_xml)
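        # From here on the full XPath 2.0+ function library is available
        # (Saxon-HE, as shipped in saxonche, implements XPath 3.1).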

        # Extract data using the XPath 2.0 string() function; convert the
        # results to plain Python strings so they can be serialized.
        title = xpath_proc.evaluate_single('//title/string()')
        data = {
            'url': context.request.url,
            'title': title.string_value if title is not None else None,
            'h1s': [h.string_value for h in (xpath_proc.evaluate('//h1/string()') or [])],
            'h2s': [h.string_value for h in (xpath_proc.evaluate('//h2/string()') or [])],
            'h3s': [h.string_value for h in (xpath_proc.evaluate('//h3/string()') or [])],
        }
        await context.push_data(data)
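        # Each pushed record is appended to the crawler's default dataset
        # (by default ./storage/datasets/default when using local storage).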

        # XPath 2.0 distinct-values() yields unique links; the predicate
        # skips fragment-only, javascript: and mailto: hrefs, and replace()
        # strips trailing #fragments so one page is not enqueued twice.
        links_xpath = """
            distinct-values(
                for $href in //a/@href[
                    not(starts-with(., "#"))
                    and not(starts-with(., "javascript:"))
                    and not(starts-with(., "mailto:"))
                ]
                return replace($href, "#.*$", "")
            )
        """
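        # Illustration (hypothetical URLs): hrefs made absolute above, such
        # as https://crawlee.dev/docs#intro and https://crawlee.dev/docs,
        # both collapse to https://crawlee.dev/docs here.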

        extracted_requests = []

        # Turn each extracted link into a crawlee Request, skipping URLs
        # that fail validation.
        for item in xpath_proc.evaluate(links_xpath) or []:
            url = item.string_value
            try:
                request = Request.from_url(url)
            except ValidationError as exc:
                context.log.warning(f'Skipping invalid URL "{url}": {exc}')
                continue
            extracted_requests.append(request)

        # Add the extracted requests to the queue with the same-domain strategy.
        await context.add_requests(extracted_requests, strategy='same-domain')
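        # 'same-domain' keeps the crawl on one domain; crawlee's other
        # enqueue strategies are 'all', 'same-hostname' and 'same-origin'.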

    await crawler.run(['https://crawlee.dev'])


if __name__ == '__main__':
    asyncio.run(main())