#!/usr/bin/env python3
"""
Basic synchronous example demonstrating how to use the Scrape API.

This example shows:
1. How to make a basic scrape request
2. How to use render_heavy_js for JavaScript-heavy websites
3. How to add custom headers
4. How to handle the response

Equivalent curl command:
curl -X POST https://api.scrapegraphai.com/v1/scrape \
  -H "Content-Type: application/json" \
  -H "SGAI-APIKEY: your-api-key-here" \
  -d '{
    "website_url": "https://example.com",
    "render_heavy_js": false
  }'

Requirements:
- Python 3.7+
- scrapegraph-py
- python-dotenv
- A .env file with your SGAI_API_KEY

Example .env file:
SGAI_API_KEY=your_api_key_here
"""

import time
from pathlib import Path

from dotenv import load_dotenv
from scrapegraph_py import Client

# Load environment variables from .env file
load_dotenv()
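
# Optional fail-fast guard (a small added sketch, not part of the original
# example): Client.from_env() is expected to read SGAI_API_KEY from the
# environment, so exit early with a clear message when it is missing.
import os  # stdlib; imported here to keep the added guard self-contained

if not os.getenv("SGAI_API_KEY"):
    raise SystemExit("SGAI_API_KEY is not set; add it to your .env file as shown above.")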


def basic_scrape_example():
    """Demonstrate basic scrape functionality."""
    print("🌐 Basic Scrape Example")
    print("=" * 30)

    # Initialize client
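    # Aside (added note): if the client also supports the context manager
    # protocol (unverified here), `with Client.from_env() as client:` would
    # replace the explicit try/finally + close() pattern used below.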
    client = Client.from_env()

    try:
        # Basic scrape request
        print("Making basic scrape request...")
        result = client.scrape(website_url="https://example.com", render_heavy_js=False)

        # Display results
        html_content = result.get("html", "")
        print(f"✅ Success! Received {len(html_content):,} characters of HTML")
        print(f"Request ID: {result.get('request_id', 'N/A')}")

        return result

    except Exception as e:
        print(f"❌ Error: {e}")
        return None
    finally:
        client.close()


def scrape_with_heavy_js():
    """Demonstrate scraping with heavy JavaScript rendering."""
    print("\n🚀 Heavy JavaScript Rendering Example")
    print("=" * 45)

    client = Client.from_env()

    try:
        print("Making scrape request with heavy JS rendering...")
        start_time = time.time()

        result = client.scrape(
            website_url="https://example.com",
            render_heavy_js=True,  # Enable JavaScript rendering
        )

        execution_time = time.time() - start_time
        html_content = result.get("html", "")

        print(f"✅ Success! Received {len(html_content):,} characters of HTML")
        print(f"⏱️ Execution time: {execution_time:.2f} seconds")
        print(f"Request ID: {result.get('request_id', 'N/A')}")

        return result

    except Exception as e:
        print(f"❌ Error: {e}")
        return None
    finally:
        client.close()


def scrape_with_custom_headers():
    """Demonstrate scraping with custom headers."""
    print("\n🔧 Custom Headers Example")
    print("=" * 30)

    client = Client.from_env()

    # Custom headers for better compatibility
    custom_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
    }

    try:
        print("Making scrape request with custom headers...")
        result = client.scrape(
            website_url="https://httpbin.org/html",
            render_heavy_js=False,
            headers=custom_headers,
        )

        html_content = result.get("html", "")
        print(f"✅ Success! Received {len(html_content):,} characters of HTML")
        print(f"Request ID: {result.get('request_id', 'N/A')}")

        # Show a preview of the HTML
        preview = html_content[:200].replace("\n", " ").strip()
        print(f"HTML Preview: {preview}...")

        return result

    except Exception as e:
        print(f"❌ Error: {e}")
        return None
    finally:
        client.close()


def save_html_to_file(html_content: str, filename: str):
    """Save HTML content to a file."""
    output_dir = Path("scrape_output")
    output_dir.mkdir(exist_ok=True)

    file_path = output_dir / f"{filename}.html"
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(html_content)

    print(f"💾 HTML saved to: {file_path}")
    return file_path
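

# The docstring's point 4 is "how to handle the response"; as a small added
# illustration (stdlib only, a hypothetical helper not from the original),
# here is one way to pull the <title> out of the returned HTML.
def extract_title(html_content: str) -> str:
    """Return the text of the first <title> tag, or '' if none is found."""
    import re  # local import keeps this optional helper self-contained

    match = re.search(r"<title[^>]*>(.*?)</title>", html_content, re.IGNORECASE | re.DOTALL)
    return match.group(1).strip() if match else ""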


def demonstrate_curl_equivalent():
    """Show the equivalent curl commands."""
    print("\n🌐 Equivalent curl commands:")
    print("=" * 35)

    print("1. Basic scrape:")
    print("curl -X POST https://api.scrapegraphai.com/v1/scrape \\")
    print('  -H "Content-Type: application/json" \\')
    print('  -H "SGAI-APIKEY: your-api-key-here" \\')
    print("  -d '{")
    print('    "website_url": "https://example.com",')
    print('    "render_heavy_js": false')
    print("  }'")

    print("\n2. With heavy JS rendering:")
    print("curl -X POST https://api.scrapegraphai.com/v1/scrape \\")
    print('  -H "Content-Type: application/json" \\')
    print('  -H "SGAI-APIKEY: your-api-key-here" \\')
    print("  -d '{")
    print('    "website_url": "https://example.com",')
    print('    "render_heavy_js": true')
    print("  }'")
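
    # Added variant (an assumption: the JSON body shape below mirrors the
    # Python client's headers= parameter and is not confirmed by this example;
    # verify against the official API docs before relying on it):
    print("\n3. With custom headers (assumed payload shape):")
    print("curl -X POST https://api.scrapegraphai.com/v1/scrape \\")
    print('  -H "Content-Type: application/json" \\')
    print('  -H "SGAI-APIKEY: your-api-key-here" \\')
    print("  -d '{")
    print('    "website_url": "https://httpbin.org/html",')
    print('    "render_heavy_js": false,')
    print('    "headers": {"User-Agent": "Mozilla/5.0 ..."}')
    print("  }'")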


def main():
    """Main function demonstrating scrape functionality."""
    print("🚀 Scrape API Examples")
    print("=" * 25)

    # Show curl equivalents first
    demonstrate_curl_equivalent()

    try:
        # Run examples
        result1 = basic_scrape_example()
        result2 = scrape_with_heavy_js()
        result3 = scrape_with_custom_headers()

        # Save results if successful
        if result1:
            html1 = result1.get("html", "")
            if html1:
                save_html_to_file(html1, "basic_scrape")
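                # Uses the hypothetical extract_title helper added above.
                print(f"Page title: {extract_title(html1) or 'N/A'}")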

        if result3:
            html3 = result3.get("html", "")
            if html3:
                save_html_to_file(html3, "custom_headers_scrape")

        print("\n🎯 Summary:")
        print(f"{'✅' if result1 else '❌'} Basic scrape: {'Success' if result1 else 'Failed'}")
        print(f"{'✅' if result2 else '❌'} Heavy JS scrape: {'Success' if result2 else 'Failed'}")
        print(f"{'✅' if result3 else '❌'} Custom headers scrape: {'Success' if result3 else 'Failed'}")

    except Exception as e:
        print(f"❌ Unexpected error: {e}")

    print("\n📚 Next steps:")
    print("• Try the curl commands in your terminal")
    print("• Experiment with different websites")
    print("• Test with your own custom headers")
    print("• Compare render_heavy_js=true vs false for dynamic sites")


if __name__ == "__main__":
    main()