"""Batch-check website availability.

Purpose: monitor the response status and load time of multiple websites.
"""
import concurrent.futures
import csv
import time
from datetime import datetime

import requests
def check_website(url, timeout=10):
    """Check a single website and report its status and response time.

    Args:
        url: The URL to request via HTTP GET.
        timeout: Per-request timeout in seconds (default 10).

    Returns:
        dict with keys:
            'url'           - the URL that was checked
            'status'        - HTTP status code, or None on failure
            'response_time' - elapsed seconds as a float, or None on failure
            'success'       - True if the request completed (any status code)
            'error'         - error message string, or None on success
    """
    try:
        # perf_counter() is a monotonic clock, so the measurement is not
        # skewed by NTP adjustments or DST changes (datetime.now() would be).
        start = time.perf_counter()
        response = requests.get(url, timeout=timeout)
        elapsed = time.perf_counter() - start
        return {
            'url': url,
            'status': response.status_code,
            'response_time': elapsed,
            'success': True,  # NOTE: 4xx/5xx still count as "reached" here
            'error': None,
        }
    except requests.RequestException as e:
        # RequestException is the documented base class for every failure
        # requests can raise (timeouts, connection errors, invalid URLs, ...).
        # Catching it instead of bare Exception avoids masking real bugs.
        return {
            'url': url,
            'status': None,
            'response_time': None,
            'success': False,
            'error': str(e),
        }
def batch_check_websites(urls, max_workers=10):
    """Check many websites concurrently, printing one summary line per site.

    Args:
        urls: Iterable of URL strings to check.
        max_workers: Size of the thread pool (default 10).

    Returns:
        List of result dicts from check_website(), in completion order.
    """
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        pending = [executor.submit(check_website, url) for url in urls]
        # as_completed yields futures as they finish, so fast sites print first.
        for future in concurrent.futures.as_completed(pending):
            outcome = future.result()
            results.append(outcome)
            marker = "✅" if outcome['success'] else "❌"
            elapsed = outcome['response_time']
            shown = f"{elapsed:.2f}s" if elapsed else "N/A"
            print(f"{marker} {outcome['url']} - {outcome['status']} - {shown}")
    return results
def save_results(results, output_file):
    """Write check results to a CSV file with a header row.

    Args:
        results: List of dicts with keys url/status/response_time/success/error.
        output_file: Path of the CSV file to create (overwritten if present).
    """
    columns = ['url', 'status', 'response_time', 'success', 'error']
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open(output_file, 'w', newline='', encoding='utf-8') as handle:
        writer = csv.DictWriter(handle, fieldnames=columns)
        writer.writeheader()
        writer.writerows(results)
if __name__ == "__main__":
    # Sites to monitor; edit this list as needed.
    websites = [
        'https://www.google.com',
        'https://www.github.com',
        'https://www.stackoverflow.com',
        'https://www.python.org',
        'https://blog.sharezone.cn',
    ]

    results = batch_check_websites(websites)
    save_results(results, 'website_status.csv')

    # Final tally of reachable vs. failed sites.
    success_count = sum(1 for r in results if r['success'])
    print(f"\nTotal: {len(results)}, Success: {success_count}, Failed: {len(results) - success_count}")