from selenium import webdriver
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
driver = webdriver.Chrome()
driver.get('http://example.com/table')
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
table = soup.find('table')
# Click the "next page" button (Selenium 4 API)
next_button = driver.find_element(By.ID, 'next_button')
next_button.click()
# Re-read and parse the page source after the click
updated_html = driver.page_source
updated_soup = BeautifulSoup(updated_html, 'html.parser')
updated_table = updated_soup.find('table')
driver.quit()
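One caveat with the Selenium version: page_source is read immediately after the click, so if the next page of the table is loaded via AJAX, the old table may still be in the DOM. Below is a minimal sketch of an explicit wait, assuming the table element is replaced when the next page loads; the 10-second timeout and the next_button id are illustrative.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get('http://example.com/table')

# Keep a handle to the table that is currently on the page.
old_table = driver.find_element(By.TAG_NAME, 'table')

driver.find_element(By.ID, 'next_button').click()

# Wait until the old table element is detached from the DOM,
# which signals that the new content has replaced it.
WebDriverWait(driver, 10).until(EC.staleness_of(old_table))

# page_source now reflects the updated table.
updated_html = driver.page_source
driver.quit()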
import requests
from bs4 import BeautifulSoup
url = 'http://example.com/table'
page = 1
while True:
    # Request the current page
    params = {'page': page}
    response = requests.get(url, params=params)
    html = response.text

    # Parse the page source with BeautifulSoup
    soup = BeautifulSoup(html, 'html.parser')
    table = soup.find('table')
    # Process the table data here

    # Check whether there is a next page
    next_link = soup.find('a', class_='next')
    if next_link:
        page += 1
        url = next_link['href']
    else:
        break
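The loop above leaves the table handling as a placeholder and assumes next_link['href'] is an absolute URL. Below is a minimal sketch of both pieces, assuming a plain table whose first row holds the column headers; parse_table and its column handling are illustrative, not part of the original code.

from urllib.parse import urljoin

def parse_table(table):
    # Turn a BeautifulSoup <table> element into a list of dicts,
    # using the first row's cells as the column names.
    rows = table.find_all('tr')
    headers = [cell.get_text(strip=True) for cell in rows[0].find_all(['th', 'td'])]
    data = []
    for row in rows[1:]:
        cells = [cell.get_text(strip=True) for cell in row.find_all('td')]
        data.append(dict(zip(headers, cells)))
    return data

# Inside the loop, a relative "next" link can be resolved against the
# current URL before the next request:
# url = urljoin(url, next_link['href'])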