This is a quick and dirty Python script to automatically download all of the photos from a public Facebook page. The script clicks into the photo lightbox and then steps through each photo by sending the right-arrow key. If it gets stuck entering the lightbox, just perform that step manually in the browser and run only the loop. Note that the loop runs forever, so stop the script manually once it has reached the last photo.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import requests
import shutil
import re
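# Launch Firefox (geckodriver must be available on your PATH)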
driver = webdriver.Firefox()
# cookies = pickle.load(open("cookiespy2.txt", "rb"))
# for cookie in cookies:
#     driver.add_cookie(cookie)
driver.get("https://www.facebook.com/XXXXXXXXX/photos")
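# Wait until the page title confirms the Photos page has loaded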
element = WebDriverWait(driver, 10).until(
    EC.title_is("Ali's Magic Carpet Pre-Kindy - Photos | Facebook")
)
html = driver.page_source
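# Scrape the first photo's ID out of the JSON embedded in the page source;
# the same ID is used as the DOM id of the photo tile we click to open the lightbox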
match = re.search(r'"id":"(\d*?)"},"media"', html).group(1)
element = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.ID, match))
)
driver.find_element(By.ID, match).click()
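# Loop through the lightbox: save the current full-size image, then press Right to advance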
while True:
    element = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CLASS_NAME, "spotlight"))
    )
    image_url = driver.find_element(By.CLASS_NAME, "spotlight").get_attribute("src")

    # Build a local filename from the image URL, dropping the query string
    filename = image_url.split("/")[-1]
    filename = filename.split("?")[0]

    # Request the image with stream=True so the body can be streamed straight to disk
    r = requests.get(image_url, stream=True)

    # Check if the image was retrieved successfully
    if r.status_code == 200:
        # Set decode_content to True, otherwise the downloaded image file's size will be zero
        r.raw.decode_content = True
        # Open a local file with wb (write binary) permission
        with open(filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
        print('Image successfully downloaded:', filename)
    else:
        print('Image couldn\'t be retrieved')

    # Advance the lightbox to the next photo
    driver.find_element(By.CSS_SELECTOR, 'body').send_keys(Keys.RIGHT)
driver.close()
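The commented-out cookie lines near the top assume you have a pickled cookie file from an earlier, logged-in browser session (and, if you uncomment them, you would also need import pickle in the script). Here is a minimal sketch of how such a file might be produced; the filename cookiespy2.txt comes from the comment above, everything else is an assumption:

import pickle
from selenium import webdriver

driver = webdriver.Firefox()
driver.get("https://www.facebook.com/")
# Log in by hand in the browser window, then come back and press Enter
input("Press Enter once you are logged in...")

# Save the session cookies so the download script can reload them later
pickle.dump(driver.get_cookies(), open("cookiespy2.txt", "wb"))
driver.quit()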