Scrape Google Flights with Python

·

8 min read

What will be scraped

wwbs-google-flights

Full Code

from playwright.sync_api import sync_playwright
from selectolax.lexbor import LexborHTMLParser
import json, time


def get_page(playwright, from_place, to_place, departure_date, return_date):
    """Fill in the Google Flights search form and return the parsed results page.

    Args:
        playwright: a started sync_playwright instance.
        from_place: text typed into the "From" field (e.g. 'Seattle').
        to_place: text typed into the "To" field (e.g. 'Las Vegas').
        departure_date: departure date string (e.g. '5-1-2023').
        return_date: return date string (e.g. '5-4-2023').

    Returns:
        LexborHTMLParser over the fully rendered search-results HTML.
    """
    # headless=False keeps the browser window visible while the form is
    # filled in; switch to True for unattended runs.
    browser = playwright.chromium.launch(headless=False)
    page = browser.new_page()
    page.goto('https://www.google.com/travel/flights?hl=en-US&curr=USD')

    # type "From"; Enter accepts the first autocomplete suggestion
    from_place_field = page.query_selector_all('.e5F5td')[0]
    from_place_field.click()
    time.sleep(1)
    from_place_field.type(from_place)
    time.sleep(1)
    page.keyboard.press('Enter')

    # type "To"
    to_place_field = page.query_selector_all('.e5F5td')[1]
    to_place_field.click()
    time.sleep(1)
    to_place_field.type(to_place)
    time.sleep(1)
    page.keyboard.press('Enter')

    # type "Departure date", then click the confirmation button to close
    # the calendar widget
    departure_date_field = page.query_selector_all('[jscontroller="s0nXec"] [aria-label="Departure"]')[0]
    departure_date_field.click()
    time.sleep(1)
    departure_date_field.type(departure_date)
    time.sleep(1)
    page.query_selector('.WXaAwc .VfPpkd-LgbsSe').click()
    time.sleep(1)

    # type "Return date", then confirm
    return_date_field = page.query_selector_all('[jscontroller="pxWpE"] [aria-label="Return"]')[0]
    return_date_field.click()
    time.sleep(1)
    return_date_field.type(return_date)
    time.sleep(1)
    page.query_selector('.WXaAwc .VfPpkd-LgbsSe').click()
    time.sleep(1)

    # press "Explore" to run the search
    page.query_selector('.MXvFbd .VfPpkd-LgbsSe').click()
    time.sleep(2)

    # press "More flights" so the additional results are rendered into the DOM
    page.query_selector('.zISZ5c button').click()
    time.sleep(2)

    # Snapshot the rendered HTML so parsing can happen after the browser
    # is gone.
    parser = LexborHTMLParser(page.content())
    # Close the whole browser, not just the page — the original code left
    # the Chromium process running (resource leak).
    browser.close()

    return parser


def _first_text(node, selector):
    """Return the text of the first element matching *selector*, or None."""
    element = node.css_first(selector)
    return element.text() if element else None


def scrape_google_flights(parser):
    """Extract flight listings from a parsed Google Flights results page.

    Args:
        parser: LexborHTMLParser over the results-page HTML.

    Returns:
        dict mapping each category name (lowercased, spaces replaced with
        underscores) to a list of per-flight dicts. Optional fields that are
        absent in the page are set to None instead of raising.
    """
    data = {}

    # Category headings (e.g. "Best departing options") and their result
    # containers appear in the same document order, so pair them with zip().
    categories = parser.root.css('.zBTtmb')
    category_results = parser.root.css('.Rk10dc')

    for category, category_result in zip(categories, category_results):
        category_data = []

        for result in category_result.css('.yR1fYc'):
            # First span is the departure time, second the arrival time.
            dates = result.css('[jscontroller="cNtv4b"] span')

            # All lookups go through _first_text so a missing optional field
            # (e.g. emissions data on a "Price unavailable" row) yields None
            # instead of crashing on a None.text() call.
            flight_data = {
                'departure_date': dates[0].text() if len(dates) > 0 else None,
                'arrival_date': dates[1].text() if len(dates) > 1 else None,
                'company': _first_text(result, '.Ir0Voe .sSHqwe'),
                'duration': _first_text(result, '.AdWm1c.gvkrdb'),
                'stops': _first_text(result, '.EfT7Ae .ogfYpf'),
                'emissions': _first_text(result, '.V1iAHe .AdWm1c'),
                'emission_comparison': _first_text(result, '.N6PNV'),
                'price': _first_text(result, '.U3gSDe .FpEdX span'),
                'price_type': _first_text(result, '.U3gSDe .N872Rd'),
            }

            # A row shows either a service note or the airport names.
            airports = result.css_first('.Ak5kof .sSHqwe')
            service = result.css_first('.hRBhge')

            if service:
                flight_data['service'] = service.text()
            elif airports:
                flight_data['departure_airport'] = _first_text(
                    airports, 'span:nth-child(1) .eoY5cb')
                flight_data['arrival_airport'] = _first_text(
                    airports, 'span:nth-child(2) .eoY5cb')

            category_data.append(flight_data)

        data[category.text().lower().replace(' ', '_')] = category_data

    return data


def run(playwright):
    """Scrape a sample Seattle -> Las Vegas round trip and print the results."""
    search_params = {
        'from_place': 'Seattle',
        'to_place': 'Las Vegas',
        'departure_date': '5-1-2023',
        'return_date': '5-4-2023',
    }

    # Render the results page in a real browser, then parse the HTML snapshot.
    parser = get_page(playwright, **search_params)
    google_flights_results = scrape_google_flights(parser)

    print(json.dumps(google_flights_results, indent=2, ensure_ascii=False))


# Guard the entry point so importing this module does not launch a browser.
if __name__ == '__main__':
    # The context manager starts and cleanly stops the Playwright driver.
    with sync_playwright() as playwright:
        run(playwright)

Preparation

Install libraries:

pip install playwright selectolax

Install the required browser:

playwright install chromium

Code Explanation

Import libraries:

from playwright.sync_api import sync_playwright
from selectolax.lexbor import LexborHTMLParser
import json, time
Library — Purpose
sync_playwright — for the synchronous API. Playwright has an asynchronous API as well, using the asyncio module.
LexborHTMLParser — a fast HTML5 parser with CSS selectors using the Lexbor engine.
json — to convert extracted data to a JSON object.
time — to work with time in Python.

The next part of the code is divided into functions. Each function is described in the corresponding heading below.

Working with a Playwright

Declare a function:

def run(playwright):
    # further code ...

Passing user data to form a request:

from_place = 'Seattle'
to_place = 'Las Vegas'
departure_date = '5-1-2023'
return_date = '5-4-2023'

The playwright object and the previous data are then passed to the get_page(playwright, from_place, to_place, departure_date, return_date) function. The parser returned by this function is passed to the scrape_google_flights(parser) function to extract all the data. The explanation of these functions will be in the corresponding headings below.

parser = get_page(playwright, from_place, to_place, departure_date, return_date)
google_flights_results = scrape_google_flights(parser)

After all the data is retrieved, it is output in JSON format:

print(json.dumps(google_flights_results, indent=2, ensure_ascii=False))

Run your code using context manager:

with sync_playwright() as playwright:
    run(playwright)

The whole part of the code looks like this:

def run(playwright):
    """Drive a sample search (Seattle -> Las Vegas) and print the results."""
    from_place = 'Seattle'
    to_place = 'Las Vegas'
    departure_date = '5-1-2023'
    return_date = '5-4-2023'

    # Render the results page in a real browser, then parse the HTML snapshot.
    parser = get_page(playwright, from_place, to_place, departure_date, return_date)
    google_flights_results = scrape_google_flights(parser)

    print(json.dumps(google_flights_results, indent=2, ensure_ascii=False))


# The context manager starts and cleanly stops the Playwright driver.
with sync_playwright() as playwright:
    run(playwright)

Get page

The function takes a playwright object and parameters from_place, to_place, departure_date, return_date. Returns a parser.

Initialize playwright, connect to chromium, launch() a browser new_page() and goto() to the URL:

page = playwright.chromium.launch(headless=False).new_page()
page.goto('https://www.google.com/travel/flights?hl=en-US&curr=USD')
Parameter — Explanation
playwright.chromium — is a connection to the Chromium browser instance.
launch() — will launch the browser; the headless argument will run it in headless mode. Default is True.
new_page() — creates a new page in a new browser context.
page.goto() — will make a request to the provided website.

The next section of code is easier to show on the GIF:

get_page

In short, with the help of browser automation, we pass user parameters and generate search results:

# type "From"
from_place_field = page.query_selector_all('.e5F5td')[0]
from_place_field.click()
time.sleep(1)
from_place_field.type(from_place)
time.sleep(1)
page.keyboard.press('Enter')

# type "To"
to_place_field = page.query_selector_all('.e5F5td')[1]
to_place_field.click()
time.sleep(1)
to_place_field.type(to_place)
time.sleep(1)
page.keyboard.press('Enter')

# type "Departure date"
departure_date_field = page.query_selector_all('[jscontroller="s0nXec"] [aria-label="Departure"]')[0]
departure_date_field.click()
time.sleep(1)
departure_date_field.type(departure_date)
time.sleep(1)
page.query_selector('.WXaAwc .VfPpkd-LgbsSe').click()
time.sleep(1)

# type "Return date"
return_date_field = page.query_selector_all('[jscontroller="pxWpE"] [aria-label="Return"]')[0]
return_date_field.click()
time.sleep(1)
return_date_field.type(return_date)
time.sleep(1)
page.query_selector('.WXaAwc .VfPpkd-LgbsSe').click()
time.sleep(1)

# press "Explore"
page.query_selector('.MXvFbd .VfPpkd-LgbsSe').click()
time.sleep(2)

# press "More flights"
page.query_selector('.zISZ5c button').click()
time.sleep(2)

After all the data has been loaded, you need to process the HTML using LexborHTMLParser from selectolax, because its Lexbor parser is incredibly fast — about 186% faster than bs4 with the lxml backend when parsing data with 3000 iterations 5 times. Please note that selectolax does not currently support XPath:

parser = LexborHTMLParser(page.content())

After all the operations are done, close the browser and the parser is returned:

page.close()

return parser

The function looks like this:

def get_page(playwright, from_place, to_place, departure_date, return_date):
    """Fill in the Google Flights search form and return the parsed page.

    Returns a LexborHTMLParser over the rendered search-results HTML.
    """
    # headless=False keeps the browser window visible while the form is filled in.
    page = playwright.chromium.launch(headless=False).new_page()
    page.goto('https://www.google.com/travel/flights?hl=en-US&curr=USD')

    # type "From"; Enter accepts the first autocomplete suggestion
    from_place_field = page.query_selector_all('.e5F5td')[0]
    from_place_field.click()
    time.sleep(1)
    from_place_field.type(from_place)
    time.sleep(1)
    page.keyboard.press('Enter')

    # type "To"
    to_place_field = page.query_selector_all('.e5F5td')[1]
    to_place_field.click()
    time.sleep(1)
    to_place_field.type(to_place)
    time.sleep(1)
    page.keyboard.press('Enter')

    # type "Departure date", then click the confirmation button
    departure_date_field = page.query_selector_all('[jscontroller="s0nXec"] [aria-label="Departure"]')[0]
    departure_date_field.click()
    time.sleep(1)
    departure_date_field.type(departure_date)
    time.sleep(1)
    page.query_selector('.WXaAwc .VfPpkd-LgbsSe').click()
    time.sleep(1)

    # type "Return date", then confirm
    return_date_field = page.query_selector_all('[jscontroller="pxWpE"] [aria-label="Return"]')[0]
    return_date_field.click()
    time.sleep(1)
    return_date_field.type(return_date)
    time.sleep(1)
    page.query_selector('.WXaAwc .VfPpkd-LgbsSe').click()
    time.sleep(1)

    # press "Explore" to run the search
    page.query_selector('.MXvFbd .VfPpkd-LgbsSe').click()
    time.sleep(2)

    # press "More flights" so the additional results are rendered into the DOM
    page.query_selector('.zISZ5c button').click()
    time.sleep(2)

    # Snapshot the rendered HTML so parsing can happen after the page is closed.
    parser = LexborHTMLParser(page.content())
    page.close()

    return parser

Scrape Google Flights Listings

The function takes a Lexbor parser and returns the extracted data.

The data dictionary is declared to which the extracted data will be added:

data = {}

In order to extract all the data, you need to extract the category names and the elements of these categories separately. You need to use the css() method and pass the appropriate selectors there:

categories = parser.root.css('.zBTtmb')
category_results = parser.root.css('.Rk10dc')

You then need to iterate over the resulting item lists using the zip() function to successfully categorize the flights. For each category, a category_data list is created, to which the flights associated with the corresponding category will be added:

for category, category_result in zip(categories, category_results):
    category_data = []

We iterate each flight to extract all the necessary data:

for result in category_result.css('.yR1fYc'):
    # data extraction will be here

Most of the data is easily retrieved:

company = result.css_first('.Ir0Voe .sSHqwe').text()
duration = result.css_first('.AdWm1c.gvkrdb').text()
stops = result.css_first('.EfT7Ae .ogfYpf').text()
emissions = result.css_first('.V1iAHe .AdWm1c').text()
emission_comparison = result.css_first('.N6PNV').text()
price = result.css_first('.U3gSDe .FpEdX span').text()
price_type = result.css_first('.U3gSDe .N872Rd').text() if result.css_first('.U3gSDe .N872Rd') else None
Code — Explanation
css_first() — to find the desired element.
text() — to retrieve the text content.

The selector responsible for the date contains 2 elements: departure_date and arrival_date.

date = result.css('[jscontroller="cNtv4b"] span')
departure_date = date[0].text()
arrival_date = date[1].text()

After extracting the main data, we form the flight_data dictionary:

flight_data = {
    'departure_date': departure_date,
    'arrival_date': arrival_date,
    'company': company,
    'duration': duration,
    'stops': stops,
    'emissions': emissions,
    'emission_comparison': emission_comparison,
    'price': price,
    'price_type': price_type
}

After adding the main data, you need to check the data, which may differ. These are the departure_airport and the arrival_airport or other service.

airports-or-service

Depending on what data is present in this flight, we add them to the flight_data dictionary. Then add this dictionary to the category_data list:

airports = result.css_first('.Ak5kof .sSHqwe')
service = result.css_first('.hRBhge')

if service:
    flight_data['service'] = service.text()
else:
    flight_data['departure_airport'] = airports.css_first('span:nth-child(1) .eoY5cb').text()
    flight_data['arrival_airport'] = airports.css_first('span:nth-child(2) .eoY5cb').text()

category_data.append(flight_data)

When all flights have been retrieved for a specific category, you need to add them to the data dictionary by key. The key is the name of the category:

data[category.text().lower().replace(' ', '_')] = category_data

At the end of the function, the data dictionary is returned:

return data

The function looks like this:

def scrape_google_flights(parser):
    """Extract flight listings from a parsed Google Flights results page.

    Returns a dict mapping each category name (lowercased, spaces replaced
    with underscores) to a list of per-flight dicts.
    """
    data = {}

    # Category headings and their result containers appear in the same
    # document order, so they can be paired with zip() below.
    categories = parser.root.css('.zBTtmb')
    category_results = parser.root.css('.Rk10dc')

    for category, category_result in zip(categories, category_results):
        category_data = []

        for result in category_result.css('.yR1fYc'):
            # First span is the departure time, second the arrival time.
            date = result.css('[jscontroller="cNtv4b"] span')
            departure_date = date[0].text()
            arrival_date = date[1].text()
            company = result.css_first('.Ir0Voe .sSHqwe').text()
            duration = result.css_first('.AdWm1c.gvkrdb').text()
            stops = result.css_first('.EfT7Ae .ogfYpf').text()
            emissions = result.css_first('.V1iAHe .AdWm1c').text()
            emission_comparison = result.css_first('.N6PNV').text()
            price = result.css_first('.U3gSDe .FpEdX span').text()
            # price_type is optional, so guard against a missing element.
            price_type = result.css_first('.U3gSDe .N872Rd').text() if result.css_first('.U3gSDe .N872Rd') else None

            flight_data = {
                'departure_date': departure_date,
                'arrival_date': arrival_date,
                'company': company,
                'duration': duration,
                'stops': stops,
                'emissions': emissions,
                'emission_comparison': emission_comparison,
                'price': price,
                'price_type': price_type
            }

            # A row shows either a service note or the airport names.
            airports = result.css_first('.Ak5kof .sSHqwe')
            service = result.css_first('.hRBhge')

            if service:
                flight_data['service'] = service.text()
            else:
                flight_data['departure_airport'] = airports.css_first('span:nth-child(1) .eoY5cb').text()
                flight_data['arrival_airport'] = airports.css_first('span:nth-child(2) .eoY5cb').text()

            category_data.append(flight_data)

        data[category.text().lower().replace(' ', '_')] = category_data

    return data

Output

{
  "Best departing options": [
    {
      "departure_date": "12:02 PM",
      "arrival_date": "2:32 PM",
      "company": "Frontier",
      "duration": "2 hr 30 min",
      "stops": "Nonstop",
      "emissions": "83 kg CO2",
      "emission_comparison": "-25% emissions",
      "price": "$52",
      "price_type": "round trip",
      "departure_airport": "Seattle-Tacoma International Airport",
      "arrival_airport": "Harry Reid International Airport"
    },
    {
      "departure_date": "6:05 AM",
      "arrival_date": "8:31 AM",
      "company": "Spirit",
      "duration": "2 hr 26 min",
      "stops": "Nonstop",
      "emissions": "100 kg CO2",
      "emission_comparison": "-10% emissions",
      "price": "$54",
      "price_type": "round trip",
      "departure_airport": "Seattle-Tacoma International Airport",
      "arrival_airport": "Harry Reid International Airport"
    },
    {
      "departure_date": "11:50 AM",
      "arrival_date": "2:21 PM",
      "company": "Spirit",
      "duration": "2 hr 31 min",
      "stops": "Nonstop",
      "emissions": "85 kg CO2",
      "emission_comparison": "-23% emissions",
      "price": "$54",
      "price_type": "round trip",
      "departure_airport": "Seattle-Tacoma International Airport",
      "arrival_airport": "Harry Reid International Airport"
    },
    {
      "departure_date": "6:52 PM",
      "arrival_date": "9:25 PM",
      "company": "Spirit",
      "duration": "2 hr 33 min",
      "stops": "Nonstop",
      "emissions": "85 kg CO2",
      "emission_comparison": "-23% emissions",
      "price": "$54",
      "price_type": "round trip",
      "departure_airport": "Seattle-Tacoma International Airport",
      "arrival_airport": "Harry Reid International Airport"
    },
    {
      "departure_date": "9:17 PM",
      "arrival_date": "11:46 PM",
      "company": "Spirit",
      "duration": "2 hr 29 min",
      "stops": "Nonstop",
      "emissions": "101 kg CO2",
      "emission_comparison": "-9% emissions",
      "price": "$54",
      "price_type": "round trip",
      "departure_airport": "Seattle-Tacoma International Airport",
      "arrival_airport": "Harry Reid International Airport"
    },
    {
      "departure_date": "10:10 AM",
      "arrival_date": "12:35 PM",
      "company": "Southwest",
      "duration": "2 hr 25 min",
      "stops": "Nonstop",
      "emissions": "93 kg CO2",
      "emission_comparison": "-16% emissions",
      "price": "Price unavailable",
      "price_type": null,
      "departure_airport": "Seattle-Tacoma International Airport",
      "arrival_airport": "Harry Reid International Airport"
    }
  ],
  "Other departing flights": [
    {
      "departure_date": "6:29 PM",
      "arrival_date": "8:02 AM+1",
      "company": "Frontier",
      "duration": "13 hr 33 min",
      "stops": "1 stop",
      "emissions": "153 kg CO2",
      "emission_comparison": "+38% emissions",
      "price": "$165",
      "price_type": "round trip",
      "departure_airport": "Seattle-Tacoma International Airport",
      "arrival_airport": "Harry Reid International Airport"
    },
    ... other results
  ]
}

📌 Note: Other departing flights are not displayed because they are not available for this route.

Β