# Module: NetflixSession
# Created on: 13.01.2017
-import sys
import os
-import base64
-import time
-import urllib
import json
-import requests
-import platform
+from requests import session, cookies
+from urllib import quote
+from time import time
+from base64 import urlsafe_b64encode
+from bs4 import BeautifulSoup, SoupStrainer
+from utils import noop
try:
import cPickle as pickle
except:
import pickle
-from bs4 import BeautifulSoup, SoupStrainer
-from pyjsparser import PyJsParser
-from utils import noop
class NetflixSession:
"""Helps with login/session management of Netflix users & API data fetching"""
urls = {
'login': '/login',
- 'browse': '/browse',
- 'video_list_ids': '/warmer',
+ 'browse': '/profiles/manage',
+ 'video_list_ids': '/preflight',
'shakti': '/pathEvaluator',
- 'profiles': '/browse',
+ 'profiles': '/profiles/manage',
'switch_profiles': '/profiles/switch',
'adult_pin': '/pin/service',
'metadata': '/metadata',
self.log = log_fn
# start session, fake chrome on the current platform (so that we get a proper widevine esn) & enable gzip
- self.session = requests.session()
+ self.session = session()
self.session.headers.update({
'User-Agent': self._get_user_agent_for_current_platform(),
'Accept-Encoding': 'gzip'
"""
payload = {
'switchProfileGuid': profile_id,
- '_': int(time.time()),
+ '_': int(time()),
'authURL': self.user_data['authURL']
}
if response.status_code != 200:
return False
- # fetch the index page again, so that we can fetch the corresponding user data
- browse_response = self._session_get(component='browse')
- only_script_tags = SoupStrainer('script')
- browse_soup = BeautifulSoup(browse_response.text, 'html.parser', parse_only=only_script_tags)
account_hash = self._generate_account_hash(account=account)
self.user_data['guid'] = profile_id;
- self._save_data(filename=self.data_path + '_' + account_hash)
- return True
+ return self._save_data(filename=self.data_path + '_' + account_hash)
def send_adult_pin (self, pin):
"""Send the adult pin to Netflix in case an adult rated video requests it
'toRow': list_to,
'opaqueImageExtension': 'jpg',
'transparentImageExtension': 'png',
- '_': int(time.time()),
+ '_': int(time()),
'authURL': self.user_data['authURL']
}
response = self._session_get(component='video_list_ids', params=payload, type='api')
Raw Netflix API call response or api call error
"""
# properly encode the search string
- encoded_search_string = urllib.quote(search_str)
+ encoded_search_string = quote(search_str)
paths = [
['search', encoded_search_string, 'titles', {'from': list_from, 'to': list_to}, ['summary', 'title']],
payload = {
'movieid': id,
'imageformat': 'jpg',
- '_': int(time.time())
+ '_': int(time())
}
response = self._session_get(component='metadata', params=payload, type='api')
return self._process_response(response=response, component=self._get_api_url_for(component='metadata'))
return False
with open(filename) as f:
- cookies = pickle.load(f)
- if cookies:
- jar = requests.cookies.RequestsCookieJar()
- jar._cookies = cookies
+ _cookies = pickle.load(f)
+ if _cookies:
+ jar = cookies.RequestsCookieJar()
+ jar._cookies = _cookies
self.session.cookies = jar
else:
return False
:obj:`str`
Account data hash
"""
- return base64.urlsafe_b64encode(account['email'])
+ return urlsafe_b64encode(account['email'])
def _get_user_agent_for_current_platform (self):
"""Determines the user agent string for the current platform (to retrieve a valid ESN)
:obj:`str`
User Agent for platform
"""
+ import platform
if platform == 'linux' or platform == 'linux2':
return 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
elif platform == 'darwin':
Contents of the field to match
"""
url = self._get_document_url_for(component=component) if type == 'document' else self._get_api_url_for(component=component)
- start = time.time()
+ start = time()
response = self.session.post(url=url, data=data, params=params, headers=headers, verify=self.verify_ssl)
- end = time.time()
+ end = time()
self.log('[POST] Request for "' + url + '" took ' + str(end - start) + ' seconds')
return response
Contents of the field to match
"""
url = self._get_document_url_for(component=component) if type == 'document' else self._get_api_url_for(component=component)
- start = time.time()
+ start = time()
response = self.session.get(url=url, verify=self.verify_ssl, params=params)
- end = time.time()
+ end = time()
self.log('[GET] Request for "' + url + '" took ' + str(end - start) + ' seconds')
return response
:obj:`dict` of :obj:`str`
Dict containing user, api & profile data
"""
- inline_data = [];
+ inline_data = []
+ from pyjsparser import PyJsParser
parser = PyJsParser()
for script in scripts:
- data = {};
+ data = {}
# unicode-escape the incoming script contents
contents = self._to_unicode(str(script.contents[0]))
# parse the JS & load the declarations we're interested in
parsed = parser.parse(contents)
if len(parsed['body']) > 1 and parsed['body'][1]['expression']['right'].get('properties', None) != None:
- declarations = parsed['body'][1]['expression']['right']['properties'];
+ declarations = parsed['body'][1]['expression']['right']['properties']
for declaration in declarations:
for key in declaration:
# we found the correct path if the declaration is a dict & of type 'ObjectExpression'
:obj:`str`
ESN, something like: NFCDCH-MC-D7D6F54LOPY8J416T72MQXX3RD20ME
"""
- esn = '';
+ esn = ''
# values are accessible via dict (sloppy parsing successful)
if type(netflix_page_data) == dict:
return netflix_page_data.get('esn', '')
self.esn = self._parse_esn_data(netflix_page_data=netflix_page_data)
self.api_data = self._parse_api_base_data(netflix_page_data=netflix_page_data)
self.profiles = self._parse_profile_data(netflix_page_data=netflix_page_data)
- self.log('Found ESN "' + self.esn + '" for platform "' + str(platform.system()) + '"')
+ self.log('Found ESN "' + self.esn + '"')
return netflix_page_data