Added logging to almost everything here
@@ -16,10 +16,7 @@ class song(object):
         self.download_url = ""
 
     def format_track(self):
-        if self.size != 0:
-            return "{0}. {1}. {2}".format(self.title, self.duration, self.size)
-        else:
-            return "{0} {1}".format(self.title, self.duration)
+        return self.extractor.format_track(self)
 
     def get_download_url(self):
         self.download_url = self.extractor.get_download_url(self.url)
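Note: the hunk above replaces the song class's own formatting with a call into its extractor; the per-extractor format_track(item) methods appear in the hunks below. A self-contained sketch of that delegation, with illustrative class names rather than the project's real modules:

# Sketch only: Extractor/Song stand in for an extractor interface and baseFile.song.
class Extractor(object):
    def format_track(self, item):
        # per-extractor formatting, as added for mail.ru/vk/youtube/zaycev below
        return "{0} {1}".format(item.title, item.duration)

class Song(object):
    def __init__(self, extractor):
        self.extractor = extractor
        self.title = ""
        self.duration = ""

    def format_track(self):
        # same shape as the new song.format_track above
        return self.extractor.format_track(self)

s = Song(Extractor())
s.title, s.duration = "Example track", "3:25"
print(s.format_track())  # -> "Example track 3:25"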
@@ -7,18 +7,23 @@ except ImportError:
     import urllib as urlparse
 import requests
 import youtube_dl
+import logging
 from bs4 import BeautifulSoup
 from . import baseFile
 
+log = logging.getLogger("extractors.mail.ru")
+
 class interface(object):
 
     def __init__(self):
         self.results = []
         self.name = "mailru"
         self.needs_transcode = False
+        log.debug("Started extraction service for mail.ru music")
 
     def search(self, text, page=1):
         site = 'https://my.mail.ru/music/search/%s' % (text)
+        log.debug("Retrieving data from {0}...".format(site,))
         r = requests.get(site)
         soup = BeautifulSoup(r.text, 'html.parser')
         search_results = soup.find_all("div", {"class": "songs-table__row__col songs-table__row__col--title title songs-table__row__col--title-hq-similar resize"})
@@ -31,13 +36,19 @@ class interface(object):
             # print(data)
             s.url = u"https://my.mail.ru"+urlparse.quote(data[0].__dict__["attrs"]["href"])
             self.results.append(s)
+        log.debug("{0} results found.".format(len(self.results)))
 
     def get_download_url(self, url):
-        ydl = youtube_dl.YoutubeDL({'quiet': True, 'format': 'bestaudio/best', 'outtmpl': u'%(id)s%(ext)s'})
+        log.debug("Getting download URL for {0}".format(url,))
+        ydl = youtube_dl.YoutubeDL({'quiet': True, 'no_warnings': True, 'logger': log, 'format': 'bestaudio/best', 'outtmpl': u'%(id)s%(ext)s'})
         with ydl:
             result = ydl.extract_info(url, download=False)
         if 'entries' in result:
             video = result['entries'][0]
         else:
             video = result
-        return video["url"]
+        log.debug("Download URL: {0}".format(video["url"],))
+        return video["url"]
+
+    def format_track(self, item):
+        return item.title
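Note on the 'logger' option added above: youtube_dl's YoutubeDL accepts any object with debug/warning/error methods, so passing the module's stdlib logging.Logger routes youtube_dl's own messages through the "extractors.mail.ru" logger instead of stdout/stderr. A minimal standalone sketch (the URL is a placeholder, not a real track):

import logging
import youtube_dl

log = logging.getLogger("extractors.mail.ru")

ydl_opts = {
    'quiet': True,
    'no_warnings': True,
    'logger': log,                 # any object with debug/warning/error methods
    'format': 'bestaudio/best',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    # download=False resolves metadata (including the direct media URL) only;
    # replace the placeholder URL with a page youtube_dl can actually extract.
    info = ydl.extract_info("https://example.com/some-track", download=False)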
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals # at top of module
 import requests
+import logging
 try:
     import urllib.parse as urlparse
 except ImportError:
@@ -9,6 +10,7 @@ from .import baseFile
 from update.utils import seconds_to_string
 
 api_endpoint = "https://api-2.datmusic.xyz"
+log = logging.getLogger("extractors.vk.com")
 
 class interface(object):
 
@@ -16,19 +18,27 @@ class interface(object):
         self.results = []
         self.name = "vk"
         self.needs_transcode = False
+        log.debug("started extraction service for {0}".format(self.name,))
 
     def search(self, text, page=1):
         self.results = []
         url = "{0}/search?q={1}".format(api_endpoint, text)
+        log.debug("Retrieving data from {0}...".format(url,))
         search_results = requests.get(url)
         search_results = search_results.json()["data"]
         for i in search_results:
             s = baseFile.song(self)
             s.title = i["title"]
             s.artist = i["artist"]
             # URRL is not needed here as download_url is already provided. So let's skip that part.
             s.duration = seconds_to_string(i["duration"])
             s.download_url = i["stream"]
             self.results.append(s)
+        log.debug("{0} results found.".format(len(self.results)))
 
     def get_download_url(self, url):
-        return None
+        log.debug("This function has been called but does not apply to this module.")
+        return None
+
+    def format_track(self, item):
+        return "{0}. {1}".format(item.artist, item.title)
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals # at top of module
 import isodate
 import youtube_dl
+import logging
 from googleapiclient.discovery import build
 from googleapiclient.errors import HttpError
 from .import baseFile
@@ -11,16 +12,20 @@ DEVELOPER_KEY = "AIzaSyCU_hvZJEjLlAGAnlscquKEkE8l0lVOfn0"
 YOUTUBE_API_SERVICE_NAME = "youtube"
 YOUTUBE_API_VERSION = "v3"
 
+log = logging.getLogger("extractors.youtube.com")
+
 class interface(object):
 
     def __init__(self):
         self.results = []
         self.name = "youtube"
         self.needs_transcode = True
+        log.debug("started extraction service for {0}".format(self.name,))
 
     def search(self, text, page=1):
         type = "video"
         max_results = 20
+        log.debug("Retrieving data from Youtube...")
         youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)
         search_response = youtube.search().list(q=text, part="id,snippet", maxResults=max_results, type=type).execute()
         self.results = []
@@ -35,13 +40,19 @@ class interface(object):
         ssr = youtube.videos().list(id=",".join(ids), part="contentDetails", maxResults=1).execute()
         for i in range(len(self.results)):
             self.results[i].duration = seconds_to_string(isodate.parse_duration(ssr["items"][i]["contentDetails"]["duration"]).total_seconds())
+        log.debug("{0} results found.".format(len(self.results)))
 
     def get_download_url(self, url):
-        ydl = youtube_dl.YoutubeDL({'quiet': True, 'format': 'bestaudio/best', 'outtmpl': u'%(id)s%(ext)s'})
+        log.debug("Getting download URL for {0}".format(url,))
+        ydl = youtube_dl.YoutubeDL({'quiet': True, 'no_warnings': True, 'logger': log, 'format': 'bestaudio/best', 'outtmpl': u'%(id)s%(ext)s'})
         with ydl:
             result = ydl.extract_info(url, download=False)
         if 'entries' in result:
             video = result['entries'][0]
         else:
             video = result
+        log.debug("Download URL: {0}".format(video["url"],))
         return video["url"]
+
+    def format_track(self, item):
+        return "{0} {1}".format(item.title, item.duration)
@@ -4,18 +4,23 @@ from __future__ import unicode_literals # at top of module
 import re
 import json
 import requests
+import logging
 from bs4 import BeautifulSoup
 from . import baseFile
 
+log = logging.getLogger("extractors.zaycev.net")
+
 class interface(object):
 
     def __init__(self):
         self.results = []
         self.name = "zaycev"
         self.needs_transcode = False
+        log.debug("Started extraction service for zaycev.net")
 
     def search(self, text, page=1):
         site = 'http://go.mail.ru/zaycev?q=%s&page=%s' % (text, page)
+        log.debug("Retrieving data from {0}...".format(site,))
         r = requests.get(site)
         soup = BeautifulSoup(r.text, 'html.parser')
         D = r'длительность.(\d+\:\d+\:\d+)'
@@ -32,8 +37,14 @@ class interface(object):
             s.size = self.hd[i]["size"]
             s.bitrate = self.hd[i]["bitrate"]
             self.results.append(s)
+        log.debug("{0} results found.".format(len(self.results)))
 
     def get_download_url(self, url):
+        log.debug("Getting download URL for {0}".format(url,))
         soups = BeautifulSoup(requests.get(url).text, 'html.parser')
         data = json.loads(requests.get('http://zaycev.net' + soups.find('div', {'class':"musicset-track"}).get('data-url')).text)
-        return data["url"]
+        log.debug("Download URL: {0}".format(data["url"]))
+        return data["url"]
+
+    def format_track(self, item):
+        return "{0}. {1}. {2}".format(item.title, item.duration, item.size)
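Since these modules only call logging.getLogger and never attach handlers, the new debug lines stay silent unless the application configures logging. A minimal sketch of enabling them (the handler and format choices here are assumptions, not part of this commit):

import logging

# Send DEBUG and above from every logger, including "extractors.*", to stderr.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(name)s %(levelname)s: %(message)s",
)

# Or raise the level on a single module's logger, e.g. the zaycev.net extractor:
logging.getLogger("extractors.zaycev.net").setLevel(logging.DEBUG)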