Dataset fields:

field                  type            range
repo_name              stringclasses   4 values
method_name            stringlengths   3–72
method_code            stringlengths   87–3.59k
method_summary         stringlengths   12–196
original_method_code   stringlengths   129–8.98k
method_path            stringlengths   15–136
Azure/azure-sdk-for-python
WebsiteManagementService.restart_site
def restart_site(self, webspace_name, website_name): return self._perform_post( self._get_restart_path(webspace_name, website_name), None, as_async=True)
Restart a web site.
def restart_site(self, webspace_name, website_name): ''' Restart a web site. webspace_name: The name of the webspace. website_name: The name of the website. ''' return self._perform_post( self._get_restart_path(webspace_name, website_name), None, as_async=True)
azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py
Azure/azure-sdk-for-python
WebsiteManagementService.get_historical_usage_metrics
def get_historical_usage_metrics(self, webspace_name, website_name, metrics=None, start_time=None, end_time=None, time_grain=None): metrics = ('names='+','.join(metrics)) if metrics else '' start_time = ('StartTime='+start_time) if start_time else '' end_time = ('EndTime='+end_time) if end_time else '' time_grain = ('TimeGrain='+time_grain) if time_grain else '' parameters = ('&'.join(v for v in (metrics, start_time, end_time, time_grain) if v)) parameters = '?'+parameters if parameters else '' return self._perform_get(self._get_historical_usage_metrics_path(webspace_name, website_name) + parameters, MetricResponses)
Get historical usage metrics.
def get_historical_usage_metrics(self, webspace_name, website_name, metrics=None, start_time=None, end_time=None, time_grain=None): ''' Get historical usage metrics. webspace_name: The name of the webspace. website_name: The name of the website. metrics: Optional. List of metric names. Otherwise, all metrics are returned. start_time: Optional. An ISO8601 date. Otherwise, current hour is used. end_time: Optional. An ISO8601 date. Otherwise, current time is used. time_grain: Optional. A rollup name, such as P1D. Otherwise, the default rollup for the metrics is used. More information and metric names at: http://msdn.microsoft.com/en-us/library/azure/dn166964.aspx ''' metrics = ('names='+','.join(metrics)) if metrics else '' start_time = ('StartTime='+start_time) if start_time else '' end_time = ('EndTime='+end_time) if end_time else '' time_grain = ('TimeGrain='+time_grain) if time_grain else '' parameters = ('&'.join(v for v in (metrics, start_time, end_time, time_grain) if v)) parameters = '?'+parameters if parameters else '' return self._perform_get(self._get_historical_usage_metrics_path(webspace_name, website_name) + parameters, MetricResponses)
azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py
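As an illustration of the query-string assembly in this method, here is a minimal standalone re-run of the same logic; the metric names and start time are hypothetical:

# Re-runs the parameter assembly above with hypothetical inputs.
metrics = 'names=' + ','.join(['CpuTime', 'Requests'])
start_time = 'StartTime=' + '2014-02-01T00:00:00Z'
parameters = '&'.join(v for v in (metrics, start_time, '', '') if v)
parameters = '?' + parameters if parameters else ''
print(parameters)  # ?names=CpuTime,Requests&StartTime=2014-02-01T00:00:00Z
# Omitted arguments simply drop out of the query string.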
Azure/azure-sdk-for-python
WebsiteManagementService.get_metric_definitions
def get_metric_definitions(self, webspace_name, website_name): return self._perform_get(self._get_metric_definitions_path(webspace_name, website_name), MetricDefinitions)
Get the definitions of the metrics available for this web site.
def get_metric_definitions(self, webspace_name, website_name): ''' Get the definitions of the metrics available for this web site. webspace_name: The name of the webspace. website_name: The name of the website. ''' return self._perform_get(self._get_metric_definitions_path(webspace_name, website_name), MetricDefinitions)
azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py
Azure/azure-sdk-for-python
WebsiteManagementService.get_publish_profile_xml
def get_publish_profile_xml(self, webspace_name, website_name): return self._perform_get(self._get_publishxml_path(webspace_name, website_name), None).body.decode("utf-8")
Get a site's publish profile as a string
def get_publish_profile_xml(self, webspace_name, website_name): ''' Get a site's publish profile as a string webspace_name: The name of the webspace. website_name: The name of the website. ''' return self._perform_get(self._get_publishxml_path(webspace_name, website_name), None).body.decode("utf-8")
azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py
Azure/azure-sdk-for-python
WebsiteManagementService.get_publish_profile
def get_publish_profile(self, webspace_name, website_name): return self._perform_get(self._get_publishxml_path(webspace_name, website_name), PublishData)
Get a site's publish profile as an object
def get_publish_profile(self, webspace_name, website_name): ''' Get a site's publish profile as an object webspace_name: The name of the webspace. website_name: The name of the website. ''' return self._perform_get(self._get_publishxml_path(webspace_name, website_name), PublishData)
azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py
Azure/azure-sdk-for-python
RegistriesOperations.update_policies
def update_policies( self, resource_group_name, registry_name, quarantine_policy=None, trust_policy=None, custom_headers=None, raw=False, polling=True, **operation_config): raw_result = self._update_policies_initial( resource_group_name=resource_group_name, registry_name=registry_name, quarantine_policy=quarantine_policy, trust_policy=trust_policy, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('RegistryPolicies', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Updates the policies for the specified container registry.
def update_policies( self, resource_group_name, registry_name, quarantine_policy=None, trust_policy=None, custom_headers=None, raw=False, polling=True, **operation_config): """Updates the policies for the specified container registry. :param resource_group_name: The name of the resource group to which the container registry belongs. :type resource_group_name: str :param registry_name: The name of the container registry. :type registry_name: str :param quarantine_policy: An object that represents quarantine policy for a container registry. :type quarantine_policy: ~azure.mgmt.containerregistry.v2018_02_01_preview.models.QuarantinePolicy :param trust_policy: An object that represents content trust policy for a container registry. :type trust_policy: ~azure.mgmt.containerregistry.v2018_02_01_preview.models.TrustPolicy :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns RegistryPolicies or ClientRawResponse<RegistryPolicies> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.containerregistry.v2018_02_01_preview.models.RegistryPolicies] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.containerregistry.v2018_02_01_preview.models.RegistryPolicies]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._update_policies_initial( resource_group_name=resource_group_name, registry_name=registry_name, quarantine_policy=quarantine_policy, trust_policy=trust_policy, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('RegistryPolicies', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_02_01_preview/operations/registries_operations.py
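The method returns an LROPoller, so a caller typically blocks on .result(). The sketch below shows that pattern under stated assumptions: a configured ContainerRegistryManagementClient named client and hypothetical resource names, neither of which comes from this file.

# Hedged sketch, not from this file: `client` is assumed to be a
# configured ContainerRegistryManagementClient (2018-02-01-preview).
poller = client.registries.update_policies(
    resource_group_name='myResourceGroup',   # hypothetical
    registry_name='myregistry',              # hypothetical
    quarantine_policy={'status': 'enabled'},
)
policies = poller.result()  # blocks until the ARM operation finishes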
Azure/azure-sdk-for-python
SchedulerManagementService.check_job_collection_name
def check_job_collection_name(self, cloud_service_id, job_collection_id): _validate_not_none('cloud_service_id', cloud_service_id) _validate_not_none('job_collection_id', job_collection_id) path = self._get_cloud_services_path( cloud_service_id, "scheduler", "jobCollections") path += "?op=checknameavailability&resourceName=" + job_collection_id return self._perform_post(path, None, AvailabilityResponse)
The Check Name Availability operation checks if a new job collection with the given name may be created, or if it is unavailable. The result of the operation is a Boolean true or false.
def check_job_collection_name(self, cloud_service_id, job_collection_id): ''' The Check Name Availability operation checks if a new job collection with the given name may be created, or if it is unavailable. The result of the operation is a Boolean true or false. cloud_service_id: The cloud service id. job_collection_id: The name of the job collection. ''' _validate_not_none('cloud_service_id', cloud_service_id) _validate_not_none('job_collection_id', job_collection_id) path = self._get_cloud_services_path( cloud_service_id, "scheduler", "jobCollections") path += "?op=checknameavailability&resourceName=" + job_collection_id return self._perform_post(path, None, AvailabilityResponse)
azure-servicemanagement-legacy/azure/servicemanagement/schedulermanagementservice.py
Azure/azure-sdk-for-python
SchedulerManagementService.get_job_collection
def get_job_collection(self, cloud_service_id, job_collection_id): _validate_not_none('cloud_service_id', cloud_service_id) _validate_not_none('job_collection_id', job_collection_id) path = self._get_job_collection_path( cloud_service_id, job_collection_id) return self._perform_get(path, Resource)
The Get Job Collection operation gets the details of a job collection.
def get_job_collection(self, cloud_service_id, job_collection_id): ''' The Get Job Collection operation gets the details of a job collection. cloud_service_id: The cloud service id. job_collection_id: The name of the job collection. ''' _validate_not_none('cloud_service_id', cloud_service_id) _validate_not_none('job_collection_id', job_collection_id) path = self._get_job_collection_path( cloud_service_id, job_collection_id) return self._perform_get(path, Resource)
azure-servicemanagement-legacy/azure/servicemanagement/schedulermanagementservice.py
Azure/azure-sdk-for-python
ManagedDatabasesOperations.complete_restore
def complete_restore( self, location_name, operation_id, last_backup_name, custom_headers=None, raw=False, polling=True, **operation_config): raw_result = self._complete_restore_initial( location_name=location_name, operation_id=operation_id, last_backup_name=last_backup_name, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
Completes the restore operation on a managed database.
def complete_restore( self, location_name, operation_id, last_backup_name, custom_headers=None, raw=False, polling=True, **operation_config): """Completes the restore operation on a managed database. :param location_name: The name of the region where the resource is located. :type location_name: str :param operation_id: Management operation id that this request tries to complete. :type operation_id: str :param last_backup_name: The last backup name to apply :type last_backup_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._complete_restore_initial( location_name=location_name, operation_id=operation_id, last_backup_name=last_backup_name, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
azure-mgmt-sql/azure/mgmt/sql/operations/managed_databases_operations.py
Azure/azure-sdk-for-python
Sender.cancel_scheduled_messages
async def cancel_scheduled_messages(self, *sequence_numbers): if not self.running: await self.open() numbers = [types.AMQPLong(s) for s in sequence_numbers] request_body = {'sequence-numbers': types.AMQPArray(numbers)} return await self._mgmt_request_response( REQUEST_RESPONSE_CANCEL_SCHEDULED_MESSAGE_OPERATION, request_body, mgmt_handlers.default)
Cancel one or more messages that have previously been scheduled and are still pending.
async def cancel_scheduled_messages(self, *sequence_numbers): """Cancel one or more messages that have previously been scheduled and are still pending. :param sequence_numbers: The sequence numbers of the scheduled messages. :type sequence_numbers: int Example: .. literalinclude:: ../examples/async_examples/test_examples_async.py :start-after: [START cancel_schedule_messages] :end-before: [END cancel_schedule_messages] :language: python :dedent: 4 :caption: Cancel scheduled messages. """ if not self.running: await self.open() numbers = [types.AMQPLong(s) for s in sequence_numbers] request_body = {'sequence-numbers': types.AMQPArray(numbers)} return await self._mgmt_request_response( REQUEST_RESPONSE_CANCEL_SCHEDULED_MESSAGE_OPERATION, request_body, mgmt_handlers.default)
azure-servicebus/azure/servicebus/aio/async_send_handler.py
Azure/azure-sdk-for-python
Sender.send_pending_messages
async def send_pending_messages(self): if not self.running: await self.open() try: pending = self._handler._pending_messages[:] await self._handler.wait_async() results = [] for m in pending: if m.state == constants.MessageState.SendFailed: results.append((False, MessageSendFailed(m._response))) else: results.append((True, None)) return results except Exception as e: raise MessageSendFailed(e)
Wait until all pending messages have been sent.
async def send_pending_messages(self): """Wait until all pending messages have been sent. :returns: A list of the send results of all the pending messages. Each send result is a tuple with two values. The first is a boolean, indicating `True` if the message sent, or `False` if it failed. The second is an error if the message failed, otherwise it will be `None`. :rtype: list[tuple[bool, ~azure.servicebus.common.errors.MessageSendFailed]] Example: .. literalinclude:: ../examples/async_examples/test_examples_async.py :start-after: [START queue_sender_messages] :end-before: [END queue_sender_messages] :language: python :dedent: 4 :caption: Send pending messages. """ if not self.running: await self.open() try: pending = self._handler._pending_messages[:] # pylint: disable=protected-access await self._handler.wait_async() results = [] for m in pending: if m.state == constants.MessageState.SendFailed: results.append((False, MessageSendFailed(m._response))) # pylint: disable=protected-access else: results.append((True, None)) return results except Exception as e: # pylint: disable=broad-except raise MessageSendFailed(e)
azure-servicebus/azure/servicebus/aio/async_send_handler.py
Azure/azure-sdk-for-python
get_certificate_from_publish_settings
def get_certificate_from_publish_settings(publish_settings_path, path_to_write_certificate, subscription_id=None): try: from xml.etree import cElementTree as ET except ImportError: from xml.etree import ElementTree as ET try: import OpenSSL.crypto as crypto except ImportError: raise Exception("pyopenssl is required to use get_certificate_from_publish_settings") _validate_not_none('publish_settings_path', publish_settings_path) _validate_not_none('path_to_write_certificate', path_to_write_certificate) tree = ET.parse(publish_settings_path) subscriptions = tree.getroot().findall("./PublishProfile/Subscription") if subscription_id: subscription = next((s for s in subscriptions if s.get('Id').lower() == subscription_id.lower()), None) else: subscription = subscriptions[0] if subscription is None: raise ValueError("The provided subscription_id '{}' was not found in the publish settings file provided at '{}'".format(subscription_id, publish_settings_path)) cert_string = _decode_base64_to_bytes(subscription.get('ManagementCertificate')) cert = crypto.load_pkcs12(cert_string, b'') with open(path_to_write_certificate, 'wb') as f: f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert.get_certificate())) f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, cert.get_privatekey())) return subscription.get('Id')
Writes a certificate file to the specified location. This can then be used to instantiate ServiceManagementService.
def get_certificate_from_publish_settings(publish_settings_path, path_to_write_certificate, subscription_id=None): ''' Writes a certificate file to the specified location. This can then be used to instantiate ServiceManagementService. Returns the subscription ID. publish_settings_path: Path to subscription file downloaded from http://go.microsoft.com/fwlink/?LinkID=301775 path_to_write_certificate: Path to write the certificate file. subscription_id: (optional) Provide a subscription id here if you wish to use a specific subscription under the publish settings file. ''' try: from xml.etree import cElementTree as ET except ImportError: from xml.etree import ElementTree as ET try: import OpenSSL.crypto as crypto except ImportError: raise Exception("pyopenssl is required to use get_certificate_from_publish_settings") _validate_not_none('publish_settings_path', publish_settings_path) _validate_not_none('path_to_write_certificate', path_to_write_certificate) # parse the publishsettings file and find the ManagementCertificate Entry tree = ET.parse(publish_settings_path) subscriptions = tree.getroot().findall("./PublishProfile/Subscription") # Default to the first subscription in the file if they don't specify # or get the matching subscription or return none. if subscription_id: subscription = next((s for s in subscriptions if s.get('Id').lower() == subscription_id.lower()), None) else: subscription = subscriptions[0] # validate that subscription was found if subscription is None: raise ValueError("The provided subscription_id '{}' was not found in the publish settings file provided at '{}'".format(subscription_id, publish_settings_path)) cert_string = _decode_base64_to_bytes(subscription.get('ManagementCertificate')) # Load the string in pkcs12 format. Don't provide a password as it isn't encrypted. cert = crypto.load_pkcs12(cert_string, b'') # Write the data out as a PEM format to a random location in temp for use under this run. with open(path_to_write_certificate, 'wb') as f: f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert.get_certificate())) f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, cert.get_privatekey())) return subscription.get('Id')
azure-servicemanagement-legacy/azure/servicemanagement/publishsettings.py
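A minimal usage sketch, assuming a valid .publishsettings file downloaded from the management portal; the file paths are placeholders:

from azure.servicemanagement import (
    ServiceManagementService, get_certificate_from_publish_settings)

subscription_id = get_certificate_from_publish_settings(
    publish_settings_path='MyAccount.publishsettings',  # placeholder path
    path_to_write_certificate='mycert.pem',             # placeholder path
)
# The written PEM can then back a management client:
sms = ServiceManagementService(subscription_id, cert_file='mycert.pem')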
streamlink/streamlink
Plugin.load_cookies
def load_cookies(self): if not self.session or not self.cache: raise RuntimeError("Cannot load cached cookies in unbound plugin") restored = [] for key, value in self.cache.get_all().items(): if key.startswith("__cookie"): cookie = requests.cookies.create_cookie(**value) self.session.http.cookies.set_cookie(cookie) restored.append(cookie.name) if restored: self.logger.debug("Restored cookies: {0}".format(", ".join(restored))) return restored
Load any stored cookies for the plugin that have not expired.
def load_cookies(self): """ Load any stored cookies for the plugin that have not expired. :return: list of the restored cookie names """ if not self.session or not self.cache: raise RuntimeError("Cannot load cached cookies in unbound plugin") restored = [] for key, value in self.cache.get_all().items(): if key.startswith("__cookie"): cookie = requests.cookies.create_cookie(**value) self.session.http.cookies.set_cookie(cookie) restored.append(cookie.name) if restored: self.logger.debug("Restored cookies: {0}".format(", ".join(restored))) return restored
src/streamlink/plugin/plugin.py
streamlink/streamlink
get_cut_prefix
def get_cut_prefix(value, max_len): should_convert = isinstance(value, bytes) if should_convert: value = value.decode("utf8", "ignore") for i in range(len(value)): if terminal_width(value[i:]) <= max_len: break return value[i:].encode("utf8", "ignore") if should_convert else value[i:]
Drops characters by unicode width, not by bytes.
def get_cut_prefix(value, max_len): """Drops characters by unicode width, not by bytes.""" should_convert = isinstance(value, bytes) if should_convert: value = value.decode("utf8", "ignore") for i in range(len(value)): if terminal_width(value[i:]) <= max_len: break return value[i:].encode("utf8", "ignore") if should_convert else value[i:]
src/streamlink_cli/utils/progress.py
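A worked example of the cut, assuming terminal_width() counts East Asian wide characters as two columns:

# '日本語' is 6 columns wide and 'abc' is 3; leading characters are
# dropped until the remainder fits the 5-column budget:
get_cut_prefix(u"日本語abc", 5)                # -> u"語abc" (5 columns)
# bytes in, bytes out, never split mid-byte:
get_cut_prefix(u"café.ts".encode("utf8"), 6)  # -> b"af\xc3\xa9.ts"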
streamlink/streamlink
print_inplace
def print_inplace(msg): term_width = get_terminal_size().columns spacing = term_width - terminal_width(msg) if is_win32: spacing -= 1 sys.stderr.write("\r{0}".format(msg)) sys.stderr.write(" " * max(0, spacing)) sys.stderr.flush()
Clears out the previous line and prints a new one.
def print_inplace(msg): """Clears out the previous line and prints a new one.""" term_width = get_terminal_size().columns spacing = term_width - terminal_width(msg) # On windows we need one less space or we overflow the line for some reason. if is_win32: spacing -= 1 sys.stderr.write("\r{0}".format(msg)) sys.stderr.write(" " * max(0, spacing)) sys.stderr.flush()
src/streamlink_cli/utils/progress.py
streamlink/streamlink
format_filesize
def format_filesize(size): for suffix in ("bytes", "KB", "MB", "GB", "TB"): if size < 1024.0: if suffix in ("GB", "TB"): return "{0:3.2f} {1}".format(size, suffix) else: return "{0:3.1f} {1}".format(size, suffix) size /= 1024.0
Formats the file size into a human readable format.
def format_filesize(size): """Formats the file size into a human readable format.""" for suffix in ("bytes", "KB", "MB", "GB", "TB"): if size < 1024.0: if suffix in ("GB", "TB"): return "{0:3.2f} {1}".format(size, suffix) else: return "{0:3.1f} {1}".format(size, suffix) size /= 1024.0
src/streamlink_cli/utils/progress.py
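Example outputs, derived directly from the branching above (one decimal below GB, two decimals at GB and TB):

format_filesize(998)            # '998.0 bytes'
format_filesize(2048)           # '2.0 KB'
format_filesize(3.5 * 1024**3)  # '3.50 GB'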
streamlink/streamlink
format_time
def format_time(elapsed): hours = int(elapsed / (60 * 60)) minutes = int((elapsed % (60 * 60)) / 60) seconds = int(elapsed % 60) rval = "" if hours: rval += "{0}h".format(hours) if elapsed > 60: rval += "{0}m".format(minutes) rval += "{0}s".format(seconds) return rval
Formats elapsed seconds into a human readable format.
def format_time(elapsed): """Formats elapsed seconds into a human readable format.""" hours = int(elapsed / (60 * 60)) minutes = int((elapsed % (60 * 60)) / 60) seconds = int(elapsed % 60) rval = "" if hours: rval += "{0}h".format(hours) if elapsed > 60: rval += "{0}m".format(minutes) rval += "{0}s".format(seconds) return rval
src/streamlink_cli/utils/progress.py
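Example outputs, derived from the code above; note that minutes are printed whenever more than a minute has elapsed, even when the minute count is zero:

format_time(45)    # '45s'
format_time(3725)  # '1h2m5s'
format_time(3605)  # '1h0m5s' (elapsed > 60, so the zero minutes still print)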
streamlink/streamlink
create_status_line
def create_status_line(**params): max_size = get_terminal_size().columns - 1 for fmt in PROGRESS_FORMATS: status = fmt.format(**params) if len(status) <= max_size: break return status
Creates a status line with appropriate size.
def create_status_line(**params): """Creates a status line with appropriate size.""" max_size = get_terminal_size().columns - 1 for fmt in PROGRESS_FORMATS: status = fmt.format(**params) if len(status) <= max_size: break return status
src/streamlink_cli/utils/progress.py
streamlink/streamlink
progress
def progress(iterator, prefix): if terminal_width(prefix) > 25: prefix = (".." + get_cut_prefix(prefix, 23)) speed_updated = start = time() speed_written = written = 0 speed_history = deque(maxlen=5) for data in iterator: yield data now = time() elapsed = now - start written += len(data) speed_elapsed = now - speed_updated if speed_elapsed >= 0.5: speed_history.appendleft(( written - speed_written, speed_updated, )) speed_updated = now speed_written = written speed_history_written = sum(h[0] for h in speed_history) speed_history_elapsed = now - speed_history[-1][1] speed = speed_history_written / speed_history_elapsed status = create_status_line( prefix=prefix, written=format_filesize(written), elapsed=format_time(elapsed), speed=format_filesize(speed) ) print_inplace(status) sys.stderr.write("\n") sys.stderr.flush()
Progresses an iterator and updates a pretty status line on the terminal. The status line contains the amount of data read, the time elapsed, and the average speed.
def progress(iterator, prefix): """Progresses an iterator and updates a pretty status line on the terminal. The status line contains: - Amount of data read from the iterator - Time elapsed - Average speed, based on the last few seconds. """ if terminal_width(prefix) > 25: prefix = (".." + get_cut_prefix(prefix, 23)) speed_updated = start = time() speed_written = written = 0 speed_history = deque(maxlen=5) for data in iterator: yield data now = time() elapsed = now - start written += len(data) speed_elapsed = now - speed_updated if speed_elapsed >= 0.5: speed_history.appendleft(( written - speed_written, speed_updated, )) speed_updated = now speed_written = written speed_history_written = sum(h[0] for h in speed_history) speed_history_elapsed = now - speed_history[-1][1] speed = speed_history_written / speed_history_elapsed status = create_status_line( prefix=prefix, written=format_filesize(written), elapsed=format_time(elapsed), speed=format_filesize(speed) ) print_inplace(status) sys.stderr.write("\n") sys.stderr.flush()
src/streamlink_cli/utils/progress.py
streamlink/streamlink
SegmentedStreamWorker.wait
def wait(self, time): self._wait = Event() return not self._wait.wait(time)
Pauses the thread for a specified time.
def wait(self, time): """Pauses the thread for a specified time. Returns False if interrupted by another thread and True if the time runs out normally. """ self._wait = Event() return not self._wait.wait(time)
src/streamlink/stream/segmented.py
streamlink/streamlink
SegmentedStreamWriter.put
def put(self, segment): if self.closed: return if segment is not None: future = self.executor.submit(self.fetch, segment, retries=self.retries) else: future = None self.queue(self.futures, (segment, future))
Adds a segment to the download pool and write queue.
def put(self, segment): """Adds a segment to the download pool and write queue.""" if self.closed: return if segment is not None: future = self.executor.submit(self.fetch, segment, retries=self.retries) else: future = None self.queue(self.futures, (segment, future))
src/streamlink/stream/segmented.py
streamlink/streamlink
SegmentedStreamWriter.queue
def queue(self, queue_, value): while not self.closed: try: queue_.put(value, block=True, timeout=1) return except queue.Full: continue
Puts a value into a queue but aborts if this thread is closed.
def queue(self, queue_, value): """Puts a value into a queue but aborts if this thread is closed.""" while not self.closed: try: queue_.put(value, block=True, timeout=1) return except queue.Full: continue
src/streamlink/stream/segmented.py
streamlink/streamlink
BBCiPlayer.find_vpid
def find_vpid(self, url, res=None): log.debug("Looking for vpid on {0}", url) res = res or self.session.http.get(url) m = self.mediator_re.search(res.text) vpid = m and parse_json(m.group(1), schema=self.mediator_schema) return vpid
Find the Video Packet ID in the HTML for the provided URL
def find_vpid(self, url, res=None): """ Find the Video Packet ID in the HTML for the provided URL :param url: URL to download, if res is not provided. :param res: Provide a cached version of the HTTP response to search :type url: string :type res: requests.Response :return: Video Packet ID for a Programme in iPlayer :rtype: string """ log.debug("Looking for vpid on {0}", url) # Use pre-fetched page if available res = res or self.session.http.get(url) m = self.mediator_re.search(res.text) vpid = m and parse_json(m.group(1), schema=self.mediator_schema) return vpid
src/streamlink/plugins/bbciplayer.py
streamlink/streamlink
parse_json
def parse_json(data, name="JSON", exception=PluginError, schema=None): try: json_data = json.loads(data) except ValueError as err: snippet = repr(data) if len(snippet) > 35: snippet = snippet[:35] + " ..." else: snippet = data raise exception("Unable to parse {0}: {1} ({2})".format(name, err, snippet)) if schema: json_data = schema.validate(json_data, name=name, exception=exception) return json_data
Wrapper around json.loads. Wraps errors in custom exception with a snippet of the data in the message.
def parse_json(data, name="JSON", exception=PluginError, schema=None): """Wrapper around json.loads. Wraps errors in custom exception with a snippet of the data in the message. """ try: json_data = json.loads(data) except ValueError as err: snippet = repr(data) if len(snippet) > 35: snippet = snippet[:35] + " ..." else: snippet = data raise exception("Unable to parse {0}: {1} ({2})".format(name, err, snippet)) if schema: json_data = schema.validate(json_data, name=name, exception=exception) return json_data
src/streamlink/utils/__init__.py
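A small illustration of the wrapper's behaviour; the inputs are hypothetical:

parse_json('{"id": 42}')  # -> {'id': 42}
parse_json('{"id": ', name="API response")
# raises PluginError: Unable to parse API response: ... ('{"id": ')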
streamlink/streamlink
parse_xml
def parse_xml(data, name="XML", ignore_ns=False, exception=PluginError, schema=None, invalid_char_entities=False): if is_py2 and isinstance(data, unicode): data = data.encode("utf8") elif is_py3 and isinstance(data, str): data = bytearray(data, "utf8") if ignore_ns: data = re.sub(br"[\t ]xmlns=\"(.+?)\"", b"", data) if invalid_char_entities: data = re.sub(br'&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)', b'&amp;', data) try: tree = ET.fromstring(data) except Exception as err: snippet = repr(data) if len(snippet) > 35: snippet = snippet[:35] + " ..." raise exception("Unable to parse {0}: {1} ({2})".format(name, err, snippet)) if schema: tree = schema.validate(tree, name=name, exception=exception) return tree
Wrapper around ElementTree.fromstring with some extras: handles incorrectly encoded XML, optionally strips namespaces, and wraps errors in a custom exception with a snippet of the data.
def parse_xml(data, name="XML", ignore_ns=False, exception=PluginError, schema=None, invalid_char_entities=False): """Wrapper around ElementTree.fromstring with some extras. Provides these extra features: - Handles incorrectly encoded XML - Allows stripping namespace information - Wraps errors in custom exception with a snippet of the data in the message """ if is_py2 and isinstance(data, unicode): data = data.encode("utf8") elif is_py3 and isinstance(data, str): data = bytearray(data, "utf8") if ignore_ns: data = re.sub(br"[\t ]xmlns=\"(.+?)\"", b"", data) if invalid_char_entities: data = re.sub(br'&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)', b'&amp;', data) try: tree = ET.fromstring(data) except Exception as err: snippet = repr(data) if len(snippet) > 35: snippet = snippet[:35] + " ..." raise exception("Unable to parse {0}: {1} ({2})".format(name, err, snippet)) if schema: tree = schema.validate(tree, name=name, exception=exception) return tree
src/streamlink/utils/__init__.py
streamlink/streamlink
parse_qsd
def parse_qsd(data, name="query string", exception=PluginError, schema=None, **params): value = dict(parse_qsl(data, **params)) if schema: value = schema.validate(value, name=name, exception=exception) return value
Parses a query string into a dict. Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value.
def parse_qsd(data, name="query string", exception=PluginError, schema=None, **params): """Parses a query string into a dict. Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value. """ value = dict(parse_qsl(data, **params)) if schema: value = schema.validate(value, name=name, exception=exception) return value
src/streamlink/utils/__init__.py
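A quick illustration of the duplicate-key behaviour: dict(parse_qsl(...)) keeps only the last occurrence of a repeated key:

parse_qsd("a=1&a=2&b=3")  # -> {'a': '2', 'b': '3'}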
streamlink/streamlink
search_dict
def search_dict(data, key): if isinstance(data, dict): for dkey, value in data.items(): if dkey == key: yield value for result in search_dict(value, key): yield result elif isinstance(data, list): for value in data: for result in search_dict(value, key): yield result
Search for a key in a nested dict, or list of nested dicts, and return the values.
def search_dict(data, key): """ Search for a key in a nested dict, or list of nested dicts, and return the values. :param data: dict/list to search :param key: key to find :return: matches for key """ if isinstance(data, dict): for dkey, value in data.items(): if dkey == key: yield value for result in search_dict(value, key): yield result elif isinstance(data, list): for value in data: for result in search_dict(value, key): yield result
src/streamlink/utils/__init__.py
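A small example of the recursive search; yield order follows dict insertion order on Python 3.7+:

data = {"id": 1, "items": [{"id": 2, "meta": {"id": 3}}]}
list(search_dict(data, "id"))  # -> [1, 2, 3]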
streamlink/streamlink
DASHStream.parse_manifest
def parse_manifest(cls, session, url_or_manifest, **args): ret = {} if url_or_manifest.startswith('<?xml'): mpd = MPD(parse_xml(url_or_manifest, ignore_ns=True)) else: res = session.http.get(url_or_manifest, **args) url = res.url urlp = list(urlparse(url)) urlp[2], _ = urlp[2].rsplit("/", 1) mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url) video, audio = [], [] for aset in mpd.periods[0].adaptationSets: if aset.contentProtection: raise PluginError("{} is protected by DRM".format(url)) for rep in aset.representations: if rep.mimeType.startswith("video"): video.append(rep) elif rep.mimeType.startswith("audio"): audio.append(rep) if not video: video = [None] if not audio: audio = [None] locale = session.localization locale_lang = locale.language lang = None available_languages = set() for aud in audio: if aud and aud.lang: available_languages.add(aud.lang) try: if locale.explicit and aud.lang and Language.get(aud.lang) == locale_lang: lang = aud.lang except LookupError: continue if not lang: lang = audio[0] and audio[0].lang log.debug("Available languages for DASH audio streams: {0} (using: {1})".format(", ".join(available_languages) or "NONE", lang or "n/a")) if len(available_languages) > 1: audio = list(filter(lambda a: a.lang is None or a.lang == lang, audio)) for vid, aud in itertools.product(video, audio): stream = DASHStream(session, mpd, vid, aud, **args) stream_name = [] if vid: stream_name.append("{:0.0f}{}".format(vid.height or vid.bandwidth_rounded, "p" if vid.height else "k")) if audio and len(audio) > 1: stream_name.append("a{:0.0f}k".format(aud.bandwidth)) ret['+'.join(stream_name)] = stream return ret
Attempt to parse a DASH manifest file and return its streams
def parse_manifest(cls, session, url_or_manifest, **args): """ Attempt to parse a DASH manifest file and return its streams :param session: Streamlink session instance :param url_or_manifest: URL of the manifest file or an XML manifest string :return: a dict of name -> DASHStream instances """ ret = {} if url_or_manifest.startswith('<?xml'): mpd = MPD(parse_xml(url_or_manifest, ignore_ns=True)) else: res = session.http.get(url_or_manifest, **args) url = res.url urlp = list(urlparse(url)) urlp[2], _ = urlp[2].rsplit("/", 1) mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url) video, audio = [], [] # Search for suitable video and audio representations for aset in mpd.periods[0].adaptationSets: if aset.contentProtection: raise PluginError("{} is protected by DRM".format(url)) for rep in aset.representations: if rep.mimeType.startswith("video"): video.append(rep) elif rep.mimeType.startswith("audio"): audio.append(rep) if not video: video = [None] if not audio: audio = [None] locale = session.localization locale_lang = locale.language lang = None available_languages = set() # if the locale is explicitly set, prefer that language over others for aud in audio: if aud and aud.lang: available_languages.add(aud.lang) try: if locale.explicit and aud.lang and Language.get(aud.lang) == locale_lang: lang = aud.lang except LookupError: continue if not lang: # filter by the first language that appears lang = audio[0] and audio[0].lang log.debug("Available languages for DASH audio streams: {0} (using: {1})".format(", ".join(available_languages) or "NONE", lang or "n/a")) # if the language is given by the stream, filter out other languages that do not match if len(available_languages) > 1: audio = list(filter(lambda a: a.lang is None or a.lang == lang, audio)) for vid, aud in itertools.product(video, audio): stream = DASHStream(session, mpd, vid, aud, **args) stream_name = [] if vid: stream_name.append("{:0.0f}{}".format(vid.height or vid.bandwidth_rounded, "p" if vid.height else "k")) if audio and len(audio) > 1: stream_name.append("a{:0.0f}k".format(aud.bandwidth)) ret['+'.join(stream_name)] = stream return ret
src/streamlink/stream/dash.py
streamlink/streamlink
HTTPSession.determine_json_encoding
def determine_json_encoding(cls, sample): nulls_at = [i for i, j in enumerate(bytearray(sample[:4])) if j == 0] if nulls_at == [0, 1, 2]: return "UTF-32BE" elif nulls_at == [0, 2]: return "UTF-16BE" elif nulls_at == [1, 2, 3]: return "UTF-32LE" elif nulls_at == [1, 3]: return "UTF-16LE" else: return "UTF-8"
Determine which Unicode encoding the JSON text sample is encoded with, using the RFC 4627 null-byte pattern check.
def determine_json_encoding(cls, sample): """ Determine which Unicode encoding the JSON text sample is encoded with. RFC4627 (http://www.ietf.org/rfc/rfc4627.txt) suggests that the encoding of JSON text can be determined by checking the pattern of NULL bytes in the first 4 octets of the text. :param sample: a sample of at least 4 bytes of the JSON text :return: the most likely encoding of the JSON text """ nulls_at = [i for i, j in enumerate(bytearray(sample[:4])) if j == 0] if nulls_at == [0, 1, 2]: return "UTF-32BE" elif nulls_at == [0, 2]: return "UTF-16BE" elif nulls_at == [1, 2, 3]: return "UTF-32LE" elif nulls_at == [1, 3]: return "UTF-16LE" else: return "UTF-8"
src/streamlink/plugin/api/http_session.py
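A worked example of the null-byte check; RFC 4627 relies on the fact that JSON text begins with two ASCII characters, so the positions of zero bytes in the first four octets identify the encoding:

sample = '{"a": 1}'.encode("utf-16-le")      # starts b'{\x00"\x00'
HTTPSession.determine_json_encoding(sample)  # -> 'UTF-16LE' (nulls at 1 and 3)
HTTPSession.determine_json_encoding('{}'.encode('utf-32-be'))  # -> 'UTF-32BE'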
streamlink/streamlink
HTTPSession.json
def json(cls, res, *args, **kwargs): if res.encoding is None: res.encoding = cls.determine_json_encoding(res.content[:4]) return parse_json(res.text, *args, **kwargs)
Parses JSON from a response.
def json(cls, res, *args, **kwargs): """Parses JSON from a response.""" # if an encoding is already set then use the provided encoding if res.encoding is None: res.encoding = cls.determine_json_encoding(res.content[:4]) return parse_json(res.text, *args, **kwargs)
src/streamlink/plugin/api/http_session.py
streamlink/streamlink
HTTPSession.xml
def xml(cls, res, *args, **kwargs): return parse_xml(res.text, *args, **kwargs)
Parses XML from a response.
def xml(cls, res, *args, **kwargs): """Parses XML from a response.""" return parse_xml(res.text, *args, **kwargs)
src/streamlink/plugin/api/http_session.py
streamlink/streamlink
HTTPSession.parse_cookies
def parse_cookies(self, cookies, **kwargs): for name, value in _parse_keyvalue_list(cookies): self.cookies.set(name, value, **kwargs)
Parses a semi-colon delimited list of cookies.
def parse_cookies(self, cookies, **kwargs): """Parses a semi-colon delimited list of cookies. Example: foo=bar;baz=qux """ for name, value in _parse_keyvalue_list(cookies): self.cookies.set(name, value, **kwargs)
src/streamlink/plugin/api/http_session.py
streamlink/streamlink
HTTPSession.parse_headers
def parse_headers(self, headers): for name, value in _parse_keyvalue_list(headers): self.headers[name] = value
Parses a semi-colon delimited list of headers.
def parse_headers(self, headers): """Parses a semi-colon delimited list of headers. Example: foo=bar;baz=qux """ for name, value in _parse_keyvalue_list(headers): self.headers[name] = value
src/streamlink/plugin/api/http_session.py
streamlink/streamlink
HTTPSession.parse_query_params
def parse_query_params(self, cookies, **kwargs): for name, value in _parse_keyvalue_list(cookies): self.params[name] = value
Parses a semi-colon delimited list of query parameters.
def parse_query_params(self, cookies, **kwargs): """Parses a semi-colon delimited list of query parameters. Example: foo=bar;baz=qux """ for name, value in _parse_keyvalue_list(cookies): self.params[name] = value
src/streamlink/plugin/api/http_session.py
streamlink/streamlink
_LogRecord.getMessage
def getMessage(self): msg = self.msg if self.args: msg = msg.format(*self.args) return maybe_encode(msg)
Return the message for this LogRecord after merging any user-supplied arguments with the message.
def getMessage(self): """ Return the message for this LogRecord after merging any user-supplied arguments with the message. """ msg = self.msg if self.args: msg = msg.format(*self.args) return maybe_encode(msg)
src/streamlink/logger.py
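Unlike the stdlib LogRecord, which uses %-style formatting, this record formats with str.format; a small illustration with hypothetical values:

record = _LogRecord("streamlink.test", 20, __file__, 1,
                    "Opening stream: {0} ({1})", ("best", "hls"), None)
record.getMessage()  # -> 'Opening stream: best (hls)'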
streamlink/streamlink
StreamlinkLogger.makeRecord
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): if name.startswith("streamlink"): rv = _LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo) else: rv = _CompatLogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo) if extra is not None: for key in extra: if (key in ["message", "asctime"]) or (key in rv.__dict__): raise KeyError("Attempt to overwrite %r in LogRecord" % key) rv.__dict__[key] = extra[key] return rv
A factory method which can be overridden in subclasses to create specialized LogRecords.
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): """ A factory method which can be overridden in subclasses to create specialized LogRecords. """ if name.startswith("streamlink"): rv = _LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo) else: rv = _CompatLogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo) if extra is not None: for key in extra: if (key in ["message", "asctime"]) or (key in rv.__dict__): raise KeyError("Attempt to overwrite %r in LogRecord" % key) rv.__dict__[key] = extra[key] return rv
src/streamlink/logger.py
streamlink/streamlink
LiveEdu.login
def login(self): email = self.get_option("email") password = self.get_option("password") if email and password: res = self.session.http.get(self.login_url) csrf_match = self.csrf_re.search(res.text) token = csrf_match and csrf_match.group(1) self.logger.debug("Attempting login as {0} (token={1})", email, token) res = self.session.http.post(self.login_url, data=dict(login=email, password=password, csrfmiddlewaretoken=token), allow_redirects=False, raise_for_status=False, headers={"Referer": self.login_url}) if res.status_code != 302: self.logger.error("Failed to login to LiveEdu account: {0}", email)
Attempt a login to LiveEdu.tv
def login(self): """ Attempt a login to LiveEdu.tv """ email = self.get_option("email") password = self.get_option("password") if email and password: res = self.session.http.get(self.login_url) csrf_match = self.csrf_re.search(res.text) token = csrf_match and csrf_match.group(1) self.logger.debug("Attempting login as {0} (token={1})", email, token) res = self.session.http.post(self.login_url, data=dict(login=email, password=password, csrfmiddlewaretoken=token), allow_redirects=False, raise_for_status=False, headers={"Referer": self.login_url}) if res.status_code != 302: self.logger.error("Failed to login to LiveEdu account: {0}", email)
src/streamlink/plugins/liveedu.py
streamlink/streamlink
update_qsd
def update_qsd(url, qsd=None, remove=None): qsd = qsd or {} remove = remove or [] parsed = urlparse(url) current_qsd = OrderedDict(parse_qsl(parsed.query)) if remove == "*": remove = list(current_qsd.keys()) for key in remove: if key not in qsd: del current_qsd[key] for key, value in qsd.items(): if value: current_qsd[key] = value return parsed._replace(query=urlencode(current_qsd)).geturl()
Update or remove keys from a query string in a URL
def update_qsd(url, qsd=None, remove=None): """ Update or remove keys from a query string in a URL :param url: URL to update :param qsd: dict of keys to update, a None value leaves it unchanged :param remove: list of keys to remove, or "*" to remove all note: updated keys are never removed, even if unchanged :return: updated URL """ qsd = qsd or {} remove = remove or [] # parse current query string parsed = urlparse(url) current_qsd = OrderedDict(parse_qsl(parsed.query)) # * removes all possible keys if remove == "*": remove = list(current_qsd.keys()) # remove keys before updating, but leave updated keys untouched for key in remove: if key not in qsd: del current_qsd[key] # and update the query string for key, value in qsd.items(): if value: current_qsd[key] = value return parsed._replace(query=urlencode(current_qsd)).geturl()
src/streamlink/utils/url.py
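A small example; the URL and keys are illustrative:

update_qsd("http://example.com/live?token=abc&expires=1",
           qsd={"token": "xyz"}, remove=["expires"])
# -> 'http://example.com/live?token=xyz'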
streamlink/streamlink
FLVTagConcat.iter_chunks
def iter_chunks(self, fd=None, buf=None, skip_header=None): timestamps = dict(self.timestamps_add) tag_iterator = self.iter_tags(fd=fd, buf=buf, skip_header=skip_header) if not self.flv_header_written: analyzed_tags = self.analyze_tags(tag_iterator) else: analyzed_tags = [] for tag in chain(analyzed_tags, tag_iterator): if not self.flv_header_written: flv_header = Header(has_video=self.has_video, has_audio=self.has_audio) yield flv_header.serialize() self.flv_header_written = True if self.verify_tag(tag): self.adjust_tag_gap(tag) self.adjust_tag_timestamp(tag) if self.duration: norm_timestamp = tag.timestamp / 1000 if norm_timestamp > self.duration: break yield tag.serialize() timestamps[tag.type] = tag.timestamp if not self.flatten_timestamps: self.timestamps_add = timestamps self.tags = []
Reads FLV tags from fd or buf and returns them with adjusted timestamps.
def iter_chunks(self, fd=None, buf=None, skip_header=None): """Reads FLV tags from fd or buf and returns them with adjusted timestamps.""" timestamps = dict(self.timestamps_add) tag_iterator = self.iter_tags(fd=fd, buf=buf, skip_header=skip_header) if not self.flv_header_written: analyzed_tags = self.analyze_tags(tag_iterator) else: analyzed_tags = [] for tag in chain(analyzed_tags, tag_iterator): if not self.flv_header_written: flv_header = Header(has_video=self.has_video, has_audio=self.has_audio) yield flv_header.serialize() self.flv_header_written = True if self.verify_tag(tag): self.adjust_tag_gap(tag) self.adjust_tag_timestamp(tag) if self.duration: norm_timestamp = tag.timestamp / 1000 if norm_timestamp > self.duration: break yield tag.serialize() timestamps[tag.type] = tag.timestamp if not self.flatten_timestamps: self.timestamps_add = timestamps self.tags = []
src/streamlink/stream/flvconcat.py
streamlink/streamlink
Arguments.requires
def requires(self, name): results = set([name]) argument = self.get(name) for reqname in argument.requires: required = self.get(reqname) if not required: raise KeyError("{0} is not a valid argument for this plugin".format(reqname)) if required.name in results: raise RuntimeError("cycle detected in plugin argument config") results.add(required.name) yield required for r in self.requires(required.name): if r.name in results: raise RuntimeError("cycle detected in plugin argument config") results.add(r.name) yield r
Find all the arguments required by name
def requires(self, name): """ Find all the arguments required by name :param name: name of the argument to find the dependencies of :return: list of dependent arguments """ results = set([name]) argument = self.get(name) for reqname in argument.requires: required = self.get(reqname) if not required: raise KeyError("{0} is not a valid argument for this plugin".format(reqname)) if required.name in results: raise RuntimeError("cycle detected in plugin argument config") results.add(required.name) yield required for r in self.requires(required.name): if r.name in results: raise RuntimeError("cycle detected in plugin argument config") results.add(r.name) yield r
src/streamlink/options.py
streamlink/streamlink
check_file_output
def check_file_output(filename, force): log.debug("Checking file output") if os.path.isfile(filename) and not force: if sys.stdin.isatty(): answer = console.ask("File {0} already exists! Overwrite it? [y/N] ", filename) if answer.lower() != "y": sys.exit() else: log.error("File {0} already exists, use --force to overwrite it.".format(filename)) sys.exit() return FileOutput(filename)
Checks if the file already exists and asks the user whether it should be overwritten.
def check_file_output(filename, force): """Checks if the file already exists and asks the user whether it should be overwritten.""" log.debug("Checking file output") if os.path.isfile(filename) and not force: if sys.stdin.isatty(): answer = console.ask("File {0} already exists! Overwrite it? [y/N] ", filename) if answer.lower() != "y": sys.exit() else: log.error("File {0} already exists, use --force to overwrite it.".format(filename)) sys.exit() return FileOutput(filename)
src/streamlink_cli/main.py
streamlink/streamlink
create_output
def create_output(plugin): if (args.output or args.stdout) and (args.record or args.record_and_pipe): console.exit("Cannot use record options with other file output options.") if args.output: if args.output == "-": out = FileOutput(fd=stdout) else: out = check_file_output(args.output, args.force) elif args.stdout: out = FileOutput(fd=stdout) elif args.record_and_pipe: record = check_file_output(args.record_and_pipe, args.force) out = FileOutput(fd=stdout, record=record) else: http = namedpipe = record = None if not args.player: console.exit("The default player (VLC) does not seem to be " "installed. You must specify the path to a player " "executable with --player.") if args.player_fifo: pipename = "streamlinkpipe-{0}".format(os.getpid()) log.info("Creating pipe {0}", pipename) try: namedpipe = NamedPipe(pipename) except IOError as err: console.exit("Failed to create pipe: {0}", err) elif args.player_http: http = create_http_server() title = create_title(plugin) if args.record: record = check_file_output(args.record, args.force) log.info("Starting player: {0}", args.player) out = PlayerOutput(args.player, args=args.player_args, quiet=not args.verbose_player, kill=not args.player_no_close, namedpipe=namedpipe, http=http, record=record, title=title) return out
Decides where to write the stream. Depending on arguments it can be one of
def create_output(plugin): """Decides where to write the stream. Depending on arguments it can be one of these: - The stdout pipe - A subprocess' stdin pipe - A named pipe that the subprocess reads from - A regular file """ if (args.output or args.stdout) and (args.record or args.record_and_pipe): console.exit("Cannot use record options with other file output options.") if args.output: if args.output == "-": out = FileOutput(fd=stdout) else: out = check_file_output(args.output, args.force) elif args.stdout: out = FileOutput(fd=stdout) elif args.record_and_pipe: record = check_file_output(args.record_and_pipe, args.force) out = FileOutput(fd=stdout, record=record) else: http = namedpipe = record = None if not args.player: console.exit("The default player (VLC) does not seem to be " "installed. You must specify the path to a player " "executable with --player.") if args.player_fifo: pipename = "streamlinkpipe-{0}".format(os.getpid()) log.info("Creating pipe {0}", pipename) try: namedpipe = NamedPipe(pipename) except IOError as err: console.exit("Failed to create pipe: {0}", err) elif args.player_http: http = create_http_server() title = create_title(plugin) if args.record: record = check_file_output(args.record, args.force) log.info("Starting player: {0}", args.player) out = PlayerOutput(args.player, args=args.player_args, quiet=not args.verbose_player, kill=not args.player_no_close, namedpipe=namedpipe, http=http, record=record, title=title) return out
src/streamlink_cli/main.py
streamlink/streamlink
create_http_server
def create_http_server(host=None, port=0): try: http = HTTPServer() http.bind(host=host, port=port) except OSError as err: console.exit("Failed to create HTTP server: {0}", err) return http
Creates an HTTP server listening on a given host and port. If host is empty, listen on all available interfaces, and if port is 0, listen on a random high port.
def create_http_server(host=None, port=0): """Creates an HTTP server listening on a given host and port. If host is empty, listen on all available interfaces, and if port is 0, listen on a random high port. """ try: http = HTTPServer() http.bind(host=host, port=port) except OSError as err: console.exit("Failed to create HTTP server: {0}", err) return http
src/streamlink_cli/main.py
streamlink/streamlink
iter_http_requests
def iter_http_requests(server, player): while not player or player.running: try: yield server.open(timeout=2.5) except OSError: continue
Repeatedly accepts HTTP connections on a server: forever if serving externally, otherwise only while the player is running.
def iter_http_requests(server, player): """Repeatedly accepts HTTP connections on a server: forever if serving externally, otherwise only while the player is running. """ while not player or player.running: try: yield server.open(timeout=2.5) except OSError: continue
src/streamlink_cli/main.py
streamlink/streamlink
output_stream_http
def output_stream_http(plugin, initial_streams, external=False, port=0): global output if not external: if not args.player: console.exit("The default player (VLC) does not seem to be " "installed. You must specify the path to a player " "executable with --player.") title = create_title(plugin) server = create_http_server() player = output = PlayerOutput(args.player, args=args.player_args, filename=server.url, quiet=not args.verbose_player, title=title) try: log.info("Starting player: {0}", args.player) if player: player.open() except OSError as err: console.exit("Failed to start player: {0} ({1})", args.player, err) else: server = create_http_server(host=None, port=port) player = None log.info("Starting server, access with one of:") for url in server.urls: log.info(" " + url) for req in iter_http_requests(server, player): user_agent = req.headers.get("User-Agent") or "unknown player" log.info("Got HTTP request from {0}".format(user_agent)) stream_fd = prebuffer = None while not stream_fd and (not player or player.running): try: streams = initial_streams or fetch_streams(plugin) initial_streams = None for stream_name in (resolve_stream_name(streams, s) for s in args.stream): if stream_name in streams: stream = streams[stream_name] break else: log.info("Stream not available, will re-fetch " "streams in 10 sec") sleep(10) continue except PluginError as err: log.error(u"Unable to fetch new streams: {0}", err) continue try: log.info("Opening stream: {0} ({1})", stream_name, type(stream).shortname()) stream_fd, prebuffer = open_stream(stream) except StreamError as err: log.error("{0}", err) if stream_fd and prebuffer: log.debug("Writing stream to player") read_stream(stream_fd, server, prebuffer) server.close(True) player.close() server.close()
Continuously output the stream over HTTP.
def output_stream_http(plugin, initial_streams, external=False, port=0): """Continuously output the stream over HTTP.""" global output if not external: if not args.player: console.exit("The default player (VLC) does not seem to be " "installed. You must specify the path to a player " "executable with --player.") title = create_title(plugin) server = create_http_server() player = output = PlayerOutput(args.player, args=args.player_args, filename=server.url, quiet=not args.verbose_player, title=title) try: log.info("Starting player: {0}", args.player) if player: player.open() except OSError as err: console.exit("Failed to start player: {0} ({1})", args.player, err) else: server = create_http_server(host=None, port=port) player = None log.info("Starting server, access with one of:") for url in server.urls: log.info(" " + url) for req in iter_http_requests(server, player): user_agent = req.headers.get("User-Agent") or "unknown player" log.info("Got HTTP request from {0}".format(user_agent)) stream_fd = prebuffer = None while not stream_fd and (not player or player.running): try: streams = initial_streams or fetch_streams(plugin) initial_streams = None for stream_name in (resolve_stream_name(streams, s) for s in args.stream): if stream_name in streams: stream = streams[stream_name] break else: log.info("Stream not available, will re-fetch " "streams in 10 sec") sleep(10) continue except PluginError as err: log.error(u"Unable to fetch new streams: {0}", err) continue try: log.info("Opening stream: {0} ({1})", stream_name, type(stream).shortname()) stream_fd, prebuffer = open_stream(stream) except StreamError as err: log.error("{0}", err) if stream_fd and prebuffer: log.debug("Writing stream to player") read_stream(stream_fd, server, prebuffer) server.close(True) player.close() server.close()
src/streamlink_cli/main.py
streamlink/streamlink
output_stream_passthrough
def output_stream_passthrough(plugin, stream):
    global output

    title = create_title(plugin)
    filename = '"{0}"'.format(stream_to_url(stream))
    output = PlayerOutput(args.player, args=args.player_args,
                          filename=filename, call=True,
                          quiet=not args.verbose_player,
                          title=title)

    try:
        log.info("Starting player: {0}", args.player)
        output.open()
    except OSError as err:
        console.exit("Failed to start player: {0} ({1})",
                     args.player, err)
        return False

    return True
Prepares a filename to be passed to the player.
def output_stream_passthrough(plugin, stream):
    """Prepares a filename to be passed to the player."""
    global output

    title = create_title(plugin)
    filename = '"{0}"'.format(stream_to_url(stream))
    output = PlayerOutput(args.player, args=args.player_args,
                          filename=filename, call=True,
                          quiet=not args.verbose_player,
                          title=title)

    try:
        log.info("Starting player: {0}", args.player)
        output.open()
    except OSError as err:
        console.exit("Failed to start player: {0} ({1})",
                     args.player, err)
        return False

    return True
src/streamlink_cli/main.py
streamlink/streamlink
open_stream
def open_stream(stream):
    global stream_fd

    try:
        stream_fd = stream.open()
    except StreamError as err:
        raise StreamError("Could not open stream: {0}".format(err))

    try:
        log.debug("Pre-buffering 8192 bytes")
        prebuffer = stream_fd.read(8192)
    except IOError as err:
        stream_fd.close()
        raise StreamError("Failed to read data from stream: {0}".format(err))

    if not prebuffer:
        stream_fd.close()
        raise StreamError("No data returned from stream")

    return stream_fd, prebuffer
Opens a stream and reads 8192 bytes from it. This is useful to check if a stream actually has data before opening the output.
def open_stream(stream):
    """Opens a stream and reads 8192 bytes from it.

    This is useful to check if a stream actually has data
    before opening the output.
    """
    global stream_fd

    # Attempts to open the stream
    try:
        stream_fd = stream.open()
    except StreamError as err:
        raise StreamError("Could not open stream: {0}".format(err))

    # Read 8192 bytes before proceeding to check for errors.
    # This is to avoid opening the output unnecessarily.
    try:
        log.debug("Pre-buffering 8192 bytes")
        prebuffer = stream_fd.read(8192)
    except IOError as err:
        stream_fd.close()
        raise StreamError("Failed to read data from stream: {0}".format(err))

    if not prebuffer:
        stream_fd.close()
        raise StreamError("No data returned from stream")

    return stream_fd, prebuffer
src/streamlink_cli/main.py
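For context, the same fail-fast pre-buffering idea can be reproduced against the public session API. A minimal sketch (the URL and output filename are placeholders, and "best" assumes the plugin resolved any streams at all):

from streamlink import Streamlink
from streamlink.exceptions import StreamError

session = Streamlink()
streams = session.streams("https://example.com/live")  # placeholder URL

fd = streams["best"].open()
try:
    prebuffer = fd.read(8192)  # fail fast before creating any output
    if not prebuffer:
        raise StreamError("No data returned from stream")
    with open("out.ts", "wb") as out:
        out.write(prebuffer)
        for chunk in iter(lambda: fd.read(8192), b""):
            out.write(chunk)
finally:
    fd.close()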
streamlink/streamlink
output_stream
def output_stream(plugin, stream):
    global output

    success_open = False
    for i in range(args.retry_open):
        try:
            stream_fd, prebuffer = open_stream(stream)
            success_open = True
            break
        except StreamError as err:
            log.error("Try {0}/{1}: Could not open stream {2} ({3})",
                      i + 1, args.retry_open, stream, err)

    if not success_open:
        console.exit("Could not open stream {0}, tried {1} times, exiting",
                     stream, args.retry_open)

    output = create_output(plugin)

    try:
        output.open()
    except (IOError, OSError) as err:
        if isinstance(output, PlayerOutput):
            console.exit("Failed to start player: {0} ({1})",
                         args.player, err)
        else:
            console.exit("Failed to open output: {0} ({1})",
                         args.output, err)

    with closing(output):
        log.debug("Writing stream to output")
        read_stream(stream_fd, output, prebuffer)

    return True
Open stream, create output and finally write the stream to output.
def output_stream(plugin, stream):
    """Open stream, create output and finally write the stream to output."""
    global output

    success_open = False
    for i in range(args.retry_open):
        try:
            stream_fd, prebuffer = open_stream(stream)
            success_open = True
            break
        except StreamError as err:
            log.error("Try {0}/{1}: Could not open stream {2} ({3})",
                      i + 1, args.retry_open, stream, err)

    if not success_open:
        console.exit("Could not open stream {0}, tried {1} times, exiting",
                     stream, args.retry_open)

    output = create_output(plugin)

    try:
        output.open()
    except (IOError, OSError) as err:
        if isinstance(output, PlayerOutput):
            console.exit("Failed to start player: {0} ({1})",
                         args.player, err)
        else:
            console.exit("Failed to open output: {0} ({1})",
                         args.output, err)

    with closing(output):
        log.debug("Writing stream to output")
        read_stream(stream_fd, output, prebuffer)

    return True
src/streamlink_cli/main.py
streamlink/streamlink
read_stream
def read_stream(stream, output, prebuffer, chunk_size=8192):
    is_player = isinstance(output, PlayerOutput)
    is_http = isinstance(output, HTTPServer)
    is_fifo = is_player and output.namedpipe
    show_progress = isinstance(output, FileOutput) and output.fd is not stdout and sys.stdout.isatty()
    show_record_progress = hasattr(output, "record") and isinstance(output.record, FileOutput) and output.record.fd is not stdout and sys.stdout.isatty()

    stream_iterator = chain(
        [prebuffer],
        iter(partial(stream.read, chunk_size), b"")
    )
    if show_progress:
        stream_iterator = progress(stream_iterator,
                                   prefix=os.path.basename(args.output))
    elif show_record_progress:
        stream_iterator = progress(stream_iterator,
                                   prefix=os.path.basename(args.record))

    try:
        for data in stream_iterator:
            if is_win32 and is_fifo:
                output.player.poll()
                if output.player.returncode is not None:
                    log.info("Player closed")
                    break

            try:
                output.write(data)
            except IOError as err:
                if is_player and err.errno in ACCEPTABLE_ERRNO:
                    log.info("Player closed")
                elif is_http and err.errno in ACCEPTABLE_ERRNO:
                    log.info("HTTP connection closed")
                else:
                    console.exit("Error when writing to output: {0}, exiting", err)
                break
    except IOError as err:
        console.exit("Error when reading from stream: {0}, exiting", err)
    finally:
        stream.close()
        log.info("Stream ended")
Reads data from stream and then writes it to the output.
def read_stream(stream, output, prebuffer, chunk_size=8192):
    """Reads data from stream and then writes it to the output."""
    is_player = isinstance(output, PlayerOutput)
    is_http = isinstance(output, HTTPServer)
    is_fifo = is_player and output.namedpipe
    show_progress = isinstance(output, FileOutput) and output.fd is not stdout and sys.stdout.isatty()
    show_record_progress = hasattr(output, "record") and isinstance(output.record, FileOutput) and output.record.fd is not stdout and sys.stdout.isatty()

    stream_iterator = chain(
        [prebuffer],
        iter(partial(stream.read, chunk_size), b"")
    )
    if show_progress:
        stream_iterator = progress(stream_iterator,
                                   prefix=os.path.basename(args.output))
    elif show_record_progress:
        stream_iterator = progress(stream_iterator,
                                   prefix=os.path.basename(args.record))

    try:
        for data in stream_iterator:
            # We need to check if the player process still exists when
            # using named pipes on Windows since the named pipe is not
            # automatically closed by the player.
            if is_win32 and is_fifo:
                output.player.poll()
                if output.player.returncode is not None:
                    log.info("Player closed")
                    break

            try:
                output.write(data)
            except IOError as err:
                if is_player and err.errno in ACCEPTABLE_ERRNO:
                    log.info("Player closed")
                elif is_http and err.errno in ACCEPTABLE_ERRNO:
                    log.info("HTTP connection closed")
                else:
                    console.exit("Error when writing to output: {0}, exiting", err)
                break
    except IOError as err:
        console.exit("Error when reading from stream: {0}, exiting", err)
    finally:
        stream.close()
        log.info("Stream ended")
src/streamlink_cli/main.py
streamlink/streamlink
handle_stream
def handle_stream(plugin, streams, stream_name):
    stream_name = resolve_stream_name(streams, stream_name)
    stream = streams[stream_name]

    if args.subprocess_cmdline:
        if isinstance(stream, StreamProcess):
            try:
                cmdline = stream.cmdline()
            except StreamError as err:
                console.exit("{0}", err)
            console.msg("{0}", cmdline)
        else:
            console.exit("The stream specified cannot be translated to a command")
    elif console.json:
        console.msg_json(stream)
    elif args.stream_url:
        try:
            console.msg("{0}", stream.to_url())
        except TypeError:
            console.exit("The stream specified cannot be translated to a URL")
    else:
        alt_streams = list(filter(lambda k: stream_name + "_alt" in k,
                                  sorted(streams.keys())))
        file_output = args.output or args.stdout

        for stream_name in [stream_name] + alt_streams:
            stream = streams[stream_name]
            stream_type = type(stream).shortname()

            if stream_type in args.player_passthrough and not file_output:
                log.info("Opening stream: {0} ({1})", stream_name, stream_type)
                success = output_stream_passthrough(plugin, stream)
            elif args.player_external_http:
                return output_stream_http(plugin, streams, external=True,
                                          port=args.player_external_http_port)
            elif args.player_continuous_http and not file_output:
                return output_stream_http(plugin, streams)
            else:
                log.info("Opening stream: {0} ({1})", stream_name, stream_type)
                success = output_stream(plugin, stream)

            if success:
                break
Decides what to do with the selected stream. Depending on arguments it can be one of these: output the internal command-line, output a JSON representation, continuously output the stream over HTTP, or output stream data to the selected output.
def handle_stream(plugin, streams, stream_name):
    """Decides what to do with the selected stream.

    Depending on arguments it can be one of these:
     - Output internal command-line
     - Output JSON representation
     - Continuously output the stream over HTTP
     - Output stream data to selected output
    """
    stream_name = resolve_stream_name(streams, stream_name)
    stream = streams[stream_name]

    # Print internal command-line if this stream
    # uses a subprocess.
    if args.subprocess_cmdline:
        if isinstance(stream, StreamProcess):
            try:
                cmdline = stream.cmdline()
            except StreamError as err:
                console.exit("{0}", err)
            console.msg("{0}", cmdline)
        else:
            console.exit("The stream specified cannot be translated to a command")
    # Print JSON representation of the stream
    elif console.json:
        console.msg_json(stream)
    elif args.stream_url:
        try:
            console.msg("{0}", stream.to_url())
        except TypeError:
            console.exit("The stream specified cannot be translated to a URL")
    # Output the stream
    else:
        # Find any streams with a '_alt' suffix and attempt
        # to use these in case the main stream is not usable.
        alt_streams = list(filter(lambda k: stream_name + "_alt" in k,
                                  sorted(streams.keys())))
        file_output = args.output or args.stdout

        for stream_name in [stream_name] + alt_streams:
            stream = streams[stream_name]
            stream_type = type(stream).shortname()

            if stream_type in args.player_passthrough and not file_output:
                log.info("Opening stream: {0} ({1})", stream_name, stream_type)
                success = output_stream_passthrough(plugin, stream)
            elif args.player_external_http:
                return output_stream_http(plugin, streams, external=True,
                                          port=args.player_external_http_port)
            elif args.player_continuous_http and not file_output:
                return output_stream_http(plugin, streams)
            else:
                log.info("Opening stream: {0} ({1})", stream_name, stream_type)
                success = output_stream(plugin, stream)

            if success:
                break
src/streamlink_cli/main.py
streamlink/streamlink
fetch_streams
def fetch_streams(plugin):
    return plugin.streams(stream_types=args.stream_types,
                          sorting_excludes=args.stream_sorting_excludes)
Fetches streams using correct parameters.
def fetch_streams(plugin):
    """Fetches streams using correct parameters."""
    return plugin.streams(stream_types=args.stream_types,
                          sorting_excludes=args.stream_sorting_excludes)
src/streamlink_cli/main.py
streamlink/streamlink
fetch_streams_with_retry
def fetch_streams_with_retry(plugin, interval, count):
    try:
        streams = fetch_streams(plugin)
    except PluginError as err:
        log.error(u"{0}", err)
        streams = None

    if not streams:
        log.info("Waiting for streams, retrying every {0} "
                 "second(s)", interval)
    attempts = 0

    while not streams:
        sleep(interval)

        try:
            streams = fetch_streams(plugin)
        except FatalPluginError as err:
            raise
        except PluginError as err:
            log.error(u"{0}", err)

        if count > 0:
            attempts += 1
            if attempts >= count:
                break

    return streams
Attempts to fetch streams repeatedly until some are returned or the retry limit is hit.
def fetch_streams_with_retry(plugin, interval, count):
    """Attempts to fetch streams repeatedly
    until some are returned or the retry limit is hit."""
    try:
        streams = fetch_streams(plugin)
    except PluginError as err:
        log.error(u"{0}", err)
        streams = None

    if not streams:
        log.info("Waiting for streams, retrying every {0} "
                 "second(s)", interval)
    attempts = 0

    while not streams:
        sleep(interval)

        try:
            streams = fetch_streams(plugin)
        except FatalPluginError as err:
            raise
        except PluginError as err:
            log.error(u"{0}", err)

        if count > 0:
            attempts += 1
            if attempts >= count:
                break

    return streams
src/streamlink_cli/main.py
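The bounded-retry behaviour above can be isolated into a small helper. A sketch under the assumption that fetch() returns a falsy value when nothing is available yet (all names here are hypothetical):

import time

def retry_fetch(fetch, interval=5, count=3):
    """Call fetch() until it returns a truthy value or count attempts pass.

    A count of 0 retries forever, matching the behaviour described above.
    """
    result = fetch()
    attempts = 0
    while not result:
        time.sleep(interval)
        result = fetch()
        if count > 0:
            attempts += 1
            if attempts >= count:
                break
    return result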
streamlink/streamlink
format_valid_streams
def format_valid_streams(plugin, streams):
    delimiter = ", "
    validstreams = []

    for name, stream in sorted(streams.items(),
                               key=lambda stream: plugin.stream_weight(stream[0])):
        if name in STREAM_SYNONYMS:
            continue

        def synonymfilter(n):
            return stream is streams[n] and n is not name

        synonyms = list(filter(synonymfilter, streams.keys()))

        if len(synonyms) > 0:
            joined = delimiter.join(synonyms)
            name = "{0} ({1})".format(name, joined)

        validstreams.append(name)

    return delimiter.join(validstreams)
Formats a dict of streams. Filters out synonyms and displays them next to the stream they point to. Streams are sorted according to their quality (based on plugin.stream_weight).
def format_valid_streams(plugin, streams):
    """Formats a dict of streams.

    Filters out synonyms and displays them next to
    the stream they point to.

    Streams are sorted according to their quality
    (based on plugin.stream_weight).
    """
    delimiter = ", "
    validstreams = []

    for name, stream in sorted(streams.items(),
                               key=lambda stream: plugin.stream_weight(stream[0])):
        if name in STREAM_SYNONYMS:
            continue

        def synonymfilter(n):
            return stream is streams[n] and n is not name

        synonyms = list(filter(synonymfilter, streams.keys()))

        if len(synonyms) > 0:
            joined = delimiter.join(synonyms)
            name = "{0} ({1})".format(name, joined)

        validstreams.append(name)

    return delimiter.join(validstreams)
src/streamlink_cli/main.py
streamlink/streamlink
print_plugins
def print_plugins():
    pluginlist = list(streamlink.get_plugins().keys())
    pluginlist_formatted = ", ".join(sorted(pluginlist))

    if console.json:
        console.msg_json(pluginlist)
    else:
        console.msg("Loaded plugins: {0}", pluginlist_formatted)
Outputs a list of all plugins Streamlink has loaded.
def print_plugins():
    """Outputs a list of all plugins Streamlink has loaded."""
    pluginlist = list(streamlink.get_plugins().keys())
    pluginlist_formatted = ", ".join(sorted(pluginlist))

    if console.json:
        console.msg_json(pluginlist)
    else:
        console.msg("Loaded plugins: {0}", pluginlist_formatted)
src/streamlink_cli/main.py
streamlink/streamlink
authenticate_twitch_oauth
def authenticate_twitch_oauth():
    client_id = TWITCH_CLIENT_ID
    redirect_uri = "https://streamlink.github.io/twitch_oauth.html"
    url = ("https://api.twitch.tv/kraken/oauth2/authorize"
           "?response_type=token"
           "&client_id={0}"
           "&redirect_uri={1}"
           "&scope=user_read+user_subscriptions"
           "&force_verify=true").format(client_id, redirect_uri)

    console.msg("Attempting to open a browser to let you authenticate "
                "Streamlink with Twitch")

    try:
        if not webbrowser.open_new_tab(url):
            raise webbrowser.Error
    except webbrowser.Error:
        console.exit("Unable to open a web browser, try accessing this URL "
                     "manually instead:\n{0}".format(url))
Opens a web browser to allow the user to grant Streamlink access to their Twitch account.
def authenticate_twitch_oauth():
    """Opens a web browser to allow the user to grant Streamlink
    access to their Twitch account."""
    client_id = TWITCH_CLIENT_ID
    redirect_uri = "https://streamlink.github.io/twitch_oauth.html"
    url = ("https://api.twitch.tv/kraken/oauth2/authorize"
           "?response_type=token"
           "&client_id={0}"
           "&redirect_uri={1}"
           "&scope=user_read+user_subscriptions"
           "&force_verify=true").format(client_id, redirect_uri)

    console.msg("Attempting to open a browser to let you authenticate "
                "Streamlink with Twitch")

    try:
        if not webbrowser.open_new_tab(url):
            raise webbrowser.Error
    except webbrowser.Error:
        console.exit("Unable to open a web browser, try accessing this URL "
                     "manually instead:\n{0}".format(url))
src/streamlink_cli/main.py
streamlink/streamlink
load_plugins
def load_plugins(dirs):
    dirs = [os.path.expanduser(d) for d in dirs]

    for directory in dirs:
        if os.path.isdir(directory):
            streamlink.load_plugins(directory)
        else:
            log.warning("Plugin path {0} does not exist or is not "
                        "a directory!", directory)
Attempts to load plugins from a list of directories.
def load_plugins(dirs):
    """Attempts to load plugins from a list of directories."""
    dirs = [os.path.expanduser(d) for d in dirs]

    for directory in dirs:
        if os.path.isdir(directory):
            streamlink.load_plugins(directory)
        else:
            log.warning("Plugin path {0} does not exist or is not "
                        "a directory!", directory)
src/streamlink_cli/main.py
streamlink/streamlink
setup_args
def setup_args(parser, config_files=[], ignore_unknown=False):
    global args
    arglist = sys.argv[1:]

    for config_file in filter(os.path.isfile, config_files):
        arglist.insert(0, "@" + config_file)

    args, unknown = parser.parse_known_args(arglist)

    if unknown and not ignore_unknown:
        msg = gettext('unrecognized arguments: %s')
        parser.error(msg % ' '.join(unknown))

    if args.stream:
        args.stream = [stream.lower() for stream in args.stream]

    if not args.url and args.url_param:
        args.url = args.url_param
Parses arguments.
def setup_args(parser, config_files=[], ignore_unknown=False):
    """Parses arguments."""
    global args
    arglist = sys.argv[1:]

    # Load arguments from config files
    for config_file in filter(os.path.isfile, config_files):
        arglist.insert(0, "@" + config_file)

    args, unknown = parser.parse_known_args(arglist)

    if unknown and not ignore_unknown:
        msg = gettext('unrecognized arguments: %s')
        parser.error(msg % ' '.join(unknown))

    # Force lowercase to allow case-insensitive lookup
    if args.stream:
        args.stream = [stream.lower() for stream in args.stream]

    if not args.url and args.url_param:
        args.url = args.url_param
src/streamlink_cli/main.py
streamlink/streamlink
setup_console
def setup_console(output):
    global console

    console = ConsoleOutput(output, streamlink)
    console.json = args.json

    signal.signal(signal.SIGTERM, signal.default_int_handler)
Console setup.
def setup_console(output):
    """Console setup."""
    global console

    # All console related operations are handled via the ConsoleOutput class
    console = ConsoleOutput(output, streamlink)
    console.json = args.json

    # Handle SIGTERM just like SIGINT
    signal.signal(signal.SIGTERM, signal.default_int_handler)
src/streamlink_cli/main.py
streamlink/streamlink
setup_http_session
def setup_http_session():
    if args.http_proxy:
        streamlink.set_option("http-proxy", args.http_proxy)
    if args.https_proxy:
        streamlink.set_option("https-proxy", args.https_proxy)
    if args.http_cookie:
        streamlink.set_option("http-cookies", dict(args.http_cookie))
    if args.http_header:
        streamlink.set_option("http-headers", dict(args.http_header))
    if args.http_query_param:
        streamlink.set_option("http-query-params", dict(args.http_query_param))
    if args.http_ignore_env:
        streamlink.set_option("http-trust-env", False)
    if args.http_no_ssl_verify:
        streamlink.set_option("http-ssl-verify", False)
    if args.http_disable_dh:
        streamlink.set_option("http-disable-dh", True)
    if args.http_ssl_cert:
        streamlink.set_option("http-ssl-cert", args.http_ssl_cert)
    if args.http_ssl_cert_crt_key:
        streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key))
    if args.http_timeout:
        streamlink.set_option("http-timeout", args.http_timeout)
    if args.http_cookies:
        streamlink.set_option("http-cookies", args.http_cookies)
    if args.http_headers:
        streamlink.set_option("http-headers", args.http_headers)
    if args.http_query_params:
        streamlink.set_option("http-query-params", args.http_query_params)
Sets the global HTTP settings, such as proxy and headers.
def setup_http_session():
    """Sets the global HTTP settings, such as proxy and headers."""
    if args.http_proxy:
        streamlink.set_option("http-proxy", args.http_proxy)
    if args.https_proxy:
        streamlink.set_option("https-proxy", args.https_proxy)
    if args.http_cookie:
        streamlink.set_option("http-cookies", dict(args.http_cookie))
    if args.http_header:
        streamlink.set_option("http-headers", dict(args.http_header))
    if args.http_query_param:
        streamlink.set_option("http-query-params", dict(args.http_query_param))
    if args.http_ignore_env:
        streamlink.set_option("http-trust-env", False)
    if args.http_no_ssl_verify:
        streamlink.set_option("http-ssl-verify", False)
    if args.http_disable_dh:
        streamlink.set_option("http-disable-dh", True)
    if args.http_ssl_cert:
        streamlink.set_option("http-ssl-cert", args.http_ssl_cert)
    if args.http_ssl_cert_crt_key:
        streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key))
    if args.http_timeout:
        streamlink.set_option("http-timeout", args.http_timeout)
    if args.http_cookies:
        streamlink.set_option("http-cookies", args.http_cookies)
    if args.http_headers:
        streamlink.set_option("http-headers", args.http_headers)
    if args.http_query_params:
        streamlink.set_option("http-query-params", args.http_query_params)
src/streamlink_cli/main.py
streamlink/streamlink
setup_plugins
def setup_plugins(extra_plugin_dir=None):
    if os.path.isdir(PLUGINS_DIR):
        load_plugins([PLUGINS_DIR])

    if extra_plugin_dir:
        load_plugins(extra_plugin_dir)
Loads any additional plugins.
def setup_plugins(extra_plugin_dir=None):
    """Loads any additional plugins."""
    if os.path.isdir(PLUGINS_DIR):
        load_plugins([PLUGINS_DIR])

    if extra_plugin_dir:
        load_plugins(extra_plugin_dir)
src/streamlink_cli/main.py
streamlink/streamlink
setup_options
def setup_options():
    if args.hls_live_edge:
        streamlink.set_option("hls-live-edge", args.hls_live_edge)
    if args.hls_segment_attempts:
        streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts)
    if args.hls_playlist_reload_attempts:
        streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts)
    if args.hls_segment_threads:
        streamlink.set_option("hls-segment-threads", args.hls_segment_threads)
    if args.hls_segment_timeout:
        streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout)
    if args.hls_segment_ignore_names:
        streamlink.set_option("hls-segment-ignore-names", args.hls_segment_ignore_names)
    if args.hls_segment_key_uri:
        streamlink.set_option("hls-segment-key-uri", args.hls_segment_key_uri)
    if args.hls_timeout:
        streamlink.set_option("hls-timeout", args.hls_timeout)
    if args.hls_audio_select:
        streamlink.set_option("hls-audio-select", args.hls_audio_select)
    if args.hls_start_offset:
        streamlink.set_option("hls-start-offset", args.hls_start_offset)
    if args.hls_duration:
        streamlink.set_option("hls-duration", args.hls_duration)
    if args.hls_live_restart:
        streamlink.set_option("hls-live-restart", args.hls_live_restart)
    if args.hds_live_edge:
        streamlink.set_option("hds-live-edge", args.hds_live_edge)
    if args.hds_segment_attempts:
        streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts)
    if args.hds_segment_threads:
        streamlink.set_option("hds-segment-threads", args.hds_segment_threads)
    if args.hds_segment_timeout:
        streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout)
    if args.hds_timeout:
        streamlink.set_option("hds-timeout", args.hds_timeout)
    if args.http_stream_timeout:
        streamlink.set_option("http-stream-timeout", args.http_stream_timeout)
    if args.ringbuffer_size:
        streamlink.set_option("ringbuffer-size", args.ringbuffer_size)
    if args.rtmp_proxy:
        streamlink.set_option("rtmp-proxy", args.rtmp_proxy)
    if args.rtmp_rtmpdump:
        streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump)
    if args.rtmp_timeout:
        streamlink.set_option("rtmp-timeout", args.rtmp_timeout)
    if args.stream_segment_attempts:
        streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts)
    if args.stream_segment_threads:
        streamlink.set_option("stream-segment-threads", args.stream_segment_threads)
    if args.stream_segment_timeout:
        streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout)
    if args.stream_timeout:
        streamlink.set_option("stream-timeout", args.stream_timeout)
    if args.ffmpeg_ffmpeg:
        streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg)
    if args.ffmpeg_verbose:
        streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose)
    if args.ffmpeg_verbose_path:
        streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path)
    if args.ffmpeg_video_transcode:
        streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode)
    if args.ffmpeg_audio_transcode:
        streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode)

    streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog)
    streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path)
    streamlink.set_option("locale", args.locale)
Sets Streamlink options.
def setup_options():
    """Sets Streamlink options."""
    if args.hls_live_edge:
        streamlink.set_option("hls-live-edge", args.hls_live_edge)
    if args.hls_segment_attempts:
        streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts)
    if args.hls_playlist_reload_attempts:
        streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts)
    if args.hls_segment_threads:
        streamlink.set_option("hls-segment-threads", args.hls_segment_threads)
    if args.hls_segment_timeout:
        streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout)
    if args.hls_segment_ignore_names:
        streamlink.set_option("hls-segment-ignore-names", args.hls_segment_ignore_names)
    if args.hls_segment_key_uri:
        streamlink.set_option("hls-segment-key-uri", args.hls_segment_key_uri)
    if args.hls_timeout:
        streamlink.set_option("hls-timeout", args.hls_timeout)
    if args.hls_audio_select:
        streamlink.set_option("hls-audio-select", args.hls_audio_select)
    if args.hls_start_offset:
        streamlink.set_option("hls-start-offset", args.hls_start_offset)
    if args.hls_duration:
        streamlink.set_option("hls-duration", args.hls_duration)
    if args.hls_live_restart:
        streamlink.set_option("hls-live-restart", args.hls_live_restart)
    if args.hds_live_edge:
        streamlink.set_option("hds-live-edge", args.hds_live_edge)
    if args.hds_segment_attempts:
        streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts)
    if args.hds_segment_threads:
        streamlink.set_option("hds-segment-threads", args.hds_segment_threads)
    if args.hds_segment_timeout:
        streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout)
    if args.hds_timeout:
        streamlink.set_option("hds-timeout", args.hds_timeout)
    if args.http_stream_timeout:
        streamlink.set_option("http-stream-timeout", args.http_stream_timeout)
    if args.ringbuffer_size:
        streamlink.set_option("ringbuffer-size", args.ringbuffer_size)
    if args.rtmp_proxy:
        streamlink.set_option("rtmp-proxy", args.rtmp_proxy)
    if args.rtmp_rtmpdump:
        streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump)
    if args.rtmp_timeout:
        streamlink.set_option("rtmp-timeout", args.rtmp_timeout)
    if args.stream_segment_attempts:
        streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts)
    if args.stream_segment_threads:
        streamlink.set_option("stream-segment-threads", args.stream_segment_threads)
    if args.stream_segment_timeout:
        streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout)
    if args.stream_timeout:
        streamlink.set_option("stream-timeout", args.stream_timeout)
    if args.ffmpeg_ffmpeg:
        streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg)
    if args.ffmpeg_verbose:
        streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose)
    if args.ffmpeg_verbose_path:
        streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path)
    if args.ffmpeg_video_transcode:
        streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode)
    if args.ffmpeg_audio_transcode:
        streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode)

    streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog)
    streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path)
    streamlink.set_option("locale", args.locale)
src/streamlink_cli/main.py
streamlink/streamlink
log_current_versions
def log_current_versions():
    if logger.root.isEnabledFor(logging.DEBUG):
        if sys.platform == "darwin":
            os_version = "macOS {0}".format(platform.mac_ver()[0])
        elif sys.platform.startswith("win"):
            os_version = "{0} {1}".format(platform.system(), platform.release())
        else:
            os_version = platform.platform()

        log.debug("OS: {0}".format(os_version))
        log.debug("Python: {0}".format(platform.python_version()))
        log.debug("Streamlink: {0}".format(streamlink_version))
        log.debug("Requests({0}), Socks({1}), Websocket({2})".format(
            requests.__version__, socks_version, websocket_version))
Show currently installed versions
def log_current_versions():
    """Show currently installed versions"""
    if logger.root.isEnabledFor(logging.DEBUG):
        # macOS
        if sys.platform == "darwin":
            os_version = "macOS {0}".format(platform.mac_ver()[0])
        # Windows
        elif sys.platform.startswith("win"):
            os_version = "{0} {1}".format(platform.system(), platform.release())
        # Linux / other
        else:
            os_version = platform.platform()

        log.debug("OS: {0}".format(os_version))
        log.debug("Python: {0}".format(platform.python_version()))
        log.debug("Streamlink: {0}".format(streamlink_version))
        log.debug("Requests({0}), Socks({1}), Websocket({2})".format(
            requests.__version__, socks_version, websocket_version))
src/streamlink_cli/main.py
streamlink/streamlink
Viasat._get_stream_id
def _get_stream_id(self, text):
    m = self._image_re.search(text)
    if m:
        return m.group("stream_id")
Try to find a stream_id
def _get_stream_id(self, text):
    """Try to find a stream_id"""
    m = self._image_re.search(text)
    if m:
        return m.group("stream_id")
src/streamlink/plugins/viasat.py
streamlink/streamlink
Viasat._get_iframe
def _get_iframe(self, text):
    m = self._iframe_re.search(text)
    if m:
        return self.session.streams(m.group("url"))
Fallback if no stream_id was found before
def _get_iframe(self, text):
    """Fallback if no stream_id was found before"""
    m = self._iframe_re.search(text)
    if m:
        return self.session.streams(m.group("url"))
src/streamlink/plugins/viasat.py
streamlink/streamlink
Streamlink.set_option
def set_option(self, key, value):
    if key == "rtmpdump":
        key = "rtmp-rtmpdump"
    elif key == "rtmpdump-proxy":
        key = "rtmp-proxy"
    elif key == "errorlog":
        key = "subprocess-errorlog"
    elif key == "errorlog-path":
        key = "subprocess-errorlog-path"

    if key == "http-proxy":
        self.http.proxies["http"] = update_scheme("http://", value)
    elif key == "https-proxy":
        self.http.proxies["https"] = update_scheme("https://", value)
    elif key == "http-cookies":
        if isinstance(value, dict):
            self.http.cookies.update(value)
        else:
            self.http.parse_cookies(value)
    elif key == "http-headers":
        if isinstance(value, dict):
            self.http.headers.update(value)
        else:
            self.http.parse_headers(value)
    elif key == "http-query-params":
        if isinstance(value, dict):
            self.http.params.update(value)
        else:
            self.http.parse_query_params(value)
    elif key == "http-trust-env":
        self.http.trust_env = value
    elif key == "http-ssl-verify":
        self.http.verify = value
    elif key == "http-disable-dh":
        if value:
            requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':!DH'
            try:
                requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST = \
                    requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS.encode("ascii")
            except AttributeError:
                pass
    elif key == "http-ssl-cert":
        self.http.cert = value
    elif key == "http-timeout":
        self.http.timeout = value
    else:
        self.options.set(key, value)
Sets general options used by plugins and streams originating from this session object.
def set_option(self, key, value):
    """Sets general options used by plugins and streams originating
    from this session object.

    :param key: key of the option
    :param value: value to set the option to

    **Available options**:

    ======================== =========================================
    hds-live-edge            (float) Specify the time live HDS streams will start from the edge of stream, default: ``10.0``
    hds-segment-attempts     (int) How many attempts should be done to download each HDS segment, default: ``3``
    hds-segment-threads      (int) The size of the thread pool used to download segments, default: ``1``
    hds-segment-timeout      (float) HDS segment connect and read timeout, default: ``10.0``
    hds-timeout              (float) Timeout for reading data from HDS streams, default: ``60.0``
    hls-live-edge            (int) How many segments from the end to start live streams on, default: ``3``
    hls-segment-attempts     (int) How many attempts should be done to download each HLS segment, default: ``3``
    hls-segment-threads      (int) The size of the thread pool used to download segments, default: ``1``
    hls-segment-timeout      (float) HLS segment connect and read timeout, default: ``10.0``
    hls-timeout              (float) Timeout for reading data from HLS streams, default: ``60.0``
    http-proxy               (str) Specify an HTTP proxy to use for all HTTP requests
    https-proxy              (str) Specify an HTTPS proxy to use for all HTTPS requests
    http-cookies             (dict or str) A dict or a semi-colon (;) delimited str of cookies to add to each HTTP request, e.g. ``foo=bar;baz=qux``
    http-headers             (dict or str) A dict or semi-colon (;) delimited str of headers to add to each HTTP request, e.g. ``foo=bar;baz=qux``
    http-query-params        (dict or str) A dict or an ampersand (&) delimited string of query parameters to add to each HTTP request, e.g. ``foo=bar&baz=qux``
    http-trust-env           (bool) Trust HTTP settings set in the environment, such as environment variables (HTTP_PROXY, etc) and ~/.netrc authentication
    http-ssl-verify          (bool) Verify SSL certificates, default: ``True``
    http-ssl-cert            (str or tuple) SSL certificate to use, can be either a .pem file (str) or a .crt/.key pair (tuple)
    http-timeout             (float) General timeout used by all HTTP requests except the ones covered by other options, default: ``20.0``
    http-stream-timeout      (float) Timeout for reading data from HTTP streams, default: ``60.0``
    subprocess-errorlog      (bool) Log errors from subprocesses to a file located in the temp directory
    subprocess-errorlog-path (str) Log errors from subprocesses to a specific file
    ringbuffer-size          (int) The size of the internal ring buffer used by most stream types, default: ``16777216`` (16MB)
    rtmp-proxy               (str) Specify a proxy (SOCKS) that RTMP streams will use
    rtmp-rtmpdump            (str) Specify the location of the rtmpdump executable used by RTMP streams, e.g. ``/usr/local/bin/rtmpdump``
    rtmp-timeout             (float) Timeout for reading data from RTMP streams, default: ``60.0``
    ffmpeg-ffmpeg            (str) Specify the location of the ffmpeg executable used by muxing streams, e.g. ``/usr/local/bin/ffmpeg``
    ffmpeg-verbose           (bool) Log stderr from ffmpeg to the console
    ffmpeg-verbose-path      (str) Specify the location of the ffmpeg stderr log file
    ffmpeg-video-transcode   (str) The codec to use if transcoding video when muxing with ffmpeg, e.g. ``h264``
    ffmpeg-audio-transcode   (str) The codec to use if transcoding audio when muxing with ffmpeg, e.g. ``aac``
    stream-segment-attempts  (int) How many attempts should be done to download each segment, default: ``3``. General option used by streams not covered by other options.
    stream-segment-threads   (int) The size of the thread pool used to download segments, default: ``1``. General option used by streams not covered by other options.
    stream-segment-timeout   (float) Segment connect and read timeout, default: ``10.0``. General option used by streams not covered by other options.
    stream-timeout           (float) Timeout for reading data from stream, default: ``60.0``. General option used by streams not covered by other options.
    locale                   (str) Locale setting, in the RFC 1766 format, e.g. en_US or es_ES, default: ``system locale``
    user-input-requester     (UserInputRequester) Instance of UserInputRequester to collect input from the user at runtime. Must be set before the plugins are loaded, default: ``UserInputRequester``
    ======================== =========================================
    """
    # Backwards compatibility
    if key == "rtmpdump":
        key = "rtmp-rtmpdump"
    elif key == "rtmpdump-proxy":
        key = "rtmp-proxy"
    elif key == "errorlog":
        key = "subprocess-errorlog"
    elif key == "errorlog-path":
        key = "subprocess-errorlog-path"

    if key == "http-proxy":
        self.http.proxies["http"] = update_scheme("http://", value)
    elif key == "https-proxy":
        self.http.proxies["https"] = update_scheme("https://", value)
    elif key == "http-cookies":
        if isinstance(value, dict):
            self.http.cookies.update(value)
        else:
            self.http.parse_cookies(value)
    elif key == "http-headers":
        if isinstance(value, dict):
            self.http.headers.update(value)
        else:
            self.http.parse_headers(value)
    elif key == "http-query-params":
        if isinstance(value, dict):
            self.http.params.update(value)
        else:
            self.http.parse_query_params(value)
    elif key == "http-trust-env":
        self.http.trust_env = value
    elif key == "http-ssl-verify":
        self.http.verify = value
    elif key == "http-disable-dh":
        if value:
            requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':!DH'
            try:
                requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST = \
                    requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS.encode("ascii")
            except AttributeError:
                # no pyopenssl to disable the cipher on
                pass
    elif key == "http-ssl-cert":
        self.http.cert = value
    elif key == "http-timeout":
        self.http.timeout = value
    else:
        self.options.set(key, value)
src/streamlink/session.py
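A short usage sketch of set_option (the option values are purely illustrative):

from streamlink import Streamlink

session = Streamlink()
session.set_option("http-proxy", "http://127.0.0.1:8888")  # illustrative proxy
session.set_option("hls-live-edge", 2)
session.set_option("ringbuffer-size", 32 * 1024 * 1024)    # 32 MB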
streamlink/streamlink
Streamlink.set_plugin_option
def set_plugin_option(self, plugin, key, value):
    if plugin in self.plugins:
        plugin = self.plugins[plugin]
        plugin.set_option(key, value)
Sets plugin specific options used by plugins originating from this session object.
def set_plugin_option(self, plugin, key, value):
    """Sets plugin specific options used by plugins originating
    from this session object.

    :param plugin: name of the plugin
    :param key: key of the option
    :param value: value to set the option to
    """
    if plugin in self.plugins:
        plugin = self.plugins[plugin]
        plugin.set_option(key, value)
src/streamlink/session.py
streamlink/streamlink
Streamlink.resolve_url
def resolve_url(self, url, follow_redirect=True):
    url = update_scheme("http://", url)

    available_plugins = []
    for name, plugin in self.plugins.items():
        if plugin.can_handle_url(url):
            available_plugins.append(plugin)

    available_plugins.sort(key=lambda x: x.priority(url), reverse=True)
    if available_plugins:
        return available_plugins[0](url)

    if follow_redirect:
        try:
            res = self.http.head(url, allow_redirects=True,
                                 acceptable_status=[501])

            if res.status_code == 501:
                res = self.http.get(url, stream=True)

            if res.url != url:
                return self.resolve_url(res.url,
                                        follow_redirect=follow_redirect)
        except PluginError:
            pass

    raise NoPluginError
Attempts to find a plugin that can use this URL. The default protocol (http) will be prefixed to the URL if not specified.
def resolve_url(self, url, follow_redirect=True):
    """Attempts to find a plugin that can use this URL.

    The default protocol (http) will be prefixed to the URL if
    not specified.

    Raises :exc:`NoPluginError` on failure.

    :param url: a URL to match against loaded plugins
    :param follow_redirect: follow redirects
    """
    url = update_scheme("http://", url)

    available_plugins = []
    for name, plugin in self.plugins.items():
        if plugin.can_handle_url(url):
            available_plugins.append(plugin)

    available_plugins.sort(key=lambda x: x.priority(url), reverse=True)
    if available_plugins:
        return available_plugins[0](url)

    if follow_redirect:
        # Attempt to handle a redirect URL
        try:
            res = self.http.head(url, allow_redirects=True,
                                 acceptable_status=[501])

            # Fall back to GET request if server doesn't handle HEAD.
            if res.status_code == 501:
                res = self.http.get(url, stream=True)

            if res.url != url:
                return self.resolve_url(res.url,
                                        follow_redirect=follow_redirect)
        except PluginError:
            pass

    raise NoPluginError
src/streamlink/session.py
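A minimal usage sketch of resolve_url (the URL is a placeholder):

from streamlink import Streamlink
from streamlink.exceptions import NoPluginError

session = Streamlink()
try:
    plugin = session.resolve_url("https://example.com/live")  # placeholder URL
    streams = plugin.streams()
except NoPluginError:
    print("No plugin can handle this URL")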
streamlink/streamlink
Streamlink.load_plugins
def load_plugins(self, path):
    for loader, name, ispkg in pkgutil.iter_modules([path]):
        file, pathname, desc = imp.find_module(name, [path])
        module_name = "streamlink.plugin.{0}".format(name)
        try:
            self.load_plugin(module_name, file, pathname, desc)
        except Exception:
            sys.stderr.write("Failed to load plugin {0}:\n".format(name))
            print_small_exception("load_plugin")
            continue
Attempt to load plugins from the path specified.
def load_plugins(self, path):
    """Attempt to load plugins from the path specified.

    :param path: full path to a directory where to look for plugins
    """
    for loader, name, ispkg in pkgutil.iter_modules([path]):
        file, pathname, desc = imp.find_module(name, [path])
        # set the full plugin module name
        module_name = "streamlink.plugin.{0}".format(name)
        try:
            self.load_plugin(module_name, file, pathname, desc)
        except Exception:
            sys.stderr.write("Failed to load plugin {0}:\n".format(name))
            print_small_exception("load_plugin")
            continue
src/streamlink/session.py
streamlink/streamlink
hours_minutes_seconds
def hours_minutes_seconds(value):
    try:
        return int(value)
    except ValueError:
        pass

    match = (_hours_minutes_seconds_re.match(value)
             or _hours_minutes_seconds_2_re.match(value))
    if not match:
        raise ValueError

    s = 0
    s += int(match.group("hours") or "0") * 60 * 60
    s += int(match.group("minutes") or "0") * 60
    s += int(match.group("seconds") or "0")

    return s
Converts a timestamp (hours:minutes:seconds, minutes:seconds, 11h22m33s, or plain seconds) to seconds.
def hours_minutes_seconds(value):
    """converts a timestamp to seconds

    - hours:minutes:seconds to seconds
    - minutes:seconds to seconds
    - 11h22m33s to seconds
    - 11h to seconds
    - 20h15m to seconds
    - seconds to seconds

    :param value: hh:mm:ss ; 00h00m00s ; seconds
    :return: seconds
    """
    try:
        return int(value)
    except ValueError:
        pass

    match = (_hours_minutes_seconds_re.match(value)
             or _hours_minutes_seconds_2_re.match(value))
    if not match:
        raise ValueError

    s = 0
    s += int(match.group("hours") or "0") * 60 * 60
    s += int(match.group("minutes") or "0") * 60
    s += int(match.group("seconds") or "0")

    return s
src/streamlink/utils/times.py
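A quick sketch of the accepted formats (assuming the path above maps to the streamlink.utils.times module):

from streamlink.utils.times import hours_minutes_seconds

hours_minutes_seconds("01:02:03")  # -> 3723
hours_minutes_seconds("1h2m3s")    # -> 3723
hours_minutes_seconds("90")        # -> 90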
streamlink/streamlink
startswith
def startswith(string):
    def starts_with(value):
        validate(text, value)
        if not value.startswith(string):
            raise ValueError("'{0}' does not start with '{1}'".format(value, string))
        return True

    return starts_with
Checks if the string value starts with another string.
def startswith(string):
    """Checks if the string value starts with another string."""
    def starts_with(value):
        validate(text, value)
        if not value.startswith(string):
            raise ValueError("'{0}' does not start with '{1}'".format(value, string))
        return True

    return starts_with
src/streamlink/plugin/api/validate.py
streamlink/streamlink
endswith
def endswith(string):
    def ends_with(value):
        validate(text, value)
        if not value.endswith(string):
            raise ValueError("'{0}' does not end with '{1}'".format(value, string))
        return True

    return ends_with
Checks if the string value ends with another string.
def endswith(string):
    """Checks if the string value ends with another string."""
    def ends_with(value):
        validate(text, value)
        if not value.endswith(string):
            raise ValueError("'{0}' does not end with '{1}'".format(value, string))
        return True

    return ends_with
src/streamlink/plugin/api/validate.py
streamlink/streamlink
contains
def contains(string):
    def contains_str(value):
        validate(text, value)
        if string not in value:
            raise ValueError("'{0}' does not contain '{1}'".format(value, string))
        return True

    return contains_str
Checks if the string value contains another string.
def contains(string):
    """Checks if the string value contains another string."""
    def contains_str(value):
        validate(text, value)
        if string not in value:
            raise ValueError("'{0}' does not contain '{1}'".format(value, string))
        return True

    return contains_str
src/streamlink/plugin/api/validate.py
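The string validators above compose naturally inside a validate.Schema. A minimal sketch (the checked values are illustrative):

from streamlink.plugin.api import validate

schema = validate.Schema(
    validate.startswith("https://"),
    validate.contains("example"),
)
schema.validate("https://example.com/stream")  # passes, returns the value
schema.validate("http://other.org")            # raises ValueError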
streamlink/streamlink
getattr
def getattr(attr, default=None):
    def getter(value):
        return _getattr(value, attr, default)

    return transform(getter)
Get a named attribute from an object. When a default argument is given, it is returned when the attribute doesn't exist.
def getattr(attr, default=None):
    """Get a named attribute from an object.

    When a default argument is given, it is returned when the attribute
    doesn't exist.
    """
    def getter(value):
        return _getattr(value, attr, default)

    return transform(getter)
src/streamlink/plugin/api/validate.py
streamlink/streamlink
filter
def filter(func):
    def expand_kv(kv):
        return func(*kv)

    def filter_values(value):
        cls = type(value)
        if isinstance(value, dict):
            return cls(_filter(expand_kv, value.items()))
        else:
            return cls(_filter(func, value))

    return transform(filter_values)
Filters out unwanted items using the specified function. Supports both dicts and sequences, key/value pairs are expanded when applied to a dict.
def filter(func):
    """Filters out unwanted items using the specified function.

    Supports both dicts and sequences, key/value pairs are
    expanded when applied to a dict.
    """
    def expand_kv(kv):
        return func(*kv)

    def filter_values(value):
        cls = type(value)
        if isinstance(value, dict):
            return cls(_filter(expand_kv, value.items()))
        else:
            return cls(_filter(func, value))

    return transform(filter_values)
src/streamlink/plugin/api/validate.py
streamlink/streamlink
map
def map(func):
    if is_py2 and text == func:
        func = unicode

    def expand_kv(kv):
        return func(*kv)

    def map_values(value):
        cls = type(value)
        if isinstance(value, dict):
            return cls(_map(expand_kv, value.items()))
        else:
            return cls(_map(func, value))

    return transform(map_values)
Apply function to each value inside the sequence or dict. Supports both dicts and sequences, key/value pairs are expanded when applied to a dict.
def map(func):
    """Apply function to each value inside the sequence or dict.

    Supports both dicts and sequences, key/value pairs are
    expanded when applied to a dict.
    """
    # text is an alias for basestring on Python 2, which cannot be
    # instantiated and therefore can't be used to transform the value,
    # so we force to unicode instead.
    if is_py2 and text == func:
        func = unicode

    def expand_kv(kv):
        return func(*kv)

    def map_values(value):
        cls = type(value)
        if isinstance(value, dict):
            return cls(_map(expand_kv, value.items()))
        else:
            return cls(_map(func, value))

    return transform(map_values)
src/streamlink/plugin/api/validate.py
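A sketch of filter and map applied to a dict, where each key/value pair is expanded into the function's arguments (the data is illustrative):

from streamlink.plugin.api import validate

schema = validate.Schema(
    validate.filter(lambda k, v: bool(v)),      # drop pairs with falsy values
    validate.map(lambda k, v: (k.upper(), v)),  # upper-case the keys
)
schema.validate({"a": 1, "b": 0})  # -> {"A": 1}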
streamlink/streamlink
url
def url(**attributes):
    def check_url(value):
        validate(text, value)
        parsed = urlparse(value)
        if not parsed.netloc:
            raise ValueError("'{0}' is not a valid URL".format(value))

        for name, schema in attributes.items():
            if not _hasattr(parsed, name):
                raise ValueError("Invalid URL attribute '{0}'".format(name))

            try:
                validate(schema, _getattr(parsed, name))
            except ValueError as err:
                raise ValueError(
                    "Unable to validate URL attribute '{0}': {1}".format(
                        name, err
                    )
                )

        return True

    if attributes.get("scheme") == "http":
        attributes["scheme"] = any("http", "https")

    return check_url
Parses a URL and validates its attributes.
def url(**attributes):
    """Parses a URL and validates its attributes."""
    def check_url(value):
        validate(text, value)
        parsed = urlparse(value)
        if not parsed.netloc:
            raise ValueError("'{0}' is not a valid URL".format(value))

        for name, schema in attributes.items():
            if not _hasattr(parsed, name):
                raise ValueError("Invalid URL attribute '{0}'".format(name))

            try:
                validate(schema, _getattr(parsed, name))
            except ValueError as err:
                raise ValueError(
                    "Unable to validate URL attribute '{0}': {1}".format(
                        name, err
                    )
                )

        return True

    # Convert "http" to be either any("http", "https") for convenience
    if attributes.get("scheme") == "http":
        attributes["scheme"] = any("http", "https")

    return check_url
src/streamlink/plugin/api/validate.py
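A sketch combining url with the string validators defined earlier (the URL is illustrative; scheme="http" also accepts https via the convenience conversion above):

from streamlink.plugin.api import validate

schema = validate.Schema(
    validate.url(scheme="http", path=validate.endswith(".m3u8"))
)
schema.validate("https://example.com/live/playlist.m3u8")  # passes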
streamlink/streamlink
xml_find
def xml_find(xpath):
    def xpath_find(value):
        validate(ET.iselement, value)
        value = value.find(xpath)
        if value is None:
            raise ValueError("XPath '{0}' did not return an element".format(xpath))

        return validate(ET.iselement, value)

    return transform(xpath_find)
Find an XML element via xpath.
def xml_find(xpath):
    """Find an XML element via xpath."""
    def xpath_find(value):
        validate(ET.iselement, value)
        value = value.find(xpath)
        if value is None:
            raise ValueError("XPath '{0}' did not return an element".format(xpath))

        return validate(ET.iselement, value)

    return transform(xpath_find)
src/streamlink/plugin/api/validate.py
streamlink/streamlink
xml_findall
def xml_findall(xpath):
    def xpath_findall(value):
        validate(ET.iselement, value)
        return value.findall(xpath)

    return transform(xpath_findall)
Find a list of XML elements via xpath.
def xml_findall(xpath):
    """Find a list of XML elements via xpath."""
    def xpath_findall(value):
        validate(ET.iselement, value)
        return value.findall(xpath)

    return transform(xpath_findall)
src/streamlink/plugin/api/validate.py
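A sketch of both XPath helpers against a small hand-built element tree (the XML is illustrative):

import xml.etree.ElementTree as ET
from streamlink.plugin.api import validate

root = ET.fromstring("<root><item id='1'/><item id='2'/></root>")
first = validate.Schema(validate.xml_find("item")).validate(root)     # first <item>
items = validate.Schema(validate.xml_findall("item")).validate(root)  # both <item>s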
streamlink/streamlink
_find_player_url
def _find_player_url(response):
    url = ''
    matches = _player_re.search(response.text)
    if matches:
        tmp_url = matches.group(0).replace('&amp;', '&')
        if 'hash' not in tmp_url:
            matches = _hash_re.search(response.text)
            if matches:
                url = tmp_url + '&hash=' + matches.group(1)
        else:
            url = tmp_url

    return 'http://ceskatelevize.cz/' + url
Finds embedded player url in HTTP response.
def _find_player_url(response):
    """
    Finds embedded player url in HTTP response.

    :param response: Response object.
    :returns: Player url (str).
    """
    url = ''
    matches = _player_re.search(response.text)
    if matches:
        tmp_url = matches.group(0).replace('&amp;', '&')
        if 'hash' not in tmp_url:
            # there's no hash in the URL, try to find it
            matches = _hash_re.search(response.text)
            if matches:
                url = tmp_url + '&hash=' + matches.group(1)
        else:
            url = tmp_url

    return 'http://ceskatelevize.cz/' + url
src/streamlink/plugins/ceskatelevize.py
streamlink/streamlink
load
def load(data, base_uri=None, parser=M3U8Parser, **kwargs):
    return parser(base_uri, **kwargs).parse(data)
Attempts to parse an M3U8 playlist from a string of data. If specified, *base_uri* is the base URI that relative URIs will be joined together with.
def load(data, base_uri=None, parser=M3U8Parser, **kwargs):
    """Attempts to parse an M3U8 playlist from a string of data.

    If specified, *base_uri* is the base URI that relative URIs will
    be joined together with, otherwise relative URIs will be as is.

    If specified, *parser* can be an M3U8Parser subclass to be used
    to parse the data.
    """
    return parser(base_uri, **kwargs).parse(data)
src/streamlink/stream/hls_playlist.py
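A minimal usage sketch of load (the playlist text and base URI are illustrative, and segment objects are assumed to expose a uri attribute as in this module):

from streamlink.stream.hls_playlist import load

m3u8_text = "#EXTM3U\n#EXT-X-TARGETDURATION:6\n#EXTINF:6.0,\nsegment0.ts\n"
playlist = load(m3u8_text, base_uri="https://example.com/live/")
for segment in playlist.segments:
    print(segment.uri)  # -> https://example.com/live/segment0.ts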
streamlink/streamlink
PlayerOutput.supported_player
def supported_player(cls, cmd):
    if not is_win32:
        cmd = shlex.split(cmd)[0]

    cmd = os.path.basename(cmd.lower())
    for player, possiblecmds in SUPPORTED_PLAYERS.items():
        for possiblecmd in possiblecmds:
            if cmd.startswith(possiblecmd):
                return player
Check if the current player supports adding a title
def supported_player(cls, cmd):
    """
    Check if the current player supports adding a title

    :param cmd: command to test
    :return: name of the player|None
    """
    if not is_win32:
        # under a POSIX system use shlex to find the actual command
        # under Windows this is not an issue because executables end in .exe
        cmd = shlex.split(cmd)[0]

    cmd = os.path.basename(cmd.lower())
    for player, possiblecmds in SUPPORTED_PLAYERS.items():
        for possiblecmd in possiblecmds:
            if cmd.startswith(possiblecmd):
                return player
src/streamlink_cli/output.py
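A small usage sketch (the paths and flags are illustrative; supported_player is a classmethod in the surrounding file, so it is called on the class):

from streamlink_cli.output import PlayerOutput

PlayerOutput.supported_player("/usr/bin/vlc --no-one-instance")  # -> "vlc"
PlayerOutput.supported_player("/opt/unknown/player")             # -> None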
streamlink/streamlink
SteamBroadcastPlugin.dologin
def dologin(self, email, password, emailauth="", emailsteamid="",
            captchagid="-1", captcha_text="", twofactorcode=""):
    epassword, rsatimestamp = self.encrypt_password(email, password)

    login_data = {
        'username': email,
        "password": epassword,
        "emailauth": emailauth,
        "loginfriendlyname": "Streamlink",
        "captchagid": captchagid,
        "captcha_text": captcha_text,
        "emailsteamid": emailsteamid,
        "rsatimestamp": rsatimestamp,
        "remember_login": True,
        "donotcache": self.donotcache,
        "twofactorcode": twofactorcode
    }

    res = self.session.http.post(self._dologin_url, data=login_data)
    resp = self.session.http.json(res, schema=self._dologin_schema)

    if not resp[u"success"]:
        if resp.get(u"captcha_needed"):
            captchagid = resp[u"captcha_gid"]
            log.error("Captcha result required, open this URL to see the captcha: {}".format(
                self._captcha_url.format(captchagid)))
            try:
                captcha_text = self.input_ask("Captcha text")
            except FatalPluginError:
                captcha_text = None
            if not captcha_text:
                return False
        else:
            if resp.get(u"emailauth_needed"):
                if not emailauth:
                    try:
                        emailauth = self.input_ask("Email auth code required")
                    except FatalPluginError:
                        emailauth = None
                    if not emailauth:
                        return False
                else:
                    raise SteamLoginFailed("Email auth key error")

            if resp.get(u"requires_twofactor"):
                try:
                    twofactorcode = self.input_ask("Two factor auth code required")
                except FatalPluginError:
                    twofactorcode = None
                if not twofactorcode:
                    return False

            if resp.get(u"message"):
                raise SteamLoginFailed(resp[u"message"])

        return self.dologin(email, password,
                            emailauth=emailauth,
                            emailsteamid=resp.get(u"emailsteamid", u""),
                            captcha_text=captcha_text,
                            captchagid=captchagid,
                            twofactorcode=twofactorcode)
    elif resp.get("login_complete"):
        return True
    else:
        log.error("Something went wrong when logging in to Steam")
        return False
Logs in to Steam
def dologin(self, email, password, emailauth="", emailsteamid="",
            captchagid="-1", captcha_text="", twofactorcode=""):
    """
    Logs in to Steam
    """
    epassword, rsatimestamp = self.encrypt_password(email, password)

    login_data = {
        'username': email,
        "password": epassword,
        "emailauth": emailauth,
        "loginfriendlyname": "Streamlink",
        "captchagid": captchagid,
        "captcha_text": captcha_text,
        "emailsteamid": emailsteamid,
        "rsatimestamp": rsatimestamp,
        "remember_login": True,
        "donotcache": self.donotcache,
        "twofactorcode": twofactorcode
    }

    res = self.session.http.post(self._dologin_url, data=login_data)
    resp = self.session.http.json(res, schema=self._dologin_schema)

    if not resp[u"success"]:
        if resp.get(u"captcha_needed"):
            # special case for captcha
            captchagid = resp[u"captcha_gid"]
            log.error("Captcha result required, open this URL to see the captcha: {}".format(
                self._captcha_url.format(captchagid)))
            try:
                captcha_text = self.input_ask("Captcha text")
            except FatalPluginError:
                captcha_text = None
            if not captcha_text:
                return False
        else:
            # If the user must enter the code that was emailed to them
            if resp.get(u"emailauth_needed"):
                if not emailauth:
                    try:
                        emailauth = self.input_ask("Email auth code required")
                    except FatalPluginError:
                        emailauth = None
                    if not emailauth:
                        return False
                else:
                    raise SteamLoginFailed("Email auth key error")

            # If the user must enter a two factor auth code
            if resp.get(u"requires_twofactor"):
                try:
                    twofactorcode = self.input_ask("Two factor auth code required")
                except FatalPluginError:
                    twofactorcode = None
                if not twofactorcode:
                    return False

            if resp.get(u"message"):
                raise SteamLoginFailed(resp[u"message"])

        return self.dologin(email, password,
                            emailauth=emailauth,
                            emailsteamid=resp.get(u"emailsteamid", u""),
                            captcha_text=captcha_text,
                            captchagid=captchagid,
                            twofactorcode=twofactorcode)
    elif resp.get("login_complete"):
        return True
    else:
        log.error("Something went wrong when logging in to Steam")
        return False
src/streamlink/plugins/steam.py
streamlink/streamlink
ABweb._login
def _login(self, username, password):
    self.logger.debug('login ...')
    res = self.session.http.get(self.login_url)
    input_list = self._input_re.findall(res.text)
    if not input_list:
        raise PluginError('Missing input data on login website.')

    data = {}
    for _input_data in input_list:
        try:
            _input_name = self._name_re.search(_input_data).group(1)
        except AttributeError:
            continue

        try:
            _input_value = self._value_re.search(_input_data).group(1)
        except AttributeError:
            _input_value = ''

        data[_input_name] = _input_value

    login_data = {
        'ctl00$Login1$UserName': username,
        'ctl00$Login1$Password': password,
        'ctl00$Login1$LoginButton.x': '0',
        'ctl00$Login1$LoginButton.y': '0'
    }
    data.update(login_data)

    res = self.session.http.post(self.login_url, data=data)

    for cookie in self.session.http.cookies:
        self._session_attributes.set(cookie.name, cookie.value, expires=3600 * 24)

    if self._session_attributes.get('ASP.NET_SessionId') and self._session_attributes.get('.abportail1'):
        self.logger.debug('New session data')
        self.set_expires_time_cache()
        return True
    else:
        self.logger.error('Failed to login, check your username/password')
        return False
login and update cached cookies
def _login(self, username, password):
    '''login and update cached cookies'''
    self.logger.debug('login ...')
    res = self.session.http.get(self.login_url)
    input_list = self._input_re.findall(res.text)
    if not input_list:
        raise PluginError('Missing input data on login website.')

    data = {}
    for _input_data in input_list:
        try:
            _input_name = self._name_re.search(_input_data).group(1)
        except AttributeError:
            continue

        try:
            _input_value = self._value_re.search(_input_data).group(1)
        except AttributeError:
            _input_value = ''

        data[_input_name] = _input_value

    login_data = {
        'ctl00$Login1$UserName': username,
        'ctl00$Login1$Password': password,
        'ctl00$Login1$LoginButton.x': '0',
        'ctl00$Login1$LoginButton.y': '0'
    }
    data.update(login_data)

    res = self.session.http.post(self.login_url, data=data)

    for cookie in self.session.http.cookies:
        self._session_attributes.set(cookie.name, cookie.value, expires=3600 * 24)

    if self._session_attributes.get('ASP.NET_SessionId') and self._session_attributes.get('.abportail1'):
        self.logger.debug('New session data')
        self.set_expires_time_cache()
        return True
    else:
        self.logger.error('Failed to login, check your username/password')
        return False
src/streamlink/plugins/abweb.py
streamlink/streamlink
CrunchyrollAPI._api_call
def _api_call(self, entrypoint, params=None, schema=None):
    url = self._api_url.format(entrypoint)

    params = params or {}
    if self.session_id:
        params.update({
            "session_id": self.session_id
        })
    else:
        params.update({
            "device_id": self.device_id,
            "device_type": self._access_type,
            "access_token": self._access_token,
            "version": self._version_code
        })
    params.update({
        "locale": self.locale.replace('_', ''),
    })

    if self.session_id:
        params["session_id"] = self.session_id

    res = self.session.http.post(url, data=params, headers=self.headers, verify=False)
    json_res = self.session.http.json(res, schema=_api_schema)

    if json_res["error"]:
        err_msg = json_res.get("message", "Unknown error")
        err_code = json_res.get("code", "unknown_error")
        raise CrunchyrollAPIError(err_msg, err_code)

    data = json_res.get("data")
    if schema:
        data = schema.validate(data, name="API response")

    return data
Makes a call against the API.
def _api_call(self, entrypoint, params=None, schema=None): """Makes a call against the api. :param entrypoint: API method to call. :param params: parameters to include in the request data. :param schema: schema to use to validate the data """ url = self._api_url.format(entrypoint) # Default params params = params or {} if self.session_id: params.update({ "session_id": self.session_id }) else: params.update({ "device_id": self.device_id, "device_type": self._access_type, "access_token": self._access_token, "version": self._version_code }) params.update({ "locale": self.locale.replace('_', ''), }) if self.session_id: params["session_id"] = self.session_id # The certificate used by Crunchyroll cannot be verified in some environments. res = self.session.http.post(url, data=params, headers=self.headers, verify=False) json_res = self.session.http.json(res, schema=_api_schema) if json_res["error"]: err_msg = json_res.get("message", "Unknown error") err_code = json_res.get("code", "unknown_error") raise CrunchyrollAPIError(err_msg, err_code) data = json_res.get("data") if schema: data = schema.validate(data, name="API response") return data
src/streamlink/plugins/crunchyroll.py
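The error-handling contract is the interesting part of _api_call: every response is JSON, and payloads with error set are promoted to exceptions before callers see any data. A reduced sketch of just that check, with fake payloads and a local stand-in for the plugin's CrunchyrollAPIError:

class CrunchyrollAPIError(Exception):
    def __init__(self, msg, code):
        Exception.__init__(self, msg)
        self.msg = msg
        self.code = code

def check(json_res):
    # Mirror the promotion of error payloads into exceptions.
    if json_res.get("error"):
        raise CrunchyrollAPIError(json_res.get("message", "Unknown error"),
                                  json_res.get("code", "unknown_error"))
    return json_res.get("data")

print(check({"error": False, "data": {"ok": 1}}))   # {'ok': 1}
# check({"error": True, "message": "bad session"})  # raises CrunchyrollAPIError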
streamlink/streamlink
CrunchyrollAPI.start_session
def start_session(self): params = {} if self.auth: params["auth"] = self.auth self.session_id = self._api_call("start_session", params, schema=_session_schema) log.debug("Session created with ID: {0}".format(self.session_id)) return self.session_id
Starts a session against Crunchyroll's server. It is recommended that you call this method before making any other calls, to make sure you have a valid session with the server.
def start_session(self): """ Starts a session against Crunchyroll's server. Is recommended that you call this method before making any other calls to make sure you have a valid session against the server. """ params = {} if self.auth: params["auth"] = self.auth self.session_id = self._api_call("start_session", params, schema=_session_schema) log.debug("Session created with ID: {0}".format(self.session_id)) return self.session_id
src/streamlink/plugins/crunchyroll.py
streamlink/streamlink
Crunchyroll._create_api
def _create_api(self): if self.options.get("purge_credentials"): self.cache.set("session_id", None, 0) self.cache.set("auth", None, 0) self.cache.set("session_id", None, 0) locale = self.get_option("locale") or self.session.localization.language_code api = CrunchyrollAPI(self.cache, self.session, session_id=self.get_option("session_id"), locale=locale) if not self.get_option("session_id"): self.logger.debug("Creating session with locale: {0}", locale) api.start_session() if api.auth: self.logger.debug("Using saved credentials") login = api.authenticate() self.logger.info("Successfully logged in as '{0}'", login["user"]["username"] or login["user"]["email"]) elif self.options.get("username"): try: self.logger.debug("Attempting to login using username and password") api.login(self.options.get("username"), self.options.get("password")) login = api.authenticate() self.logger.info("Logged in as '{0}'", login["user"]["username"] or login["user"]["email"]) except CrunchyrollAPIError as err: raise PluginError(u"Authentication error: {0}".format(err.msg)) else: self.logger.warning( "No authentication provided, you won't be able to access " "premium restricted content" ) return api
Creates a new CrunchyrollAPI object, initiates its session, and tries to authenticate using either saved credentials or the user's username and password.
def _create_api(self): """Creates a new CrunchyrollAPI object, initiates it's session and tries to authenticate it either by using saved credentials or the user's username and password. """ if self.options.get("purge_credentials"): self.cache.set("session_id", None, 0) self.cache.set("auth", None, 0) self.cache.set("session_id", None, 0) # use the crunchyroll locale as an override, for backwards compatibility locale = self.get_option("locale") or self.session.localization.language_code api = CrunchyrollAPI(self.cache, self.session, session_id=self.get_option("session_id"), locale=locale) if not self.get_option("session_id"): self.logger.debug("Creating session with locale: {0}", locale) api.start_session() if api.auth: self.logger.debug("Using saved credentials") login = api.authenticate() self.logger.info("Successfully logged in as '{0}'", login["user"]["username"] or login["user"]["email"]) elif self.options.get("username"): try: self.logger.debug("Attempting to login using username and password") api.login(self.options.get("username"), self.options.get("password")) login = api.authenticate() self.logger.info("Logged in as '{0}'", login["user"]["username"] or login["user"]["email"]) except CrunchyrollAPIError as err: raise PluginError(u"Authentication error: {0}".format(err.msg)) else: self.logger.warning( "No authentication provided, you won't be able to access " "premium restricted content" ) return api
src/streamlink/plugins/crunchyroll.py
open-mmlab/mmcv
frames2video
def frames2video(frame_dir, video_file, fps=30, fourcc='XVID', filename_tmpl='{:06d}.jpg', start=0, end=0, show_progress=True): if end == 0: ext = filename_tmpl.split('.')[-1] end = len([name for name in scandir(frame_dir, ext)]) first_file = osp.join(frame_dir, filename_tmpl.format(start)) check_file_exist(first_file, 'The start frame not found: ' + first_file) img = cv2.imread(first_file) height, width = img.shape[:2] resolution = (width, height) vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps, resolution) def write_frame(file_idx): filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) img = cv2.imread(filename) vwriter.write(img) if show_progress: track_progress(write_frame, range(start, end)) else: for i in range(start, end): filename = osp.join(frame_dir, filename_tmpl.format(i)) img = cv2.imread(filename) vwriter.write(img) vwriter.release()
Read frame images from a directory and join them into a video.
def frames2video(frame_dir, video_file, fps=30, fourcc='XVID', filename_tmpl='{:06d}.jpg', start=0, end=0, show_progress=True): """Read the frame images from a directory and join them as a video Args: frame_dir (str): The directory containing video frames. video_file (str): Output filename. fps (float): FPS of the output video. fourcc (str): Fourcc of the output video, this should be compatible with the output file type. filename_tmpl (str): Filename template with the index as the variable. start (int): Starting frame index. end (int): Ending frame index. show_progress (bool): Whether to show a progress bar. """ if end == 0: ext = filename_tmpl.split('.')[-1] end = len([name for name in scandir(frame_dir, ext)]) first_file = osp.join(frame_dir, filename_tmpl.format(start)) check_file_exist(first_file, 'The start frame not found: ' + first_file) img = cv2.imread(first_file) height, width = img.shape[:2] resolution = (width, height) vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps, resolution) def write_frame(file_idx): filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) img = cv2.imread(filename) vwriter.write(img) if show_progress: track_progress(write_frame, range(start, end)) else: for i in range(start, end): filename = osp.join(frame_dir, filename_tmpl.format(i)) img = cv2.imread(filename) vwriter.write(img) vwriter.release()
mmcv/video/io.py
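A quick usage sketch, assuming a directory frames/ containing 000000.jpg onwards (all paths here are hypothetical):

import mmcv

# With end=0, frames are counted by extension under frames/, so this joins
# every matching image into a 24 fps XVID-encoded video.
mmcv.frames2video('frames', 'out.avi', fps=24, filename_tmpl='{:06d}.jpg')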
open-mmlab/mmcv
VideoReader.read
def read(self): if self._cache: img = self._cache.get(self._position) if img is not None: ret = True else: if self._position != self._get_real_position(): self._set_real_position(self._position) ret, img = self._vcap.read() if ret: self._cache.put(self._position, img) else: ret, img = self._vcap.read() if ret: self._position += 1 return img
Read the next frame. If the next frame has already been decoded and is in the cache, return it directly; otherwise decode, cache, and return it.
def read(self): """Read the next frame. If the next frame have been decoded before and in the cache, then return it directly, otherwise decode, cache and return it. Returns: ndarray or None: Return the frame if successful, otherwise None. """ # pos = self._position if self._cache: img = self._cache.get(self._position) if img is not None: ret = True else: if self._position != self._get_real_position(): self._set_real_position(self._position) ret, img = self._vcap.read() if ret: self._cache.put(self._position, img) else: ret, img = self._vcap.read() if ret: self._position += 1 return img
mmcv/video/io.py
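Usage sketch ('test.mp4' is a hypothetical file): read() returns the next frame as an ndarray, or None once the stream is exhausted, so it drives a simple drain loop.

import mmcv

video = mmcv.VideoReader('test.mp4')
frame = video.read()
while frame is not None:
    # each frame is an HxWx3 BGR ndarray
    frame = video.read()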
open-mmlab/mmcv
VideoReader.get_frame
def get_frame(self, frame_id): if frame_id < 0 or frame_id >= self._frame_cnt: raise IndexError( '"frame_id" must be between 0 and {}'.format(self._frame_cnt - 1)) if frame_id == self._position: return self.read() if self._cache: img = self._cache.get(frame_id) if img is not None: self._position = frame_id + 1 return img self._set_real_position(frame_id) ret, img = self._vcap.read() if ret: if self._cache: self._cache.put(self._position, img) self._position += 1 return img
Get frame by index.
def get_frame(self, frame_id): """Get frame by index. Args: frame_id (int): Index of the expected frame, 0-based. Returns: ndarray or None: Return the frame if successful, otherwise None. """ if frame_id < 0 or frame_id >= self._frame_cnt: raise IndexError( '"frame_id" must be between 0 and {}'.format(self._frame_cnt - 1)) if frame_id == self._position: return self.read() if self._cache: img = self._cache.get(frame_id) if img is not None: self._position = frame_id + 1 return img self._set_real_position(frame_id) ret, img = self._vcap.read() if ret: if self._cache: self._cache.put(self._position, img) self._position += 1 return img
mmcv/video/io.py
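Random access composes with sequential reads: get_frame(i) seeks only on a cache miss and leaves the cursor just past i. A sketch with a hypothetical file:

import mmcv

video = mmcv.VideoReader('test.mp4')
frame_50 = video.get_frame(50)  # seeks, or hits the frame cache
frame_51 = video.read()         # continues from index 51, no extra seek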
open-mmlab/mmcv
VideoReader.cvt2frames
def cvt2frames(self, frame_dir, file_start=0, filename_tmpl='{:06d}.jpg', start=0, max_num=0, show_progress=True): mkdir_or_exist(frame_dir) if max_num == 0: task_num = self.frame_cnt - start else: task_num = min(self.frame_cnt - start, max_num) if task_num <= 0: raise ValueError('start must be less than total frame number') if start > 0: self._set_real_position(start) def write_frame(file_idx): img = self.read() filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) cv2.imwrite(filename, img) if show_progress: track_progress(write_frame, range(file_start, file_start + task_num)) else: for i in range(task_num): img = self.read() if img is None: break filename = osp.join(frame_dir, filename_tmpl.format(i + file_start)) cv2.imwrite(filename, img)
Convert a video to frame images.
def cvt2frames(self, frame_dir, file_start=0, filename_tmpl='{:06d}.jpg', start=0, max_num=0, show_progress=True): """Convert a video to frame images Args: frame_dir (str): Output directory to store all the frame images. file_start (int): Filenames will start from the specified number. filename_tmpl (str): Filename template with the index as the placeholder. start (int): The starting frame index. max_num (int): Maximum number of frames to be written. show_progress (bool): Whether to show a progress bar. """ mkdir_or_exist(frame_dir) if max_num == 0: task_num = self.frame_cnt - start else: task_num = min(self.frame_cnt - start, max_num) if task_num <= 0: raise ValueError('start must be less than total frame number') if start > 0: self._set_real_position(start) def write_frame(file_idx): img = self.read() filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) cv2.imwrite(filename, img) if show_progress: track_progress(write_frame, range(file_start, file_start + task_num)) else: for i in range(task_num): img = self.read() if img is None: break filename = osp.join(frame_dir, filename_tmpl.format(i + file_start)) cv2.imwrite(filename, img)
mmcv/video/io.py
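Usage sketch (paths and counts are hypothetical): dump at most 100 frames, starting at frame 30, into frames/ with zero-padded filenames.

import mmcv

video = mmcv.VideoReader('test.mp4')
video.cvt2frames('frames', start=30, max_num=100, filename_tmpl='{:06d}.jpg')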
open-mmlab/mmcv
track_progress
def track_progress(func, tasks, bar_width=50, **kwargs): if isinstance(tasks, tuple): assert len(tasks) == 2 assert isinstance(tasks[0], collections_abc.Iterable) assert isinstance(tasks[1], int) task_num = tasks[1] tasks = tasks[0] elif isinstance(tasks, collections_abc.Iterable): task_num = len(tasks) else: raise TypeError( '"tasks" must be an iterable object or a (iterator, int) tuple') prog_bar = ProgressBar(task_num, bar_width) results = [] for task in tasks: results.append(func(task, **kwargs)) prog_bar.update() sys.stdout.write('\n') return results
Track the progress of task execution with a progress bar. Tasks are executed with a simple for-loop.
def track_progress(func, tasks, bar_width=50, **kwargs): """Track the progress of tasks execution with a progress bar. Tasks are done with a simple for-loop. Args: func (callable): The function to be applied to each task. tasks (list or tuple[Iterable, int]): A list of tasks or (tasks, total num). bar_width (int): Width of progress bar. Returns: list: The task results. """ if isinstance(tasks, tuple): assert len(tasks) == 2 assert isinstance(tasks[0], collections_abc.Iterable) assert isinstance(tasks[1], int) task_num = tasks[1] tasks = tasks[0] elif isinstance(tasks, collections_abc.Iterable): task_num = len(tasks) else: raise TypeError( '"tasks" must be an iterable object or a (iterator, int) tuple') prog_bar = ProgressBar(task_num, bar_width) results = [] for task in tasks: results.append(func(task, **kwargs)) prog_bar.update() sys.stdout.write('\n') return results
mmcv/utils/progressbar.py
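Usage sketch: the function is applied to each task in order while the bar advances, and the per-task results come back as a list.

import mmcv

def square(x):
    return x * x

results = mmcv.track_progress(square, [1, 2, 3, 4])
print(results)  # [1, 4, 9, 16]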
open-mmlab/mmcv
imflip
def imflip(img, direction='horizontal'): assert direction in ['horizontal', 'vertical'] if direction == 'horizontal': return np.flip(img, axis=1) else: return np.flip(img, axis=0)
Flip an image horizontally or vertically.
def imflip(img, direction='horizontal'): """Flip an image horizontally or vertically. Args: img (ndarray): Image to be flipped. direction (str): The flip direction, either "horizontal" or "vertical". Returns: ndarray: The flipped image. """ assert direction in ['horizontal', 'vertical'] if direction == 'horizontal': return np.flip(img, axis=1) else: return np.flip(img, axis=0)
mmcv/image/transforms/geometry.py
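A quick check on a tiny array: horizontal flips reverse the columns, vertical flips reverse the rows.

import numpy as np
import mmcv

img = np.arange(6).reshape(2, 3)
print(mmcv.imflip(img))                        # [[2 1 0] [5 4 3]]
print(mmcv.imflip(img, direction='vertical'))  # [[3 4 5] [0 1 2]]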
open-mmlab/mmcv
imrotate
def imrotate(img, angle, center=None, scale=1.0, border_value=0, auto_bound=False): if center is not None and auto_bound: raise ValueError('`auto_bound` conflicts with `center`') h, w = img.shape[:2] if center is None: center = ((w - 1) * 0.5, (h - 1) * 0.5) assert isinstance(center, tuple) matrix = cv2.getRotationMatrix2D(center, -angle, scale) if auto_bound: cos = np.abs(matrix[0, 0]) sin = np.abs(matrix[0, 1]) new_w = h * sin + w * cos new_h = h * cos + w * sin matrix[0, 2] += (new_w - w) * 0.5 matrix[1, 2] += (new_h - h) * 0.5 w = int(np.round(new_w)) h = int(np.round(new_h)) rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value) return rotated
Rotate an image.
def imrotate(img, angle, center=None, scale=1.0, border_value=0, auto_bound=False): """Rotate an image. Args: img (ndarray): Image to be rotated. angle (float): Rotation angle in degrees, positive values mean clockwise rotation. center (tuple): Center of the rotation in the source image, by default it is the center of the image. scale (float): Isotropic scale factor. border_value (int): Border value. auto_bound (bool): Whether to adjust the image size to cover the whole rotated image. Returns: ndarray: The rotated image. """ if center is not None and auto_bound: raise ValueError('`auto_bound` conflicts with `center`') h, w = img.shape[:2] if center is None: center = ((w - 1) * 0.5, (h - 1) * 0.5) assert isinstance(center, tuple) matrix = cv2.getRotationMatrix2D(center, -angle, scale) if auto_bound: cos = np.abs(matrix[0, 0]) sin = np.abs(matrix[0, 1]) new_w = h * sin + w * cos new_h = h * cos + w * sin matrix[0, 2] += (new_w - w) * 0.5 matrix[1, 2] += (new_h - h) * 0.5 w = int(np.round(new_w)) h = int(np.round(new_h)) rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value) return rotated
mmcv/image/transforms/geometry.py
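With the default bound, a 45-degree rotation clips the corners; auto_bound grows the canvas to cover the whole rotated image instead. A sketch:

import numpy as np
import mmcv

img = np.zeros((100, 200, 3), dtype=np.uint8)
print(mmcv.imrotate(img, 45).shape)                   # (100, 200, 3)
# new edge = h*sin(45) + w*cos(45) ~= 212 on both axes
print(mmcv.imrotate(img, 45, auto_bound=True).shape)  # (212, 212, 3)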
open-mmlab/mmcv
bbox_clip
def bbox_clip(bboxes, img_shape): assert bboxes.shape[-1] % 4 == 0 clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype) clipped_bboxes[..., 0::2] = np.maximum( np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0) clipped_bboxes[..., 1::2] = np.maximum( np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0) return clipped_bboxes
Clip bboxes to fit the image shape.
def bbox_clip(bboxes, img_shape): """Clip bboxes to fit the image shape. Args: bboxes (ndarray): Shape (..., 4*k) img_shape (tuple): (height, width) of the image. Returns: ndarray: Clipped bboxes. """ assert bboxes.shape[-1] % 4 == 0 clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype) clipped_bboxes[..., 0::2] = np.maximum( np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0) clipped_bboxes[..., 1::2] = np.maximum( np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0) return clipped_bboxes
mmcv/image/transforms/geometry.py
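Worked example: a box hanging past a 100x100 image is clamped to the valid pixel range [0, 99] on both axes (bbox_clip is assumed importable from the top-level mmcv namespace here).

import numpy as np
import mmcv

bboxes = np.array([[-10., 20., 150., 120.]])
print(mmcv.bbox_clip(bboxes, (100, 100)))  # [[ 0. 20. 99. 99.]]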
open-mmlab/mmcv
bbox_scaling
def bbox_scaling(bboxes, scale, clip_shape=None): if float(scale) == 1.0: scaled_bboxes = bboxes.copy() else: w = bboxes[..., 2] - bboxes[..., 0] + 1 h = bboxes[..., 3] - bboxes[..., 1] + 1 dw = (w * (scale - 1)) * 0.5 dh = (h * (scale - 1)) * 0.5 scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1) if clip_shape is not None: return bbox_clip(scaled_bboxes, clip_shape) else: return scaled_bboxes
Scale bboxes w.r.t. the box center.
def bbox_scaling(bboxes, scale, clip_shape=None): """Scaling bboxes w.r.t the box center. Args: bboxes (ndarray): Shape(..., 4). scale (float): Scaling factor. clip_shape (tuple, optional): If specified, bboxes that exceed the boundary will be clipped according to the given shape (h, w). Returns: ndarray: Scaled bboxes. """ if float(scale) == 1.0: scaled_bboxes = bboxes.copy() else: w = bboxes[..., 2] - bboxes[..., 0] + 1 h = bboxes[..., 3] - bboxes[..., 1] + 1 dw = (w * (scale - 1)) * 0.5 dh = (h * (scale - 1)) * 0.5 scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1) if clip_shape is not None: return bbox_clip(scaled_bboxes, clip_shape) else: return scaled_bboxes
mmcv/image/transforms/geometry.py
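Worked example: an 11-pixel-wide box (inclusive coords 10..20) scaled by 2.0 grows by dw = dh = 11 * (2 - 1) / 2 = 5.5 on each side (again assuming a top-level mmcv import).

import numpy as np
import mmcv

box = np.array([10., 10., 20., 20.])
print(mmcv.bbox_scaling(box, 2.0))                       # [ 4.5  4.5 25.5 25.5]
print(mmcv.bbox_scaling(box, 2.0, clip_shape=(25, 25)))  # [ 4.5  4.5 24.  24. ]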
open-mmlab/mmcv
imcrop
def imcrop(img, bboxes, scale=1.0, pad_fill=None): chn = 1 if img.ndim == 2 else img.shape[2] if pad_fill is not None: if isinstance(pad_fill, (int, float)): pad_fill = [pad_fill for _ in range(chn)] assert len(pad_fill) == chn _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32) clipped_bbox = bbox_clip(scaled_bboxes, img.shape) patches = [] for i in range(clipped_bbox.shape[0]): x1, y1, x2, y2 = tuple(clipped_bbox[i, :]) if pad_fill is None: patch = img[y1:y2 + 1, x1:x2 + 1, ...] else: _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :]) if chn == 2: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1) else: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn) patch = np.array( pad_fill, dtype=img.dtype) * np.ones( patch_shape, dtype=img.dtype) x_start = 0 if _x1 >= 0 else -_x1 y_start = 0 if _y1 >= 0 else -_y1 w = x2 - x1 + 1 h = y2 - y1 + 1 patch[y_start:y_start + h, x_start:x_start + w, ...] = img[y1:y1 + h, x1:x1 + w, ...] patches.append(patch) if bboxes.ndim == 1: return patches[0] else: return patches
Crop image patches. 3 steps: scale the bboxes -> clip bboxes -> crop and pad.
def imcrop(img, bboxes, scale=1.0, pad_fill=None): """Crop image patches. 3 steps: scale the bboxes -> clip bboxes -> crop and pad. Args: img (ndarray): Image to be cropped. bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes. scale (float, optional): Scale ratio of bboxes, the default value 1.0 means no padding. pad_fill (number or list): Value to be filled for padding, None for no padding. Returns: list or ndarray: The cropped image patches. """ chn = 1 if img.ndim == 2 else img.shape[2] if pad_fill is not None: if isinstance(pad_fill, (int, float)): pad_fill = [pad_fill for _ in range(chn)] assert len(pad_fill) == chn _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32) clipped_bbox = bbox_clip(scaled_bboxes, img.shape) patches = [] for i in range(clipped_bbox.shape[0]): x1, y1, x2, y2 = tuple(clipped_bbox[i, :]) if pad_fill is None: patch = img[y1:y2 + 1, x1:x2 + 1, ...] else: _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :]) if chn == 2: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1) else: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn) patch = np.array( pad_fill, dtype=img.dtype) * np.ones( patch_shape, dtype=img.dtype) x_start = 0 if _x1 >= 0 else -_x1 y_start = 0 if _y1 >= 0 else -_y1 w = x2 - x1 + 1 h = y2 - y1 + 1 patch[y_start:y_start + h, x_start:x_start + w, ...] = img[y1:y1 + h, x1:x1 + w, ...] patches.append(patch) if bboxes.ndim == 1: return patches[0] else: return patches
mmcv/image/transforms/geometry.py
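Usage sketch: with pad_fill set, a box that spills off the image comes back at its full requested size, padded with the fill value instead of being shrunk.

import numpy as np
import mmcv

img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
patch = mmcv.imcrop(img, np.array([80, 80, 119, 119]), pad_fill=0)
print(patch.shape)  # (40, 40, 3), though only 20x20 pixels exist in img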
open-mmlab/mmcv
impad
def impad(img, shape, pad_val=0): if not isinstance(pad_val, (int, float)): assert len(pad_val) == img.shape[-1] if len(shape) < len(img.shape): shape = shape + (img.shape[-1], ) assert len(shape) == len(img.shape) for i in range(len(shape) - 1): assert shape[i] >= img.shape[i] pad = np.empty(shape, dtype=img.dtype) pad[...] = pad_val pad[:img.shape[0], :img.shape[1], ...] = img return pad
Pad an image to a certain shape.
def impad(img, shape, pad_val=0): """Pad an image to a certain shape. Args: img (ndarray): Image to be padded. shape (tuple): Expected padding shape. pad_val (number or sequence): Values to be filled in padding areas. Returns: ndarray: The padded image. """ if not isinstance(pad_val, (int, float)): assert len(pad_val) == img.shape[-1] if len(shape) < len(img.shape): shape = shape + (img.shape[-1], ) assert len(shape) == len(img.shape) for i in range(len(shape) - 1): assert shape[i] >= img.shape[i] pad = np.empty(shape, dtype=img.dtype) pad[...] = pad_val pad[:img.shape[0], :img.shape[1], ...] = img return pad
mmcv/image/transforms/geometry.py
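Quick sketch: pad a 2x3 image up to 4x5; only the new cells take pad_val.

import numpy as np
import mmcv

img = np.ones((2, 3), dtype=np.uint8)
print(mmcv.impad(img, (4, 5), pad_val=9))
# rows 0-1 / cols 0-2 keep the image, everything else is 9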
open-mmlab/mmcv
impad_to_multiple
def impad_to_multiple(img, divisor, pad_val=0): pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor return impad(img, (pad_h, pad_w), pad_val)
Pad an image to ensure each edge is a multiple of some number.
def impad_to_multiple(img, divisor, pad_val=0): """Pad an image to ensure each edge to be multiple to some number. Args: img (ndarray): Image to be padded. divisor (int): Padded image edges will be multiple to divisor. pad_val (number or sequence): Same as :func:`impad`. Returns: ndarray: The padded image. """ pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor return impad(img, (pad_h, pad_w), pad_val)
mmcv/image/transforms/geometry.py
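Usage sketch: each edge rounds up to the next multiple of the divisor, which is how detection pipelines keep feature-map strides aligned (divisor 32 is a typical choice).

import numpy as np
import mmcv

img = np.zeros((100, 130, 3), dtype=np.uint8)
print(mmcv.impad_to_multiple(img, 32).shape)  # (128, 160, 3)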
open-mmlab/mmcv
_scale_size
def _scale_size(size, scale): w, h = size return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)
Rescale a size by a ratio.
def _scale_size(size, scale): """Rescale a size by a ratio. Args: size (tuple): w, h. scale (float): Scaling factor. Returns: tuple[int]: scaled size. """ w, h = size return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)
mmcv/image/transforms/resize.py
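The + 0.5 turns int() truncation into round-half-up on each scaled edge. Since _scale_size is a private helper, this sketch redefines it locally rather than importing it:

def _scale_size(size, scale):
    w, h = size
    return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)

print(_scale_size((99, 51), 0.5))  # (50, 26), not the truncated (49, 25)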
open-mmlab/mmcv
imresize
def imresize(img, size, return_scale=False, interpolation='bilinear'): h, w = img.shape[:2] resized_img = cv2.resize( img, size, interpolation=interp_codes[interpolation]) if not return_scale: return resized_img else: w_scale = size[0] / w h_scale = size[1] / h return resized_img, w_scale, h_scale
Resize image to a given size.
def imresize(img, size, return_scale=False, interpolation='bilinear'): """Resize image to a given size. Args: img (ndarray): The input image. size (tuple): Target (w, h). return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos". Returns: tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`. """ h, w = img.shape[:2] resized_img = cv2.resize( img, size, interpolation=interp_codes[interpolation]) if not return_scale: return resized_img else: w_scale = size[0] / w h_scale = size[1] / h return resized_img, w_scale, h_scale
mmcv/image/transforms/resize.py
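Usage sketch: note that size is (w, h) while ndarray shapes are (h, w, c), so the returned scales are per-axis.

import numpy as np
import mmcv

img = np.zeros((100, 200, 3), dtype=np.uint8)
out, w_scale, h_scale = mmcv.imresize(img, (50, 40), return_scale=True)
print(out.shape, w_scale, h_scale)  # (40, 50, 3) 0.25 0.4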