repo_name | method_name | method_code | method_summary | original_method_code | method_path |
---|---|---|---|---|---|
apache/airflow | S3Hook.read_key | def read_key(self, key, bucket_name=None):
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8') | Reads a key from S3 | def read_key(self, key, bucket_name=None):
"""
Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
"""
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8') | airflow/hooks/S3_hook.py |
apache/airflow | S3Hook.select_key | def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload'].decode('utf-8')
for event in response['Payload']
if 'Records' in event) | Reads a key with S3 Select. | def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
"""
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
"""
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload'].decode('utf-8')
for event in response['Payload']
if 'Records' in event) | airflow/hooks/S3_hook.py |
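The `select_key` record above defaults both serialization dictionaries to plain CSV; a minimal usage sketch with non-default serialization follows. The bucket, key and the `aws_conn_id` constructor argument are illustrative assumptions, not part of the record.

```python
from airflow.hooks.S3_hook import S3Hook

s3_hook = S3Hook(aws_conn_id='aws_default')  # connection id assumed

# Full s3:// URL, so bucket_name is omitted and parse_s3_url() splits it.
subset = s3_hook.select_key(
    key='s3://my-bucket/data/events.csv.gz',
    expression='SELECT s.* FROM S3Object s LIMIT 10',
    input_serialization={'CompressionType': 'GZIP', 'CSV': {'FileHeaderInfo': 'USE'}},
    output_serialization={'CSV': {}},
)
print(subset)  # concatenated 'Records' payloads, decoded as UTF-8
```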
apache/airflow | S3Hook.check_for_wildcard_key | def check_for_wildcard_key(self,
wildcard_key, bucket_name=None, delimiter=''):
return self.get_wildcard_key(wildcard_key=wildcard_key,
bucket_name=bucket_name,
delimiter=delimiter) is not None | Checks that a key matching a wildcard expression exists in a bucket | def check_for_wildcard_key(self,
wildcard_key, bucket_name=None, delimiter=''):
"""
Checks that a key matching a wildcard expression exists in a bucket
:param wildcard_key: the path to the key
:type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
:param delimiter: the delimiter marks key hierarchy
:type delimiter: str
"""
return self.get_wildcard_key(wildcard_key=wildcard_key,
bucket_name=bucket_name,
delimiter=delimiter) is not None | airflow/hooks/S3_hook.py |
apache/airflow | S3Hook.load_file | def load_file(self,
filename,
key,
bucket_name=None,
replace=False,
encrypt=False):
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
client = self.get_conn()
client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args) | Loads a local file to S3 | def load_file(self,
filename,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
client = self.get_conn()
client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args) | airflow/hooks/S3_hook.py |
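A minimal sketch of calling `load_file` as documented above; the file path, bucket, key and the `aws_conn_id` constructor argument are assumptions.

```python
from airflow.hooks.S3_hook import S3Hook

s3_hook = S3Hook(aws_conn_id='aws_default')  # connection id assumed
s3_hook.load_file(
    filename='/tmp/report.csv',
    key='reports/2019-01-01/report.csv',
    bucket_name='my-bucket',
    replace=False,   # a ValueError is raised if the key already exists
    encrypt=True,    # uploaded with ServerSideEncryption=AES256
)
```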
apache/airflow | S3Hook.load_string | def load_string(self,
string_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
encoding='utf-8'):
self.load_bytes(string_data.encode(encoding),
key=key,
bucket_name=bucket_name,
replace=replace,
encrypt=encrypt) | Loads a string to S3. This is provided as a convenience to drop a string in S3. It uses the boto infrastructure to ship a file to S3. | def load_string(self,
string_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
encoding='utf-8'):
"""
Loads a string to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param string_data: str to set as content for the key.
:type string_data: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
self.load_bytes(string_data.encode(encoding),
key=key,
bucket_name=bucket_name,
replace=replace,
encrypt=encrypt) | airflow/hooks/S3_hook.py |
apache/airflow | S3Hook.load_bytes | def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args) | Loads bytes to S3. This is provided as a convenience to drop a string in S3. It uses the boto infrastructure to ship a file to S3. | def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads bytes to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args) | airflow/hooks/S3_hook.py |
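`load_string` and `load_bytes` share the key/bucket conventions of `load_file`; a short sketch follows, with all bucket and key names assumed.

```python
from airflow.hooks.S3_hook import S3Hook

s3_hook = S3Hook(aws_conn_id='aws_default')  # connection id assumed

# String payload addressed by a full s3:// URL (bucket parsed out of the key).
s3_hook.load_string('id,value\n1,2\n', key='s3://my-bucket/tmp/tiny.csv', replace=True)

# Raw bytes addressed by bucket plus relative key.
s3_hook.load_bytes(b'\x00\x01\x02', key='tmp/blob.bin', bucket_name='my-bucket', replace=True)
```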
apache/airflow | S3Hook.load_file_obj | def load_file_obj(self,
file_obj,
key,
bucket_name=None,
replace=False,
encrypt=False):
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
client = self.get_conn()
client.upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args) | Loads a file object to S3 | def load_file_obj(self,
file_obj,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads a file object to S3
:param file_obj: The file-like object to set as the content for the S3 key.
:type file_obj: file-like object
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag that indicates whether to overwrite the key
if it already exists.
:type replace: bool
:param encrypt: If True, S3 encrypts the file on the server,
and the file is stored in encrypted form at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
client = self.get_conn()
client.upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args) | airflow/hooks/S3_hook.py |
apache/airflow | S3Hook.copy_object | def copy_object(self,
source_bucket_key,
dest_bucket_key,
source_bucket_name=None,
dest_bucket_name=None,
source_version_id=None):
if dest_bucket_name is None:
dest_bucket_name, dest_bucket_key = self.parse_s3_url(dest_bucket_key)
else:
parsed_url = urlparse(dest_bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If dest_bucket_name is provided, ' +
'dest_bucket_key should be relative path ' +
'from root level, rather than a full s3:// url')
if source_bucket_name is None:
source_bucket_name, source_bucket_key = self.parse_s3_url(source_bucket_key)
else:
parsed_url = urlparse(source_bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If source_bucket_name is provided, ' +
'source_bucket_key should be relative path ' +
'from root level, rather than a full s3:// url')
CopySource = {'Bucket': source_bucket_name,
'Key': source_bucket_key,
'VersionId': source_version_id}
response = self.get_conn().copy_object(Bucket=dest_bucket_name,
Key=dest_bucket_key,
CopySource=CopySource)
return response | Creates a copy of an object that is already stored in S3. | def copy_object(self,
source_bucket_key,
dest_bucket_key,
source_bucket_name=None,
dest_bucket_name=None,
source_version_id=None):
"""
Creates a copy of an object that is already stored in S3.
Note: the S3 connection used here needs to have access to both
source and destination bucket/key.
:param source_bucket_key: The key of the source object.
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit source_bucket_name.
:type source_bucket_key: str
:param dest_bucket_key: The key of the object to copy to.
The convention to specify `dest_bucket_key` is the same
as `source_bucket_key`.
:type dest_bucket_key: str
:param source_bucket_name: Name of the S3 bucket where the source object is in.
It should be omitted when `source_bucket_key` is provided as a full s3:// url.
:type source_bucket_name: str
:param dest_bucket_name: Name of the S3 bucket to where the object is copied.
It should be omitted when `dest_bucket_key` is provided as a full s3:// url.
:type dest_bucket_name: str
:param source_version_id: Version ID of the source object (OPTIONAL)
:type source_version_id: str
"""
if dest_bucket_name is None:
dest_bucket_name, dest_bucket_key = self.parse_s3_url(dest_bucket_key)
else:
parsed_url = urlparse(dest_bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If dest_bucket_name is provided, ' +
'dest_bucket_key should be relative path ' +
'from root level, rather than a full s3:// url')
if source_bucket_name is None:
source_bucket_name, source_bucket_key = self.parse_s3_url(source_bucket_key)
else:
parsed_url = urlparse(source_bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If source_bucket_name is provided, ' +
'source_bucket_key should be relative path ' +
'from root level, rather than a full s3:// url')
CopySource = {'Bucket': source_bucket_name,
'Key': source_bucket_key,
'VersionId': source_version_id}
response = self.get_conn().copy_object(Bucket=dest_bucket_name,
Key=dest_bucket_key,
CopySource=CopySource)
return response | airflow/hooks/S3_hook.py |
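The `copy_object` docstring above describes two addressing conventions (full s3:// URLs versus bucket name plus relative key); the sketch below shows both, with all bucket and key names assumed.

```python
from airflow.hooks.S3_hook import S3Hook

s3_hook = S3Hook(aws_conn_id='aws_default')  # connection id assumed

# Full s3:// URLs: omit source_bucket_name / dest_bucket_name.
s3_hook.copy_object('s3://source-bucket/raw/a.json', 's3://dest-bucket/clean/a.json')

# Relative keys: pass the bucket names explicitly.
s3_hook.copy_object(
    source_bucket_key='raw/a.json',
    dest_bucket_key='clean/a.json',
    source_bucket_name='source-bucket',
    dest_bucket_name='dest-bucket',
)
```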
apache/airflow | CassandraToGoogleCloudStorageOperator._query_cassandra | def _query_cassandra(self):
self.hook = CassandraHook(cassandra_conn_id=self.cassandra_conn_id)
session = self.hook.get_conn()
cursor = session.execute(self.cql)
return cursor | Queries cassandra and returns a cursor to the results. | def _query_cassandra(self):
"""
Queries cassandra and returns a cursor to the results.
"""
self.hook = CassandraHook(cassandra_conn_id=self.cassandra_conn_id)
session = self.hook.get_conn()
cursor = session.execute(self.cql)
return cursor | airflow/contrib/operators/cassandra_to_gcs.py |
apache/airflow | CassandraToGoogleCloudStorageOperator.convert_user_type | def convert_user_type(cls, name, value):
names = value._fields
values = [cls.convert_value(name, getattr(value, name)) for name in names]
return cls.generate_data_dict(names, values) | Converts a user type to RECORD that contains n fields, where n is the number of attributes. Each element in the user type class will be converted to its corresponding data type in BQ. | def convert_user_type(cls, name, value):
"""
Converts a user type to RECORD that contains n fields, where n is the
number of attributes. Each element in the user type class will be converted to its
corresponding data type in BQ.
"""
names = value._fields
values = [cls.convert_value(name, getattr(value, name)) for name in names]
return cls.generate_data_dict(names, values) | airflow/contrib/operators/cassandra_to_gcs.py |
apache/airflow | send_email | def send_email(to, subject, html_content, files=None, dryrun=False, cc=None,
bcc=None, mime_subtype='mixed', sandbox_mode=False, **kwargs):
if files is None:
files = []
mail = Mail()
from_email = kwargs.get('from_email') or os.environ.get('SENDGRID_MAIL_FROM')
from_name = kwargs.get('from_name') or os.environ.get('SENDGRID_MAIL_SENDER')
mail.from_email = Email(from_email, from_name)
mail.subject = subject
mail.mail_settings = MailSettings()
if sandbox_mode:
mail.mail_settings.sandbox_mode = SandBoxMode(enable=True)
personalization = Personalization()
to = get_email_address_list(to)
for to_address in to:
personalization.add_to(Email(to_address))
if cc:
cc = get_email_address_list(cc)
for cc_address in cc:
personalization.add_cc(Email(cc_address))
if bcc:
bcc = get_email_address_list(bcc)
for bcc_address in bcc:
personalization.add_bcc(Email(bcc_address))
pers_custom_args = kwargs.get('personalization_custom_args', None)
if isinstance(pers_custom_args, dict):
for key in pers_custom_args.keys():
personalization.add_custom_arg(CustomArg(key, pers_custom_args[key]))
mail.add_personalization(personalization)
mail.add_content(Content('text/html', html_content))
categories = kwargs.get('categories', [])
for cat in categories:
mail.add_category(Category(cat))
for fname in files:
basename = os.path.basename(fname)
attachment = Attachment()
attachment.type = mimetypes.guess_type(basename)[0]
attachment.filename = basename
attachment.disposition = "attachment"
attachment.content_id = '<{0}>'.format(basename)
with open(fname, "rb") as f:
attachment.content = base64.b64encode(f.read()).decode('utf-8')
mail.add_attachment(attachment)
_post_sendgrid_mail(mail.get()) | Send an email with html content using sendgrid. To use this plugin, install the sendgrid subpackage, set it as the [email] backend in airflow.cfg, and configure the SENDGRID_MAIL_FROM and SENDGRID_API_KEY environment variables. | def send_email(to, subject, html_content, files=None, dryrun=False, cc=None,
bcc=None, mime_subtype='mixed', sandbox_mode=False, **kwargs):
"""
Send an email with html content using sendgrid.
To use this plugin:
0. include sendgrid subpackage as part of your Airflow installation, e.g.,
pip install 'apache-airflow[sendgrid]'
1. update [email] backend in airflow.cfg, i.e.,
[email]
email_backend = airflow.contrib.utils.sendgrid.send_email
2. configure Sendgrid specific environment variables at all Airflow instances:
SENDGRID_MAIL_FROM={your-mail-from}
SENDGRID_API_KEY={your-sendgrid-api-key}.
"""
if files is None:
files = []
mail = Mail()
from_email = kwargs.get('from_email') or os.environ.get('SENDGRID_MAIL_FROM')
from_name = kwargs.get('from_name') or os.environ.get('SENDGRID_MAIL_SENDER')
mail.from_email = Email(from_email, from_name)
mail.subject = subject
mail.mail_settings = MailSettings()
if sandbox_mode:
mail.mail_settings.sandbox_mode = SandBoxMode(enable=True)
# Add the recipient list of to emails.
personalization = Personalization()
to = get_email_address_list(to)
for to_address in to:
personalization.add_to(Email(to_address))
if cc:
cc = get_email_address_list(cc)
for cc_address in cc:
personalization.add_cc(Email(cc_address))
if bcc:
bcc = get_email_address_list(bcc)
for bcc_address in bcc:
personalization.add_bcc(Email(bcc_address))
# Add custom_args to personalization if present
pers_custom_args = kwargs.get('personalization_custom_args', None)
if isinstance(pers_custom_args, dict):
for key in pers_custom_args.keys():
personalization.add_custom_arg(CustomArg(key, pers_custom_args[key]))
mail.add_personalization(personalization)
mail.add_content(Content('text/html', html_content))
categories = kwargs.get('categories', [])
for cat in categories:
mail.add_category(Category(cat))
# Add email attachment.
for fname in files:
basename = os.path.basename(fname)
attachment = Attachment()
attachment.type = mimetypes.guess_type(basename)[0]
attachment.filename = basename
attachment.disposition = "attachment"
attachment.content_id = '<{0}>'.format(basename)
with open(fname, "rb") as f:
attachment.content = base64.b64encode(f.read()).decode('utf-8')
mail.add_attachment(attachment)
_post_sendgrid_mail(mail.get()) | airflow/contrib/utils/sendgrid.py |
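With the backend and environment variables configured as the docstring describes, Airflow invokes this function itself; the direct call below is only a sketch, with all addresses and file paths assumed.

```python
from airflow.contrib.utils.sendgrid import send_email

send_email(
    to=['alice@example.com', 'bob@example.com'],
    subject='Nightly DAG succeeded',
    html_content='<p>All tasks finished.</p>',
    files=['/tmp/run_summary.csv'],     # attached base64-encoded
    cc='ops@example.com',
    sandbox_mode=True,                  # build the payload without delivering it
    categories=['airflow', 'alerts'],   # picked up from **kwargs
)
```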
apache/airflow | GCPSpeechToTextHook.get_conn | def get_conn(self):
if not self._client:
self._client = SpeechClient(credentials=self._get_credentials())
return self._client | Retrieves connection to Cloud Speech. | def get_conn(self):
"""
Retrieves connection to Cloud Speech.
:return: Google Cloud Speech client object.
:rtype: google.cloud.speech_v1.SpeechClient
"""
if not self._client:
self._client = SpeechClient(credentials=self._get_credentials())
return self._client | airflow/contrib/hooks/gcp_speech_to_text_hook.py |
apache/airflow | GCPSpeechToTextHook.recognize_speech | def recognize_speech(self, config, audio, retry=None, timeout=None):
client = self.get_conn()
response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout)
self.log.info("Recognised speech: %s" % response)
return response | Recognizes audio input | def recognize_speech(self, config, audio, retry=None, timeout=None):
"""
Recognizes audio input
:param config: information to the recognizer that specifies how to process the request.
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:type config: dict or google.cloud.speech_v1.types.RecognitionConfig
:param audio: audio data to be recognized
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
:type audio: dict or google.cloud.speech_v1.types.RecognitionAudio
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:type timeout: float
"""
client = self.get_conn()
response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout)
self.log.info("Recognised speech: %s" % response)
return response | airflow/contrib/hooks/gcp_speech_to_text_hook.py |
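A sketch of a `recognize_speech` call using plain dicts for the config and audio arguments, as the docstring permits; the `gcp_conn_id` constructor argument and the GCS URI are assumptions.

```python
from airflow.contrib.hooks.gcp_speech_to_text_hook import GCPSpeechToTextHook

hook = GCPSpeechToTextHook(gcp_conn_id='google_cloud_default')  # constructor arg assumed
response = hook.recognize_speech(
    config={'encoding': 'FLAC', 'language_code': 'en-US'},  # RecognitionConfig as a dict
    audio={'uri': 'gs://my-bucket/audio/sample.flac'},      # RecognitionAudio as a dict
)
```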
apache/airflow | SparkSqlOperator.execute | def execute(self, context):
self._hook = SparkSqlHook(sql=self._sql,
conf=self._conf,
conn_id=self._conn_id,
total_executor_cores=self._total_executor_cores,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
keytab=self._keytab,
principal=self._principal,
name=self._name,
num_executors=self._num_executors,
master=self._master,
yarn_queue=self._yarn_queue
)
self._hook.run_query() | Call the SparkSqlHook to run the provided sql query | def execute(self, context):
"""
Call the SparkSqlHook to run the provided sql query
"""
self._hook = SparkSqlHook(sql=self._sql,
conf=self._conf,
conn_id=self._conn_id,
total_executor_cores=self._total_executor_cores,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
keytab=self._keytab,
principal=self._principal,
name=self._name,
num_executors=self._num_executors,
master=self._master,
yarn_queue=self._yarn_queue
)
self._hook.run_query() | airflow/contrib/operators/spark_sql_operator.py |
apache/airflow | load_entrypoint_plugins | def load_entrypoint_plugins(entry_points, airflow_plugins):
for entry_point in entry_points:
log.debug('Importing entry_point plugin %s', entry_point.name)
plugin_obj = entry_point.load()
if is_valid_plugin(plugin_obj, airflow_plugins):
if callable(getattr(plugin_obj, 'on_load', None)):
plugin_obj.on_load()
airflow_plugins.append(plugin_obj)
return airflow_plugins | Load AirflowPlugin subclasses from the entrypoints provided. The entry_point group should be 'airflow.plugins'. | def load_entrypoint_plugins(entry_points, airflow_plugins):
"""
Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin]
"""
for entry_point in entry_points:
log.debug('Importing entry_point plugin %s', entry_point.name)
plugin_obj = entry_point.load()
if is_valid_plugin(plugin_obj, airflow_plugins):
if callable(getattr(plugin_obj, 'on_load', None)):
plugin_obj.on_load()
airflow_plugins.append(plugin_obj)
return airflow_plugins | airflow/plugins_manager.py |
apache/airflow | is_valid_plugin | def is_valid_plugin(plugin_obj, existing_plugins):
if (
inspect.isclass(plugin_obj) and
issubclass(plugin_obj, AirflowPlugin) and
(plugin_obj is not AirflowPlugin)
):
plugin_obj.validate()
return plugin_obj not in existing_plugins
return False | Check whether a potential object is a subclass of the AirflowPlugin class. | def is_valid_plugin(plugin_obj, existing_plugins):
"""
Check whether a potential object is a subclass of
the AirflowPlugin class.
:param plugin_obj: potential subclass of AirflowPlugin
:param existing_plugins: Existing list of AirflowPlugin subclasses
:return: Whether or not the obj is a valid subclass of
AirflowPlugin
"""
if (
inspect.isclass(plugin_obj) and
issubclass(plugin_obj, AirflowPlugin) and
(plugin_obj is not AirflowPlugin)
):
plugin_obj.validate()
return plugin_obj not in existing_plugins
return False | airflow/plugins_manager.py |
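`load_entrypoint_plugins` and `is_valid_plugin` together accept any proper `AirflowPlugin` subclass exposed through the `airflow.plugins` entry-point group; a minimal sketch of such a plugin follows, with the package layout and names assumed.

```python
from airflow.plugins_manager import AirflowPlugin

class MyCompanyPlugin(AirflowPlugin):
    name = 'my_company_plugin'  # required; checked by AirflowPlugin.validate()

    @classmethod
    def on_load(cls, *args, **kwargs):
        # called once by load_entrypoint_plugins before the plugin is registered
        pass

# Exposed from the distributing package's setup.py (assumed layout):
# entry_points={'airflow.plugins': ['my_company_plugin = my_pkg.plugin:MyCompanyPlugin']}
```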
apache/airflow | SkipMixin.skip | def skip(self, dag_run, execution_date, tasks, session=None):
if not tasks:
return
task_ids = [d.task_id for d in tasks]
now = timezone.utcnow()
if dag_run:
session.query(TaskInstance).filter(
TaskInstance.dag_id == dag_run.dag_id,
TaskInstance.execution_date == dag_run.execution_date,
TaskInstance.task_id.in_(task_ids)
).update({TaskInstance.state: State.SKIPPED,
TaskInstance.start_date: now,
TaskInstance.end_date: now},
synchronize_session=False)
session.commit()
else:
assert execution_date is not None, "Execution date is None and no dag run"
self.log.warning("No DAG RUN present this should not happen")
for task in tasks:
ti = TaskInstance(task, execution_date=execution_date)
ti.state = State.SKIPPED
ti.start_date = now
ti.end_date = now
session.merge(ti)
session.commit() | Sets tasks instances to skipped from the same dag run. | def skip(self, dag_run, execution_date, tasks, session=None):
"""
Sets tasks instances to skipped from the same dag run.
:param dag_run: the DagRun for which to set the tasks to skipped
:param execution_date: execution_date
:param tasks: tasks to skip (not task_ids)
:param session: db session to use
"""
if not tasks:
return
task_ids = [d.task_id for d in tasks]
now = timezone.utcnow()
if dag_run:
session.query(TaskInstance).filter(
TaskInstance.dag_id == dag_run.dag_id,
TaskInstance.execution_date == dag_run.execution_date,
TaskInstance.task_id.in_(task_ids)
).update({TaskInstance.state: State.SKIPPED,
TaskInstance.start_date: now,
TaskInstance.end_date: now},
synchronize_session=False)
session.commit()
else:
assert execution_date is not None, "Execution date is None and no dag run"
self.log.warning("No DAG RUN present this should not happen")
# this is defensive against dag runs that are not complete
for task in tasks:
ti = TaskInstance(task, execution_date=execution_date)
ti.state = State.SKIPPED
ti.start_date = now
ti.end_date = now
session.merge(ti)
session.commit() | airflow/models/skipmixin.py |
apache/airflow | AzureDataLakeHook.get_conn | def get_conn(self):
conn = self.get_connection(self.conn_id)
service_options = conn.extra_dejson
self.account_name = service_options.get('account_name')
adlCreds = lib.auth(tenant_id=service_options.get('tenant'),
client_secret=conn.password,
client_id=conn.login)
adlsFileSystemClient = core.AzureDLFileSystem(adlCreds,
store_name=self.account_name)
adlsFileSystemClient.connect()
return adlsFileSystemClient | Return a AzureDLFileSystem object. | def get_conn(self):
"""Return a AzureDLFileSystem object."""
conn = self.get_connection(self.conn_id)
service_options = conn.extra_dejson
self.account_name = service_options.get('account_name')
adlCreds = lib.auth(tenant_id=service_options.get('tenant'),
client_secret=conn.password,
client_id=conn.login)
adlsFileSystemClient = core.AzureDLFileSystem(adlCreds,
store_name=self.account_name)
adlsFileSystemClient.connect()
return adlsFileSystemClient | airflow/contrib/hooks/azure_data_lake_hook.py |
apache/airflow | AzureDataLakeHook.check_for_file | def check_for_file(self, file_path):
try:
files = self.connection.glob(file_path, details=False, invalidate_cache=True)
return len(files) == 1
except FileNotFoundError:
return False | Check if a file exists on Azure Data Lake. | def check_for_file(self, file_path):
"""
Check if a file exists on Azure Data Lake.
:param file_path: Path and name of the file.
:type file_path: str
:return: True if the file exists, False otherwise.
:rtype: bool
"""
try:
files = self.connection.glob(file_path, details=False, invalidate_cache=True)
return len(files) == 1
except FileNotFoundError:
return False | airflow/contrib/hooks/azure_data_lake_hook.py |
apache/airflow | AzureDataLakeHook.upload_file | def upload_file(self, local_path, remote_path, nthreads=64, overwrite=True,
buffersize=4194304, blocksize=4194304):
multithread.ADLUploader(self.connection,
lpath=local_path,
rpath=remote_path,
nthreads=nthreads,
overwrite=overwrite,
buffersize=buffersize,
blocksize=blocksize) | Upload a file to Azure Data Lake. | def upload_file(self, local_path, remote_path, nthreads=64, overwrite=True,
buffersize=4194304, blocksize=4194304):
"""
Upload a file to Azure Data Lake.
:param local_path: local path. Can be single file, directory (in which case,
upload recursively) or glob pattern. Recursive glob patterns using `**`
are not supported.
:type local_path: str
:param remote_path: Remote path to upload to; if multiple files, this is the
directory root to write within.
:type remote_path: str
:param nthreads: Number of threads to use. If None, uses the number of cores.
:type nthreads: int
:param overwrite: Whether to forcibly overwrite existing files/directories.
If False and remote path is a directory, will quit regardless if any files
would be overwritten or not. If True, only matching filenames are actually
overwritten.
:type overwrite: bool
:param buffersize: int [2**22]
Number of bytes for internal buffer. This block cannot be bigger than
a chunk and cannot be smaller than a block.
:type buffersize: int
:param blocksize: int [2**22]
Number of bytes for a block. Within each chunk, we write a smaller
block for each API call. This block cannot be bigger than a chunk.
:type blocksize: int
"""
multithread.ADLUploader(self.connection,
lpath=local_path,
rpath=remote_path,
nthreads=nthreads,
overwrite=overwrite,
buffersize=buffersize,
blocksize=blocksize) | airflow/contrib/hooks/azure_data_lake_hook.py |
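A sketch of an upload through the hook above; the constructor argument name and the connection extras (tenant, account_name) implied by `get_conn` are assumptions, as are the paths.

```python
from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook

adl_hook = AzureDataLakeHook(azure_data_lake_conn_id='azure_data_lake_default')  # arg name assumed
adl_hook.upload_file(
    local_path='/tmp/events.json',
    remote_path='raw/2019/01/01/events.json',
    overwrite=True,
)
```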
apache/airflow | AzureDataLakeHook.list | def list(self, path):
if "*" in path:
return self.connection.glob(path)
else:
return self.connection.walk(path) | List files in Azure Data Lake Storage | def list(self, path):
"""
List files in Azure Data Lake Storage
:param path: full path/globstring to use to list files in ADLS
:type path: str
"""
if "*" in path:
return self.connection.glob(path)
else:
return self.connection.walk(path) | airflow/contrib/hooks/azure_data_lake_hook.py |
apache/airflow | AWSAthenaOperator.execute | def execute(self, context):
self.hook = self.get_hook()
self.hook.get_conn()
self.query_execution_context['Database'] = self.database
self.result_configuration['OutputLocation'] = self.output_location
self.query_execution_id = self.hook.run_query(self.query, self.query_execution_context,
self.result_configuration, self.client_request_token)
query_status = self.hook.poll_query_status(self.query_execution_id, self.max_tries)
if query_status in AWSAthenaHook.FAILURE_STATES:
raise Exception(
'Final state of Athena job is {}, query_execution_id is {}.'
.format(query_status, self.query_execution_id))
elif not query_status or query_status in AWSAthenaHook.INTERMEDIATE_STATES:
raise Exception(
'Final state of Athena job is {}. '
'Max tries of poll status exceeded, query_execution_id is {}.'
.format(query_status, self.query_execution_id)) | Run Presto Query on Athena | def execute(self, context):
"""
Run Presto Query on Athena
"""
self.hook = self.get_hook()
self.hook.get_conn()
self.query_execution_context['Database'] = self.database
self.result_configuration['OutputLocation'] = self.output_location
self.query_execution_id = self.hook.run_query(self.query, self.query_execution_context,
self.result_configuration, self.client_request_token)
query_status = self.hook.poll_query_status(self.query_execution_id, self.max_tries)
if query_status in AWSAthenaHook.FAILURE_STATES:
raise Exception(
'Final state of Athena job is {}, query_execution_id is {}.'
.format(query_status, self.query_execution_id))
elif not query_status or query_status in AWSAthenaHook.INTERMEDIATE_STATES:
raise Exception(
'Final state of Athena job is {}. '
'Max tries of poll status exceeded, query_execution_id is {}.'
.format(query_status, self.query_execution_id)) | airflow/contrib/operators/aws_athena_operator.py |
apache/airflow | uncompress_file | def uncompress_file(input_file_name, file_extension, dest_dir):
if file_extension.lower() not in ('.gz', '.bz2'):
raise NotImplementedError("Received {} format. Only gz and bz2 "
"files can currently be uncompressed."
.format(file_extension))
if file_extension.lower() == '.gz':
fmodule = gzip.GzipFile
elif file_extension.lower() == '.bz2':
fmodule = bz2.BZ2File
with fmodule(input_file_name, mode='rb') as f_compressed,\
NamedTemporaryFile(dir=dest_dir,
mode='wb',
delete=False) as f_uncompressed:
shutil.copyfileobj(f_compressed, f_uncompressed)
return f_uncompressed.name | Uncompress gz and bz2 files | def uncompress_file(input_file_name, file_extension, dest_dir):
"""
Uncompress gz and bz2 files
"""
if file_extension.lower() not in ('.gz', '.bz2'):
raise NotImplementedError("Received {} format. Only gz and bz2 "
"files can currently be uncompressed."
.format(file_extension))
if file_extension.lower() == '.gz':
fmodule = gzip.GzipFile
elif file_extension.lower() == '.bz2':
fmodule = bz2.BZ2File
with fmodule(input_file_name, mode='rb') as f_compressed,\
NamedTemporaryFile(dir=dest_dir,
mode='wb',
delete=False) as f_uncompressed:
shutil.copyfileobj(f_compressed, f_uncompressed)
return f_uncompressed.name | airflow/utils/compression.py |
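A sketch of `uncompress_file`: it writes the inflated content to a `NamedTemporaryFile` in `dest_dir` and returns that temporary path (the paths below are assumed).

```python
from airflow.utils.compression import uncompress_file

tmp_path = uncompress_file('/tmp/logs.csv.gz', '.gz', '/tmp')
with open(tmp_path) as f:
    header = f.readline()
```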
apache/airflow | MsSqlToGoogleCloudStorageOperator._query_mssql | def _query_mssql(self):
mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
conn = mssql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor | Queries MSSQL and returns a cursor of results. | def _query_mssql(self):
"""
Queries MSSQL and returns a cursor of results.
:return: mssql cursor
"""
mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
conn = mssql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor | airflow/contrib/operators/mssql_to_gcs.py |
apache/airflow | CgroupTaskRunner._create_cgroup | def _create_cgroup(self, path):
node = trees.Tree().root
path_split = path.split(os.sep)
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.debug("Creating cgroup %s in %s", path_element, node.path)
node = node.create_cgroup(path_element)
else:
self.log.debug(
"Not creating cgroup %s in %s since it already exists",
path_element, node.path
)
node = name_to_node[path_element]
return node | Create the specified cgroup. | def _create_cgroup(self, path):
"""
Create the specified cgroup.
:param path: The path of the cgroup to create.
E.g. cpu/mygroup/mysubgroup
:return: the Node associated with the created cgroup.
:rtype: cgroupspy.nodes.Node
"""
node = trees.Tree().root
path_split = path.split(os.sep)
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.debug("Creating cgroup %s in %s", path_element, node.path)
node = node.create_cgroup(path_element)
else:
self.log.debug(
"Not creating cgroup %s in %s since it already exists",
path_element, node.path
)
node = name_to_node[path_element]
return node | airflow/contrib/task_runner/cgroup_task_runner.py |
apache/airflow | CgroupTaskRunner._delete_cgroup | def _delete_cgroup(self, path):
node = trees.Tree().root
path_split = path.split("/")
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.warning("Cgroup does not exist: %s", path)
return
else:
node = name_to_node[path_element]
parent = node.parent
self.log.debug("Deleting cgroup %s/%s", parent, node.name)
parent.delete_cgroup(node.name) | Delete the specified cgroup. | def _delete_cgroup(self, path):
"""
Delete the specified cgroup.
:param path: The path of the cgroup to delete.
E.g. cpu/mygroup/mysubgroup
"""
node = trees.Tree().root
path_split = path.split("/")
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.warning("Cgroup does not exist: %s", path)
return
else:
node = name_to_node[path_element]
# node is now the leaf node
parent = node.parent
self.log.debug("Deleting cgroup %s/%s", parent, node.name)
parent.delete_cgroup(node.name) | airflow/contrib/task_runner/cgroup_task_runner.py |
apache/airflow | DatabricksHook._parse_host | def _parse_host(host):
urlparse_host = urlparse.urlparse(host).hostname
if urlparse_host:
return urlparse_host
else:
return host | The purpose of this function is to be robust to improper connection settings provided by users, specifically in the host field. For example, when users supply ``https://xx.cloud.databricks.com`` as the host, the protocol is stripped to obtain the bare hostname. | def _parse_host(host):
"""
The purpose of this function is to be robust to improper connections
settings provided by users, specifically in the host field.
For example -- when users supply ``https://xx.cloud.databricks.com`` as the
host, we must strip out the protocol to get the host.::
h = DatabricksHook()
assert h._parse_host('https://xx.cloud.databricks.com') == \
'xx.cloud.databricks.com'
In the case where users supply the correct ``xx.cloud.databricks.com`` as the
host, this function is a no-op.::
assert h._parse_host('xx.cloud.databricks.com') == 'xx.cloud.databricks.com'
"""
urlparse_host = urlparse.urlparse(host).hostname
if urlparse_host:
# In this case, host = https://xx.cloud.databricks.com
return urlparse_host
else:
# In this case, host = xx.cloud.databricks.com
return host | airflow/contrib/hooks/databricks_hook.py |
apache/airflow | DatabricksHook._do_api_call | def _do_api_call(self, endpoint_info, json):
method, endpoint = endpoint_info
url = 'https://{host}/{endpoint}'.format(
host=self._parse_host(self.databricks_conn.host),
endpoint=endpoint)
if 'token' in self.databricks_conn.extra_dejson:
self.log.info('Using token auth.')
auth = _TokenAuth(self.databricks_conn.extra_dejson['token'])
else:
self.log.info('Using basic auth.')
auth = (self.databricks_conn.login, self.databricks_conn.password)
if method == 'GET':
request_func = requests.get
elif method == 'POST':
request_func = requests.post
else:
raise AirflowException('Unexpected HTTP Method: ' + method)
attempt_num = 1
while True:
try:
response = request_func(
url,
json=json,
auth=auth,
headers=USER_AGENT_HEADER,
timeout=self.timeout_seconds)
response.raise_for_status()
return response.json()
except requests_exceptions.RequestException as e:
if not _retryable_error(e):
raise AirflowException('Response: {0}, Status Code: {1}'.format(
e.response.content, e.response.status_code))
self._log_request_error(attempt_num, e)
if attempt_num == self.retry_limit:
raise AirflowException(('API requests to Databricks failed {} times. ' +
'Giving up.').format(self.retry_limit))
attempt_num += 1
sleep(self.retry_delay) | Utility function to perform an API call with retries | def _do_api_call(self, endpoint_info, json):
"""
Utility function to perform an API call with retries
:param endpoint_info: Tuple of method and endpoint
:type endpoint_info: tuple[string, string]
:param json: Parameters for this API call.
:type json: dict
:return: If the api call returns a OK status code,
this function returns the response in JSON. Otherwise,
we throw an AirflowException.
:rtype: dict
"""
method, endpoint = endpoint_info
url = 'https://{host}/{endpoint}'.format(
host=self._parse_host(self.databricks_conn.host),
endpoint=endpoint)
if 'token' in self.databricks_conn.extra_dejson:
self.log.info('Using token auth.')
auth = _TokenAuth(self.databricks_conn.extra_dejson['token'])
else:
self.log.info('Using basic auth.')
auth = (self.databricks_conn.login, self.databricks_conn.password)
if method == 'GET':
request_func = requests.get
elif method == 'POST':
request_func = requests.post
else:
raise AirflowException('Unexpected HTTP Method: ' + method)
attempt_num = 1
while True:
try:
response = request_func(
url,
json=json,
auth=auth,
headers=USER_AGENT_HEADER,
timeout=self.timeout_seconds)
response.raise_for_status()
return response.json()
except requests_exceptions.RequestException as e:
if not _retryable_error(e):
# In this case, the user probably made a mistake.
# Don't retry.
raise AirflowException('Response: {0}, Status Code: {1}'.format(
e.response.content, e.response.status_code))
self._log_request_error(attempt_num, e)
if attempt_num == self.retry_limit:
raise AirflowException(('API requests to Databricks failed {} times. ' +
'Giving up.').format(self.retry_limit))
attempt_num += 1
sleep(self.retry_delay) | airflow/contrib/hooks/databricks_hook.py |
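A sketch of `_do_api_call`: `endpoint_info` is a `(method, endpoint)` tuple and the JSON body is passed through unchanged. The Databricks REST paths and payload below are illustrative assumptions; the hook's higher-level methods normally build them.

```python
from airflow.contrib.hooks.databricks_hook import DatabricksHook

hook = DatabricksHook()  # default Databricks connection assumed

run = hook._do_api_call(
    ('POST', 'api/2.0/jobs/runs/submit'),  # (method, endpoint) tuple
    {'run_name': 'adhoc',
     'existing_cluster_id': '1234-567890-abc123',
     'notebook_task': {'notebook_path': '/Users/me/nb'}},
)
state = hook._do_api_call(('GET', 'api/2.0/jobs/runs/get'), {'run_id': run['run_id']})
```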
apache/airflow | SalesforceHook.get_conn | def get_conn(self):
if not self.conn:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
self.conn = Salesforce(
username=connection.login,
password=connection.password,
security_token=extras['security_token'],
instance_url=connection.host,
sandbox=extras.get('sandbox', False)
)
return self.conn | Sign into Salesforce, only if we are not already signed in. | def get_conn(self):
"""
Sign into Salesforce, only if we are not already signed in.
"""
if not self.conn:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
self.conn = Salesforce(
username=connection.login,
password=connection.password,
security_token=extras['security_token'],
instance_url=connection.host,
sandbox=extras.get('sandbox', False)
)
return self.conn | airflow/contrib/hooks/salesforce_hook.py |
apache/airflow | SalesforceHook.make_query | def make_query(self, query):
conn = self.get_conn()
self.log.info("Querying for all objects")
query_results = conn.query_all(query)
self.log.info("Received results: Total size: %s; Done: %s",
query_results['totalSize'], query_results['done'])
return query_results | Make a query to Salesforce. | def make_query(self, query):
"""
Make a query to Salesforce.
:param query: The query to make to Salesforce.
:type query: str
:return: The query result.
:rtype: dict
"""
conn = self.get_conn()
self.log.info("Querying for all objects")
query_results = conn.query_all(query)
self.log.info("Received results: Total size: %s; Done: %s",
query_results['totalSize'], query_results['done'])
return query_results | airflow/contrib/hooks/salesforce_hook.py |
apache/airflow | SalesforceHook.describe_object | def describe_object(self, obj):
conn = self.get_conn()
return conn.__getattr__(obj).describe() | Get the description of an object from Salesforce. This description is the object's schema and some extra metadata that Salesforce stores for each object. | def describe_object(self, obj):
"""
Get the description of an object from Salesforce.
This description is the object's schema and
some extra metadata that Salesforce stores for each object.
:param obj: The name of the Salesforce object that we are getting a description of.
:type obj: str
:return: the description of the Salesforce object.
:rtype: dict
"""
conn = self.get_conn()
return conn.__getattr__(obj).describe() | airflow/contrib/hooks/salesforce_hook.py |
apache/airflow | SalesforceHook.get_available_fields | def get_available_fields(self, obj):
self.get_conn()
obj_description = self.describe_object(obj)
return [field['name'] for field in obj_description['fields']] | Get a list of all available fields for an object. | def get_available_fields(self, obj):
"""
Get a list of all available fields for an object.
:param obj: The name of the Salesforce object that we are getting a description of.
:type obj: str
:return: the names of the fields.
:rtype: list of str
"""
self.get_conn()
obj_description = self.describe_object(obj)
return [field['name'] for field in obj_description['fields']] | airflow/contrib/hooks/salesforce_hook.py |
apache/airflow | SalesforceHook.get_object_from_salesforce | def get_object_from_salesforce(self, obj, fields):
query = "SELECT {} FROM {}".format(",".join(fields), obj)
self.log.info("Making query to Salesforce: %s",
query if len(query) < 30 else " ... ".join([query[:15], query[-15:]]))
return self.make_query(query) | Get all instances of the `object` from Salesforce. For each model, only get the fields specified in fields. All we really do underneath the hood is run SELECT <fields> FROM <obj>. | def get_object_from_salesforce(self, obj, fields):
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
:param obj: The object name to get from Salesforce.
:type obj: str
:param fields: The fields to get from the object.
:type fields: iterable
:return: all instances of the object from Salesforce.
:rtype: dict
"""
query = "SELECT {} FROM {}".format(",".join(fields), obj)
self.log.info("Making query to Salesforce: %s",
query if len(query) < 30 else " ... ".join([query[:15], query[-15:]]))
return self.make_query(query) | airflow/contrib/hooks/salesforce_hook.py |
apache/airflow | SalesforceHook._to_timestamp | def _to_timestamp(cls, column):
try:
column = pd.to_datetime(column)
except ValueError:
log = LoggingMixin().log
log.warning("Could not convert field to timestamps: %s", column.name)
return column
converted = []
for value in column:
try:
converted.append(value.timestamp())
except (ValueError, AttributeError):
converted.append(pd.np.NaN)
return pd.Series(converted, index=column.index) | Convert a column of a dataframe to UNIX timestamps if applicable | def _to_timestamp(cls, column):
"""
Convert a column of a dataframe to UNIX timestamps if applicable
:param column: A Series object representing a column of a dataframe.
:type column: pd.Series
:return: a new series that maintains the same index as the original
:rtype: pd.Series
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
column = pd.to_datetime(column)
except ValueError:
log = LoggingMixin().log
log.warning("Could not convert field to timestamps: %s", column.name)
return column
# now convert the newly created datetimes into timestamps
# we have to be careful here
# because NaT cannot be converted to a timestamp
# so we have to return NaN
converted = []
for value in column:
try:
converted.append(value.timestamp())
except (ValueError, AttributeError):
converted.append(pd.np.NaN)
return pd.Series(converted, index=column.index) | airflow/contrib/hooks/salesforce_hook.py |
apache/airflow | SalesforceHook.write_object_to_file | def write_object_to_file(self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False):
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {}".format(fmt))
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [column.lower() for column in df.columns]
if coerce_to_timestamp and df.shape[0] > 0:
object_name = query_results[0]['attributes']['type']
self.log.info("Coercing timestamps for: %s", object_name)
schema = self.describe_object(object_name)
possible_timestamp_cols = [
field['name'].lower()
for field in schema['fields']
if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
if fmt == "csv":
self.log.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\r\n", "").str.replace("\n", "")
)
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df | Write query results to file. Acceptable formats are csv, json and ndjson. | def write_object_to_file(self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False):
"""
Write query results to file.
Acceptable formats are:
- csv:
comma-separated-values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
JSON array but each element is new-line delimited instead of comma delimited like in `json`
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.
By default, this function will try and leave all values as they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
This is can be greatly beneficial as it will make all of your datetime fields look the same,
and makes it easier to work with in other database environments
:param query_results: the results from a SQL query
:type query_results: list of dict
:param filename: the name of the file where the data should be dumped to
:type filename: str
:param fmt: the format you want the output in. Default: 'csv'
:type fmt: str
:param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
False if you want them to be left in the same format as they were in Salesforce.
Leaving the value as False will result in datetimes being strings. Default: False
:type coerce_to_timestamp: bool
:param record_time_added: True if you want to add a Unix timestamp field
to the resulting data that marks when the data was fetched from Salesforce. Default: False
:type record_time_added: bool
:return: the dataframe that gets written to the file.
:rtype: pd.Dataframe
"""
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {}".format(fmt))
# this line right here will convert all integers to floats
# if there are any None/np.nan values in the column
# that's because None/np.nan cannot exist in an integer column
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [column.lower() for column in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
# features that are DATE or DATETIME
if coerce_to_timestamp and df.shape[0] > 0:
# get the object name out of the query results
# it's stored in the "attributes" dictionary
# for each returned record
object_name = query_results[0]['attributes']['type']
self.log.info("Coercing timestamps for: %s", object_name)
schema = self.describe_object(object_name)
# possible columns that can be converted to timestamps
# are the ones that are either date or datetime types
# strings are too general and we risk unintentional conversion
possible_timestamp_cols = [
field['name'].lower()
for field in schema['fields']
if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
# write the CSV or JSON file depending on the option
# NOTE:
# datetimes here are an issue.
# There is no good way to manage the difference
# for to_json, the options are an epoch or a ISO string
# but for to_csv, it will be a string output by datetime
# For JSON we decided to output the epoch timestamp in seconds
# (as is fairly standard for JavaScript)
# And for csv, we do a string
if fmt == "csv":
# there are also a ton of newline objects that mess up our ability to write to csv
# we remove these newlines so that the output is a valid CSV format
self.log.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\r\n", "").str.replace("\n", "")
)
# write the dataframe
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df | airflow/contrib/hooks/salesforce_hook.py |
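Chaining the Salesforce hook methods shown above: list the object's fields, query them, then dump the records (the `attributes` entry of the first record supplies the object name used for timestamp coercion). The connection id and output path are assumptions.

```python
from airflow.contrib.hooks.salesforce_hook import SalesforceHook

sf_hook = SalesforceHook(conn_id='salesforce_default')  # constructor arg assumed
fields = sf_hook.get_available_fields('Lead')
results = sf_hook.get_object_from_salesforce('Lead', fields)
sf_hook.write_object_to_file(
    results['records'],         # list of record dicts, each carrying 'attributes'
    filename='/tmp/leads.ndjson',
    fmt='ndjson',
    coerce_to_timestamp=True,   # date/datetime fields become Unix timestamps
    record_time_added=True,
)
```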
apache/airflow | MongoHook.get_conn | def get_conn(self):
if self.client is not None:
return self.client
options = self.extras
if options.get('ssl', False):
options.update({'ssl_cert_reqs': CERT_NONE})
self.client = MongoClient(self.uri, **options)
return self.client | Fetches PyMongo Client | def get_conn(self):
"""
Fetches PyMongo Client
"""
if self.client is not None:
return self.client
# Mongo Connection Options dict that is unpacked when passed to MongoClient
options = self.extras
# If we are using SSL disable requiring certs from specific hostname
if options.get('ssl', False):
options.update({'ssl_cert_reqs': CERT_NONE})
self.client = MongoClient(self.uri, **options)
return self.client | airflow/contrib/hooks/mongo_hook.py |
apache/airflow | MongoHook.get_collection | def get_collection(self, mongo_collection, mongo_db=None):
mongo_db = mongo_db if mongo_db is not None else self.connection.schema
mongo_conn = self.get_conn()
return mongo_conn.get_database(mongo_db).get_collection(mongo_collection) | Fetches a mongo collection object for querying. Uses connection schema as DB unless specified. | def get_collection(self, mongo_collection, mongo_db=None):
"""
Fetches a mongo collection object for querying.
Uses connection schema as DB unless specified.
"""
mongo_db = mongo_db if mongo_db is not None else self.connection.schema
mongo_conn = self.get_conn()
return mongo_conn.get_database(mongo_db).get_collection(mongo_collection) | airflow/contrib/hooks/mongo_hook.py |
apache/airflow | MongoHook.replace_many | def replace_many(self, mongo_collection, docs,
filter_docs=None, mongo_db=None, upsert=False, collation=None,
**kwargs):
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if not filter_docs:
filter_docs = [{'_id': doc['_id']} for doc in docs]
requests = [
ReplaceOne(
filter_docs[i],
docs[i],
upsert=upsert,
collation=collation)
for i in range(len(docs))
]
return collection.bulk_write(requests, **kwargs) | Replaces many documents in a mongo collection. Uses bulk_write with multiple ReplaceOne operations | def replace_many(self, mongo_collection, docs,
filter_docs=None, mongo_db=None, upsert=False, collation=None,
**kwargs):
"""
Replaces many documents in a mongo collection.
Uses bulk_write with multiple ReplaceOne operations
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
.. note::
If no ``filter_docs``are given, it is assumed that all
replacement documents contain the ``_id`` field which are then
used as filters.
:param mongo_collection: The name of the collection to update.
:type mongo_collection: str
:param docs: The new documents.
:type docs: list[dict]
:param filter_docs: A list of queries that match the documents to replace.
Can be omitted; then the _id fields from docs will be used.
:type filter_docs: list[dict]
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
:param upsert: If ``True``, perform an insert if no documents
match the filters for the replace operation.
:type upsert: bool
:param collation: An instance of
:class:`~pymongo.collation.Collation`. This option is only
supported on MongoDB 3.4 and above.
:type collation: pymongo.collation.Collation
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if not filter_docs:
filter_docs = [{'_id': doc['_id']} for doc in docs]
requests = [
ReplaceOne(
filter_docs[i],
docs[i],
upsert=upsert,
collation=collation)
for i in range(len(docs))
]
return collection.bulk_write(requests, **kwargs) | airflow/contrib/hooks/mongo_hook.py |
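When `filter_docs` is omitted, each document's `_id` acts as its own filter, so the call below performs an upsert keyed by `_id`; the collection and connection names are assumed.

```python
from airflow.contrib.hooks.mongo_hook import MongoHook

mongo_hook = MongoHook(conn_id='mongo_default')  # constructor arg assumed
docs = [
    {'_id': 1, 'status': 'done'},
    {'_id': 2, 'status': 'running'},
]
mongo_hook.replace_many('task_status', docs, upsert=True)
```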
apache/airflow | ImapHook.has_mail_attachment | def has_mail_attachment(self, name, mail_folder='INBOX', check_regex=False):
mail_attachments = self._retrieve_mails_attachments_by_name(name,
mail_folder,
check_regex,
latest_only=True)
return len(mail_attachments) > 0 | Checks the mail folder for mails containing attachments with the given name. | def has_mail_attachment(self, name, mail_folder='INBOX', check_regex=False):
"""
Checks the mail folder for mails containing attachments with the given name.
:param name: The name of the attachment that will be searched for.
:type name: str
:param mail_folder: The mail folder where to look at.
:type mail_folder: str
:param check_regex: Checks the name for a regular expression.
:type check_regex: bool
:returns: True if there is an attachment with the given name and False if not.
:rtype: bool
"""
mail_attachments = self._retrieve_mails_attachments_by_name(name,
mail_folder,
check_regex,
latest_only=True)
return len(mail_attachments) > 0 | airflow/contrib/hooks/imap_hook.py |
apache/airflow | ImapHook.retrieve_mail_attachments | def retrieve_mail_attachments(self,
name,
mail_folder='INBOX',
check_regex=False,
latest_only=False,
not_found_mode='raise'):
mail_attachments = self._retrieve_mails_attachments_by_name(name,
mail_folder,
check_regex,
latest_only)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
return mail_attachments | Retrieves mail's attachments in the mail folder by its name. | def retrieve_mail_attachments(self,
name,
mail_folder='INBOX',
check_regex=False,
latest_only=False,
not_found_mode='raise'):
"""
Retrieves mail's attachments in the mail folder by its name.
:param name: The name of the attachment that will be downloaded.
:type name: str
:param mail_folder: The mail folder to look in.
:type mail_folder: str
:param check_regex: If True, the given name is treated as a regular expression.
:type check_regex: bool
:param latest_only: If set to True it will only retrieve
the first matched attachment.
:type latest_only: bool
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
:type not_found_mode: str
:returns: a list of tuples, each containing the attachment filename and its payload.
:rtype: list of tuple
"""
mail_attachments = self._retrieve_mails_attachments_by_name(name,
mail_folder,
check_regex,
latest_only)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
return mail_attachments | airflow/contrib/hooks/imap_hook.py |
apache/airflow | ImapHook.download_mail_attachments | def download_mail_attachments(self,
name,
local_output_directory,
mail_folder='INBOX',
check_regex=False,
latest_only=False,
not_found_mode='raise'):
mail_attachments = self._retrieve_mails_attachments_by_name(name,
mail_folder,
check_regex,
latest_only)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
self._create_files(mail_attachments, local_output_directory) | Downloads mail's attachments in the mail folder by its name to the local directory. | def download_mail_attachments(self,
name,
local_output_directory,
mail_folder='INBOX',
check_regex=False,
latest_only=False,
not_found_mode='raise'):
"""
Downloads mail's attachments in the mail folder by its name to the local directory.
:param name: The name of the attachment that will be downloaded.
:type name: str
:param local_output_directory: The output directory on the local machine
where the files will be downloaded to.
:type local_output_directory: str
:param mail_folder: The mail folder to look in.
:type mail_folder: str
:param check_regex: If True, the given name is treated as a regular expression.
:type check_regex: bool
:param latest_only: If set to True it will only download
the first matched attachment.
:type latest_only: bool
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
:type not_found_mode: str
"""
mail_attachments = self._retrieve_mails_attachments_by_name(name,
mail_folder,
check_regex,
latest_only)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
self._create_files(mail_attachments, local_output_directory) | airflow/contrib/hooks/imap_hook.py |
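A sketch of the attachment helpers above, assuming an IMAP connection is configured and that the hook is used as a context manager (that usage pattern, the ``imap_conn_id`` argument, the attachment pattern and the output directory are all assumptions):

```python
from airflow.contrib.hooks.imap_hook import ImapHook

# imap_conn_id, attachment pattern and target directory are placeholders
with ImapHook(imap_conn_id='imap_default') as imap_hook:
    if imap_hook.has_mail_attachment(name=r'report_.*\.csv', check_regex=True):
        # Only pull the attachment from the latest matching mail and warn
        # (instead of raising) if nothing is found in the meantime.
        imap_hook.download_mail_attachments(
            name=r'report_.*\.csv',
            local_output_directory='/tmp/reports',
            check_regex=True,
            latest_only=True,
            not_found_mode='warn',
        )
```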
apache/airflow | Mail.get_attachments_by_name | def get_attachments_by_name(self, name, check_regex, find_first=False):
attachments = []
for part in self.mail.walk():
mail_part = MailPart(part)
if mail_part.is_attachment():
found_attachment = mail_part.has_matching_name(name) if check_regex \
else mail_part.has_equal_name(name)
if found_attachment:
file_name, file_payload = mail_part.get_file()
self.log.info('Found attachment: {}'.format(file_name))
attachments.append((file_name, file_payload))
if find_first:
break
return attachments | Gets all attachments by name for the mail. | def get_attachments_by_name(self, name, check_regex, find_first=False):
"""
Gets all attachments by name for the mail.
:param name: The name of the attachment to look for.
:type name: str
:param check_regex: If True, the given name is treated as a regular expression.
:type check_regex: bool
:param find_first: If set to True it will only find the first match and then quit.
:type find_first: bool
:returns: a list of tuples each containing name and payload
where the attachment's name matches the given name.
:rtype: list of tuple
"""
attachments = []
for part in self.mail.walk():
mail_part = MailPart(part)
if mail_part.is_attachment():
found_attachment = mail_part.has_matching_name(name) if check_regex \
else mail_part.has_equal_name(name)
if found_attachment:
file_name, file_payload = mail_part.get_file()
self.log.info('Found attachment: {}'.format(file_name))
attachments.append((file_name, file_payload))
if find_first:
break
return attachments | airflow/contrib/hooks/imap_hook.py |
apache/airflow | MailPart.get_file | def get_file(self):
return self.part.get_filename(), self.part.get_payload(decode=True) | Gets the file including name and payload. | def get_file(self):
"""
Gets the file including name and payload.
:returns: the part's name and payload.
:rtype: tuple
"""
return self.part.get_filename(), self.part.get_payload(decode=True) | airflow/contrib/hooks/imap_hook.py |
apache/airflow | AwsFirehoseHook.put_records | def put_records(self, records):
firehose_conn = self.get_conn()
response = firehose_conn.put_record_batch(
DeliveryStreamName=self.delivery_stream,
Records=records
)
return response | Write batch records to Kinesis Firehose | def put_records(self, records):
"""
Write batch records to Kinesis Firehose
"""
firehose_conn = self.get_conn()
response = firehose_conn.put_record_batch(
DeliveryStreamName=self.delivery_stream,
Records=records
)
return response | airflow/contrib/hooks/aws_firehose_hook.py |
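A short sketch of ``put_records``, assuming an AWS connection and an existing delivery stream; both identifiers below are placeholders:

```python
from airflow.contrib.hooks.aws_firehose_hook import AwsFirehoseHook

# The connection id and delivery stream name are illustrative only.
hook = AwsFirehoseHook(delivery_stream='example-events',
                       aws_conn_id='aws_default')
records = [
    {'Data': b'{"event": "click"}\n'},
    {'Data': b'{"event": "view"}\n'},
]
response = hook.put_records(records)
# put_record_batch reports partial failures rather than raising, so check it.
print('Failed records:', response['FailedPutCount'])
```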
apache/airflow | send_email | def send_email(to, subject, html_content,
files=None, dryrun=False, cc=None, bcc=None,
mime_subtype='mixed', mime_charset='utf-8', **kwargs):
path, attr = configuration.conf.get('email', 'EMAIL_BACKEND').rsplit('.', 1)
module = importlib.import_module(path)
backend = getattr(module, attr)
to = get_email_address_list(to)
to = ", ".join(to)
return backend(to, subject, html_content, files=files,
dryrun=dryrun, cc=cc, bcc=bcc,
mime_subtype=mime_subtype, mime_charset=mime_charset, **kwargs) | Send email using backend specified in EMAIL_BACKEND. | def send_email(to, subject, html_content,
files=None, dryrun=False, cc=None, bcc=None,
mime_subtype='mixed', mime_charset='utf-8', **kwargs):
"""
Send email using backend specified in EMAIL_BACKEND.
"""
path, attr = configuration.conf.get('email', 'EMAIL_BACKEND').rsplit('.', 1)
module = importlib.import_module(path)
backend = getattr(module, attr)
to = get_email_address_list(to)
to = ", ".join(to)
return backend(to, subject, html_content, files=files,
dryrun=dryrun, cc=cc, bcc=bcc,
mime_subtype=mime_subtype, mime_charset=mime_charset, **kwargs) | airflow/utils/email.py |
apache/airflow | send_email_smtp | def send_email_smtp(to, subject, html_content, files=None,
dryrun=False, cc=None, bcc=None,
mime_subtype='mixed', mime_charset='utf-8',
**kwargs):
smtp_mail_from = configuration.conf.get('smtp', 'SMTP_MAIL_FROM')
to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg['Subject'] = subject
msg['From'] = smtp_mail_from
msg['To'] = ", ".join(to)
recipients = to
if cc:
cc = get_email_address_list(cc)
msg['CC'] = ", ".join(cc)
recipients = recipients + cc
if bcc:
bcc = get_email_address_list(bcc)
recipients = recipients + bcc
msg['Date'] = formatdate(localtime=True)
mime_text = MIMEText(html_content, 'html', mime_charset)
msg.attach(mime_text)
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, "rb") as f:
part = MIMEApplication(
f.read(),
Name=basename
)
part['Content-Disposition'] = 'attachment; filename="%s"' % basename
part['Content-ID'] = '<%s>' % basename
msg.attach(part)
send_MIME_email(smtp_mail_from, recipients, msg, dryrun) | Send an email with html content | def send_email_smtp(to, subject, html_content, files=None,
dryrun=False, cc=None, bcc=None,
mime_subtype='mixed', mime_charset='utf-8',
**kwargs):
"""
Send an email with html content
>>> send_email('test@example.com', 'foo', '<b>Foo</b> bar', ['/dev/null'], dryrun=True)
"""
smtp_mail_from = configuration.conf.get('smtp', 'SMTP_MAIL_FROM')
to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg['Subject'] = subject
msg['From'] = smtp_mail_from
msg['To'] = ", ".join(to)
recipients = to
if cc:
cc = get_email_address_list(cc)
msg['CC'] = ", ".join(cc)
recipients = recipients + cc
if bcc:
# don't add bcc in header
bcc = get_email_address_list(bcc)
recipients = recipients + bcc
msg['Date'] = formatdate(localtime=True)
mime_text = MIMEText(html_content, 'html', mime_charset)
msg.attach(mime_text)
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, "rb") as f:
part = MIMEApplication(
f.read(),
Name=basename
)
part['Content-Disposition'] = 'attachment; filename="%s"' % basename
part['Content-ID'] = '<%s>' % basename
msg.attach(part)
send_MIME_email(smtp_mail_from, recipients, msg, dryrun) | airflow/utils/email.py |
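A quick sketch exercising the SMTP backend without sending anything: ``dryrun=True`` builds the MIME message but skips the SMTP call, so the addresses below are purely illustrative.

```python
from airflow.utils.email import send_email_smtp

send_email_smtp(
    to=['alice@example.com', 'bob@example.com'],
    subject='Nightly DAG report',
    html_content='<b>All tasks succeeded</b>',
    cc='lead@example.com',
    dryrun=True,  # build the message only; do not contact the SMTP server
)
```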
apache/airflow | WasbHook.check_for_blob | def check_for_blob(self, container_name, blob_name, **kwargs):
return self.connection.exists(container_name, blob_name, **kwargs) | Check if a blob exists on Azure Blob Storage. | def check_for_blob(self, container_name, blob_name, **kwargs):
"""
Check if a blob exists on Azure Blob Storage.
:param container_name: Name of the container.
:type container_name: str
:param blob_name: Name of the blob.
:type blob_name: str
:param kwargs: Optional keyword arguments that
`BlockBlobService.exists()` takes.
:type kwargs: object
:return: True if the blob exists, False otherwise.
:rtype: bool
"""
return self.connection.exists(container_name, blob_name, **kwargs) | airflow/contrib/hooks/wasb_hook.py |
apache/airflow | WasbHook.check_for_prefix | def check_for_prefix(self, container_name, prefix, **kwargs):
matches = self.connection.list_blobs(container_name, prefix,
num_results=1, **kwargs)
return len(list(matches)) > 0 | Check if a prefix exists on Azure Blob storage. | def check_for_prefix(self, container_name, prefix, **kwargs):
"""
Check if a prefix exists on Azure Blob storage.
:param container_name: Name of the container.
:type container_name: str
:param prefix: Prefix of the blob.
:type prefix: str
:param kwargs: Optional keyword arguments that
`BlockBlobService.list_blobs()` takes.
:type kwargs: object
:return: True if blobs matching the prefix exist, False otherwise.
:rtype: bool
"""
matches = self.connection.list_blobs(container_name, prefix,
num_results=1, **kwargs)
return len(list(matches)) > 0 | airflow/contrib/hooks/wasb_hook.py |
apache/airflow | WasbHook.load_string | def load_string(self, string_data, container_name, blob_name, **kwargs):
self.connection.create_blob_from_text(container_name, blob_name,
string_data, **kwargs) | Upload a string to Azure Blob Storage. | def load_string(self, string_data, container_name, blob_name, **kwargs):
"""
Upload a string to Azure Blob Storage.
:param string_data: String to load.
:type string_data: str
:param container_name: Name of the container.
:type container_name: str
:param blob_name: Name of the blob.
:type blob_name: str
:param kwargs: Optional keyword arguments that
`BlockBlobService.create_blob_from_text()` takes.
:type kwargs: object
"""
# Reorder the argument order from airflow.hooks.S3_hook.load_string.
self.connection.create_blob_from_text(container_name, blob_name,
string_data, **kwargs) | airflow/contrib/hooks/wasb_hook.py |
apache/airflow | WasbHook.read_file | def read_file(self, container_name, blob_name, **kwargs):
return self.connection.get_blob_to_text(container_name,
blob_name,
**kwargs).content | Read a file from Azure Blob Storage and return as a string. | def read_file(self, container_name, blob_name, **kwargs):
"""
Read a file from Azure Blob Storage and return as a string.
:param container_name: Name of the container.
:type container_name: str
:param blob_name: Name of the blob.
:type blob_name: str
:param kwargs: Optional keyword arguments that
`BlockBlobService.get_blob_to_text()` takes.
:type kwargs: object
"""
return self.connection.get_blob_to_text(container_name,
blob_name,
**kwargs).content | airflow/contrib/hooks/wasb_hook.py |
apache/airflow | WasbHook.delete_file | def delete_file(self, container_name, blob_name, is_prefix=False,
ignore_if_missing=False, **kwargs):
if is_prefix:
blobs_to_delete = [
blob.name for blob in self.connection.list_blobs(
container_name, prefix=blob_name, **kwargs
)
]
elif self.check_for_blob(container_name, blob_name):
blobs_to_delete = [blob_name]
else:
blobs_to_delete = []
if not ignore_if_missing and len(blobs_to_delete) == 0:
raise AirflowException('Blob(s) not found: {}'.format(blob_name))
for blob_uri in blobs_to_delete:
self.log.info("Deleting blob: " + blob_uri)
self.connection.delete_blob(container_name,
blob_uri,
delete_snapshots='include',
**kwargs) | Delete a file from Azure Blob Storage. | def delete_file(self, container_name, blob_name, is_prefix=False,
ignore_if_missing=False, **kwargs):
"""
Delete a file from Azure Blob Storage.
:param container_name: Name of the container.
:type container_name: str
:param blob_name: Name of the blob.
:type blob_name: str
:param is_prefix: If blob_name is a prefix, delete all matching files
:type is_prefix: bool
:param ignore_if_missing: if True, then return success even if the
blob does not exist.
:type ignore_if_missing: bool
:param kwargs: Optional keyword arguments that
`BlockBlobService.delete_blob()` takes.
:type kwargs: object
"""
if is_prefix:
blobs_to_delete = [
blob.name for blob in self.connection.list_blobs(
container_name, prefix=blob_name, **kwargs
)
]
elif self.check_for_blob(container_name, blob_name):
blobs_to_delete = [blob_name]
else:
blobs_to_delete = []
if not ignore_if_missing and len(blobs_to_delete) == 0:
raise AirflowException('Blob(s) not found: {}'.format(blob_name))
for blob_uri in blobs_to_delete:
self.log.info("Deleting blob: " + blob_uri)
self.connection.delete_blob(container_name,
blob_uri,
delete_snapshots='include',
**kwargs) | airflow/contrib/hooks/wasb_hook.py |
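A sketch tying the WasbHook methods above together; the connection id, container and blob names are assumptions for illustration:

```python
from airflow.contrib.hooks.wasb_hook import WasbHook

hook = WasbHook(wasb_conn_id='wasb_default')  # conn id is an assumption
hook.load_string('hello from airflow', 'my-container', 'greetings/hello.txt')

if hook.check_for_blob('my-container', 'greetings/hello.txt'):
    print(hook.read_file('my-container', 'greetings/hello.txt'))

# Remove everything under the prefix; ignore_if_missing avoids an
# AirflowException when no blob matches.
hook.delete_file('my-container', 'greetings/', is_prefix=True,
                 ignore_if_missing=True)
```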
apache/airflow | DiscordWebhookOperator.execute | def execute(self, context):
self.hook = DiscordWebhookHook(
self.http_conn_id,
self.webhook_endpoint,
self.message,
self.username,
self.avatar_url,
self.tts,
self.proxy
)
self.hook.execute() | Call the DiscordWebhookHook to post message | def execute(self, context):
"""
Call the DiscordWebhookHook to post message
"""
self.hook = DiscordWebhookHook(
self.http_conn_id,
self.webhook_endpoint,
self.message,
self.username,
self.avatar_url,
self.tts,
self.proxy
)
self.hook.execute() | airflow/contrib/operators/discord_webhook_operator.py |
apache/airflow | AzureFileShareHook.get_conn | def get_conn(self):
conn = self.get_connection(self.conn_id)
service_options = conn.extra_dejson
return FileService(account_name=conn.login,
account_key=conn.password, **service_options) | Return the FileService object. | def get_conn(self):
"""Return the FileService object."""
conn = self.get_connection(self.conn_id)
service_options = conn.extra_dejson
return FileService(account_name=conn.login,
account_key=conn.password, **service_options) | airflow/contrib/hooks/azure_fileshare_hook.py |
apache/airflow | AzureFileShareHook.check_for_directory | def check_for_directory(self, share_name, directory_name, **kwargs):
return self.connection.exists(share_name, directory_name,
**kwargs) | Check if a directory exists on Azure File Share. | def check_for_directory(self, share_name, directory_name, **kwargs):
"""
Check if a directory exists on Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param kwargs: Optional keyword arguments that
`FileService.exists()` takes.
:type kwargs: object
:return: True if the directory exists, False otherwise.
:rtype: bool
"""
return self.connection.exists(share_name, directory_name,
**kwargs) | airflow/contrib/hooks/azure_fileshare_hook.py |
apache/airflow | AzureFileShareHook.check_for_file | def check_for_file(self, share_name, directory_name, file_name, **kwargs):
return self.connection.exists(share_name, directory_name,
file_name, **kwargs) | Check if a file exists on Azure File Share. | def check_for_file(self, share_name, directory_name, file_name, **kwargs):
"""
Check if a file exists on Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.exists()` takes.
:type kwargs: object
:return: True if the file exists, False otherwise.
:rtype: bool
"""
return self.connection.exists(share_name, directory_name,
file_name, **kwargs) | airflow/contrib/hooks/azure_fileshare_hook.py |
apache/airflow | AzureFileShareHook.list_directories_and_files | def list_directories_and_files(self, share_name, directory_name=None, **kwargs):
return self.connection.list_directories_and_files(share_name,
directory_name,
**kwargs) | Return the list of directories and files stored on a Azure File Share. | def list_directories_and_files(self, share_name, directory_name=None, **kwargs):
"""
Return the list of directories and files stored on an Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param kwargs: Optional keyword arguments that
`FileService.list_directories_and_files()` takes.
:type kwargs: object
:return: A list of files and directories
:rtype: list
"""
return self.connection.list_directories_and_files(share_name,
directory_name,
**kwargs) | airflow/contrib/hooks/azure_fileshare_hook.py |
apache/airflow | AzureFileShareHook.create_directory | def create_directory(self, share_name, directory_name, **kwargs):
return self.connection.create_directory(share_name, directory_name, **kwargs) | Create a new directory on a Azure File Share. | def create_directory(self, share_name, directory_name, **kwargs):
"""
Create a new directory on an Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param kwargs: Optional keyword arguments that
`FileService.create_directory()` takes.
:type kwargs: object
:return: True if the directory was created, False otherwise.
:rtype: bool
"""
return self.connection.create_directory(share_name, directory_name, **kwargs) | airflow/contrib/hooks/azure_fileshare_hook.py |
apache/airflow | AzureFileShareHook.load_file | def load_file(self, file_path, share_name, directory_name, file_name, **kwargs):
self.connection.create_file_from_path(share_name, directory_name,
file_name, file_path, **kwargs) | Upload a file to Azure File Share. | def load_file(self, file_path, share_name, directory_name, file_name, **kwargs):
"""
Upload a file to Azure File Share.
:param file_path: Path to the file to load.
:type file_path: str
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.create_file_from_path()` takes.
:type kwargs: object
"""
self.connection.create_file_from_path(share_name, directory_name,
file_name, file_path, **kwargs) | airflow/contrib/hooks/azure_fileshare_hook.py |
apache/airflow | AzureFileShareHook.load_string | def load_string(self, string_data, share_name, directory_name, file_name, **kwargs):
self.connection.create_file_from_text(share_name, directory_name,
file_name, string_data, **kwargs) | Upload a string to Azure File Share. | def load_string(self, string_data, share_name, directory_name, file_name, **kwargs):
"""
Upload a string to Azure File Share.
:param string_data: String to load.
:type string_data: str
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.create_file_from_text()` takes.
:type kwargs: object
"""
self.connection.create_file_from_text(share_name, directory_name,
file_name, string_data, **kwargs) | airflow/contrib/hooks/azure_fileshare_hook.py |
apache/airflow | AzureFileShareHook.load_stream | def load_stream(self, stream, share_name, directory_name, file_name, count, **kwargs):
self.connection.create_file_from_stream(share_name, directory_name,
file_name, stream, count, **kwargs) | Upload a stream to Azure File Share. | def load_stream(self, stream, share_name, directory_name, file_name, count, **kwargs):
"""
Upload a stream to Azure File Share.
:param stream: Opened file/stream to upload as the file content.
:type stream: file-like
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param count: Size of the stream in bytes
:type count: int
:param kwargs: Optional keyword arguments that
`FileService.create_file_from_stream()` takes.
:type kwargs: object
"""
self.connection.create_file_from_stream(share_name, directory_name,
file_name, stream, count, **kwargs) | airflow/contrib/hooks/azure_fileshare_hook.py |
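A sketch of the file-share helpers, relying on the hook's default Azure connection; the share, directory and file names are illustrative:

```python
from airflow.contrib.hooks.azure_fileshare_hook import AzureFileShareHook

hook = AzureFileShareHook()  # uses the hook's default connection
hook.create_directory('my-share', 'reports')
hook.load_string('a,b\n1,2\n', 'my-share', 'reports', 'daily.csv')

if hook.check_for_file('my-share', 'reports', 'daily.csv'):
    for entry in hook.list_directories_and_files('my-share', 'reports'):
        print(entry.name)
```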
apache/airflow | GoogleCloudStorageHook.copy | def copy(self, source_bucket, source_object, destination_bucket=None,
destination_object=None):
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and \
source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.get_bucket(source_bucket)
source_object = source_bucket.blob(source_object)
destination_bucket = client.get_bucket(destination_bucket)
destination_object = source_bucket.copy_blob(
blob=source_object,
destination_bucket=destination_bucket,
new_name=destination_object)
self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object.name, destination_bucket.name) | Copies an object from a bucket to another, with renaming if requested. destination_bucket or destination_object can be omitted, in which case source bucket/object is used, but not both. | def copy(self, source_bucket, source_object, destination_bucket=None,
destination_object=None):
"""
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination bucket of the object to be copied to.
Can be omitted; then the same bucket is used.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and \
source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.get_bucket(source_bucket)
source_object = source_bucket.blob(source_object)
destination_bucket = client.get_bucket(destination_bucket)
destination_object = source_bucket.copy_blob(
blob=source_object,
destination_bucket=destination_bucket,
new_name=destination_object)
self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object.name, destination_bucket.name) | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | GoogleCloudStorageHook.download | def download(self, bucket_name, object_name, filename=None):
client = self.get_conn()
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
if filename:
blob.download_to_filename(filename)
self.log.info('File downloaded to %s', filename)
return blob.download_as_string() | Get a file from Google Cloud Storage. | def download(self, bucket_name, object_name, filename=None):
"""
Get a file from Google Cloud Storage.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param filename: If set, a local file path where the file should be written to.
:type filename: str
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
if filename:
blob.download_to_filename(filename)
self.log.info('File downloaded to %s', filename)
return blob.download_as_string() | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | GoogleCloudStorageHook.upload | def upload(self, bucket_name, object_name, filename,
mime_type='application/octet-stream', gzip=False):
if gzip:
filename_gz = filename + '.gz'
with open(filename, 'rb') as f_in:
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.upload_from_filename(filename=filename,
content_type=mime_type)
if gzip:
os.remove(filename)
self.log.info('File %s uploaded to %s in %s bucket', filename, object_name, bucket_name) | Uploads a local file to Google Cloud Storage. | def upload(self, bucket_name, object_name, filename,
mime_type='application/octet-stream', gzip=False):
"""
Uploads a local file to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param object_name: The object name to set when uploading the local file.
:type object_name: str
:param filename: The local file path to the file to be uploaded.
:type filename: str
:param mime_type: The MIME type to set when uploading the file.
:type mime_type: str
:param gzip: Option to compress file for upload
:type gzip: bool
"""
if gzip:
filename_gz = filename + '.gz'
with open(filename, 'rb') as f_in:
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.upload_from_filename(filename=filename,
content_type=mime_type)
if gzip:
os.remove(filename)
self.log.info('File %s uploaded to %s in %s bucket', filename, object_name, bucket_name) | airflow/contrib/hooks/gcs_hook.py |
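A sketch of ``upload`` with gzip compression enabled, assuming a GCP connection is configured and the local file exists; bucket, object and file names are placeholders:

```python
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()  # uses the default GCP connection
# /tmp/data.csv is compressed to /tmp/data.csv.gz before the upload and the
# temporary archive is removed afterwards.
hook.upload(bucket_name='example-bucket',
            object_name='exports/data.csv.gz',
            filename='/tmp/data.csv',
            mime_type='application/gzip',
            gzip=True)
print(hook.exists('example-bucket', 'exports/data.csv.gz'))
```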
apache/airflow | GoogleCloudStorageHook.exists | def exists(self, bucket_name, object_name):
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
return blob.exists() | Checks for the existence of a file in Google Cloud Storage. | def exists(self, bucket_name, object_name):
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
:type object_name: str
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
return blob.exists() | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | GoogleCloudStorageHook.is_updated_after | def is_updated_after(self, bucket_name, object_name, ts):
client = self.get_conn()
bucket = storage.Bucket(client=client, name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_update_time = blob.updated
if blob_update_time is not None:
import dateutil.tz
if not ts.tzinfo:
ts = ts.replace(tzinfo=dateutil.tz.tzutc())
self.log.info("Verify object date: %s > %s", blob_update_time, ts)
if blob_update_time > ts:
return True
return False | Checks if a blob_name is updated in Google Cloud Storage. | def is_updated_after(self, bucket_name, object_name, ts):
"""
Checks if a blob_name is updated in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
client = self.get_conn()
bucket = storage.Bucket(client=client, name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_update_time = blob.updated
if blob_update_time is not None:
import dateutil.tz
if not ts.tzinfo:
ts = ts.replace(tzinfo=dateutil.tz.tzutc())
self.log.info("Verify object date: %s > %s", blob_update_time, ts)
if blob_update_time > ts:
return True
return False | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | GoogleCloudStorageHook.delete | def delete(self, bucket_name, object_name):
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info('Blob %s deleted.', object_name) | Deletes an object from the bucket. | def delete(self, bucket_name, object_name):
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:type bucket_name: str
:param object_name: name of the object to delete
:type object_name: str
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info('Blob %s deleted.', object_name) | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | GoogleCloudStorageHook.list | def list(self, bucket_name, versions=None, max_results=None, prefix=None, delimiter=None):
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
ids = []
pageToken = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=pageToken,
prefix=prefix,
delimiter=delimiter,
versions=versions
)
blob_names = []
for blob in blobs:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
pageToken = blobs.next_page_token
if pageToken is None:
break
return ids | List all objects from the bucket with the given string prefix in name | def list(self, bucket_name, versions=None, max_results=None, prefix=None, delimiter=None):
"""
List all objects from the bucket with the given string prefix in name
:param bucket_name: bucket name
:type bucket_name: str
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
:param prefix: prefix string which filters objects whose name begin with
this prefix
:type prefix: str
:param delimiter: filters objects based on the delimiter (e.g. '.csv')
:type delimiter: str
:return: a list of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
ids = []
pageToken = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=pageToken,
prefix=prefix,
delimiter=delimiter,
versions=versions
)
blob_names = []
for blob in blobs:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
pageToken = blobs.next_page_token
if pageToken is None:
# empty next page token
break
return ids | airflow/contrib/hooks/gcs_hook.py |
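A sketch of ``list`` filtering by prefix and delimiter; the bucket name and prefix are placeholders:

```python
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()  # default GCP connection
# When a delimiter is given and any names roll up into prefixes, those
# prefixes are returned instead of the individual object names (see the
# handling of blobs.prefixes in the method above).
for name in hook.list('example-bucket',
                      prefix='logs/2019-01-01/',
                      delimiter='.csv'):
    print(name)
```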
apache/airflow | GoogleCloudStorageHook.get_size | def get_size(self, bucket_name, object_name):
self.log.info('Checking the file size of object: %s in bucket_name: %s',
object_name,
bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_size = blob.size
self.log.info('The file size of %s is %s bytes.', object_name, blob_size)
return blob_size | Gets the size of a file in Google Cloud Storage. | def get_size(self, bucket_name, object_name):
"""
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
:type object_name: str
"""
self.log.info('Checking the file size of object: %s in bucket_name: %s',
object_name,
bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_size = blob.size
self.log.info('The file size of %s is %s bytes.', object_name, blob_size)
return blob_size | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | GoogleCloudStorageHook.get_crc32c | def get_crc32c(self, bucket_name, object_name):
self.log.info('Retrieving the crc32c checksum of '
'object_name: %s in bucket_name: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_crc32c = blob.crc32c
self.log.info('The crc32c checksum of %s is %s', object_name, blob_crc32c)
return blob_crc32c | Gets the CRC32c checksum of an object in Google Cloud Storage. | def get_crc32c(self, bucket_name, object_name):
"""
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the crc32c checksum of '
'object_name: %s in bucket_name: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_crc32c = blob.crc32c
self.log.info('The crc32c checksum of %s is %s', object_name, blob_crc32c)
return blob_crc32c | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | GoogleCloudStorageHook.get_md5hash | def get_md5hash(self, bucket_name, object_name):
self.log.info('Retrieving the MD5 hash of '
'object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_md5hash = blob.md5_hash
self.log.info('The md5Hash of %s is %s', object_name, blob_md5hash)
return blob_md5hash | Gets the MD5 hash of an object in Google Cloud Storage. | def get_md5hash(self, bucket_name, object_name):
"""
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the MD5 hash of '
'object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_md5hash = blob.md5_hash
self.log.info('The md5Hash of %s is %s', object_name, blob_md5hash)
return blob_md5hash | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | GoogleCloudStorageHook.create_bucket | def create_bucket(self,
bucket_name,
resource=None,
storage_class='MULTI_REGIONAL',
location='US',
project_id=None,
labels=None
):
self.log.info('Creating Bucket: %s; Location: %s; Storage Class: %s',
bucket_name, location, storage_class)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket_resource = resource or {}
for item in bucket_resource:
if item != "name":
bucket._patch_property(name=item, value=resource[item])
bucket.storage_class = storage_class
bucket.labels = labels or {}
bucket.create(project=project_id, location=location)
return bucket.id | Creates a new bucket. Google Cloud Storage uses a flat namespace, so you can't create a bucket with a name that is already in use. | def create_bucket(self,
bucket_name,
resource=None,
storage_class='MULTI_REGIONAL',
location='US',
project_id=None,
labels=None
):
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace, so
you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:type bucket_name: str
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:type resource: dict
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage. Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:type storage_class: str
:param location: The location of the bucket.
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:type location: str
:param project_id: The ID of the GCP Project.
:type project_id: str
:param labels: User-provided labels, in key/value pairs.
:type labels: dict
:return: If successful, it returns the ``id`` of the bucket.
"""
self.log.info('Creating Bucket: %s; Location: %s; Storage Class: %s',
bucket_name, location, storage_class)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket_resource = resource or {}
for item in bucket_resource:
if item != "name":
bucket._patch_property(name=item, value=resource[item])
bucket.storage_class = storage_class
bucket.labels = labels or {}
bucket.create(project=project_id, location=location)
return bucket.id | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | GoogleCloudStorageHook.compose | def compose(self, bucket_name, source_objects, destination_object):
if not source_objects or not len(source_objects):
raise ValueError('source_objects cannot be empty.')
if not bucket_name or not destination_object:
raise ValueError('bucket_name and destination_object cannot be empty.')
self.log.info("Composing %s to %s in the bucket %s",
source_objects, destination_object, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name)
destination_blob = bucket.blob(destination_object)
destination_blob.compose(
sources=[
bucket.blob(blob_name=source_object) for source_object in source_objects
])
self.log.info("Completed successfully.") | Composes a list of existing object into a new object in the same storage bucket_name Currently it only supports up to 32 objects that can be concatenated in a single operation | def compose(self, bucket_name, source_objects, destination_object):
"""
Composes a list of existing objects into a new object in the same storage bucket.
Currently it only supports up to 32 objects that can be concatenated
in a single operation.
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:type bucket_name: str
:param source_objects: The list of source objects that will be composed
into a single object.
:type source_objects: list
:param destination_object: The path of the composed object in the bucket.
:type destination_object: str
"""
if not source_objects or not len(source_objects):
raise ValueError('source_objects cannot be empty.')
if not bucket_name or not destination_object:
raise ValueError('bucket_name and destination_object cannot be empty.')
self.log.info("Composing %s to %s in the bucket %s",
source_objects, destination_object, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name)
destination_blob = bucket.blob(destination_object)
destination_blob.compose(
sources=[
bucket.blob(blob_name=source_object) for source_object in source_objects
])
self.log.info("Completed successfully.") | airflow/contrib/hooks/gcs_hook.py |
apache/airflow | SageMakerHook.tar_and_s3_upload | def tar_and_s3_upload(self, path, key, bucket):
with tempfile.TemporaryFile() as temp_file:
if os.path.isdir(path):
files = [os.path.join(path, name) for name in os.listdir(path)]
else:
files = [path]
with tarfile.open(mode='w:gz', fileobj=temp_file) as tar_file:
for f in files:
tar_file.add(f, arcname=os.path.basename(f))
temp_file.seek(0)
self.s3_hook.load_file_obj(temp_file, key, bucket, replace=True) | Tar the local file or directory and upload to s3 | def tar_and_s3_upload(self, path, key, bucket):
"""
Tar the local file or directory and upload to s3
:param path: local file or directory
:type path: str
:param key: s3 key
:type key: str
:param bucket: s3 bucket
:type bucket: str
:return: None
"""
with tempfile.TemporaryFile() as temp_file:
if os.path.isdir(path):
files = [os.path.join(path, name) for name in os.listdir(path)]
else:
files = [path]
with tarfile.open(mode='w:gz', fileobj=temp_file) as tar_file:
for f in files:
tar_file.add(f, arcname=os.path.basename(f))
temp_file.seek(0)
self.s3_hook.load_file_obj(temp_file, key, bucket, replace=True) | airflow/contrib/hooks/sagemaker_hook.py |
apache/airflow | SageMakerHook.configure_s3_resources | def configure_s3_resources(self, config):
s3_operations = config.pop('S3Operations', None)
if s3_operations is not None:
create_bucket_ops = s3_operations.get('S3CreateBucket', [])
upload_ops = s3_operations.get('S3Upload', [])
for op in create_bucket_ops:
self.s3_hook.create_bucket(bucket_name=op['Bucket'])
for op in upload_ops:
if op['Tar']:
self.tar_and_s3_upload(op['Path'], op['Key'],
op['Bucket'])
else:
self.s3_hook.load_file(op['Path'], op['Key'],
op['Bucket']) | Extract the S3 operations from the configuration and execute them. | def configure_s3_resources(self, config):
"""
Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
:type config: dict
:rtype: None
"""
s3_operations = config.pop('S3Operations', None)
if s3_operations is not None:
create_bucket_ops = s3_operations.get('S3CreateBucket', [])
upload_ops = s3_operations.get('S3Upload', [])
for op in create_bucket_ops:
self.s3_hook.create_bucket(bucket_name=op['Bucket'])
for op in upload_ops:
if op['Tar']:
self.tar_and_s3_upload(op['Path'], op['Key'],
op['Bucket'])
else:
self.s3_hook.load_file(op['Path'], op['Key'],
op['Bucket']) | airflow/contrib/hooks/sagemaker_hook.py |
apache/airflow | SageMakerHook.check_s3_url | def check_s3_url(self, s3url):
bucket, key = S3Hook.parse_s3_url(s3url)
if not self.s3_hook.check_for_bucket(bucket_name=bucket):
raise AirflowException(
"The input S3 Bucket {} does not exist ".format(bucket))
if key and not self.s3_hook.check_for_key(key=key, bucket_name=bucket)\
and not self.s3_hook.check_for_prefix(
prefix=key, bucket_name=bucket, delimiter='/'):
raise AirflowException("The input S3 Key "
"or Prefix {} does not exist in the Bucket {}"
.format(s3url, bucket))
return True | Check if an S3 URL exists | def check_s3_url(self, s3url):
"""
Check if an S3 URL exists
:param s3url: S3 url
:type s3url: str
:rtype: bool
"""
bucket, key = S3Hook.parse_s3_url(s3url)
if not self.s3_hook.check_for_bucket(bucket_name=bucket):
raise AirflowException(
"The input S3 Bucket {} does not exist ".format(bucket))
if key and not self.s3_hook.check_for_key(key=key, bucket_name=bucket)\
and not self.s3_hook.check_for_prefix(
prefix=key, bucket_name=bucket, delimiter='/'):
# check if s3 key exists in the case user provides a single file
# or if s3 prefix exists in the case user provides multiple files in
# a prefix
raise AirflowException("The input S3 Key "
"or Prefix {} does not exist in the Bucket {}"
.format(s3url, bucket))
return True | airflow/contrib/hooks/sagemaker_hook.py |
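A sketch of ``check_s3_url``; the connection id and S3 URL are placeholders:

```python
from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')  # conn id is an assumption
# Raises AirflowException if the bucket, key or prefix does not exist;
# returns True otherwise.
hook.check_s3_url('s3://example-bucket/training/input/')
```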
apache/airflow | SageMakerHook.get_log_conn | def get_log_conn(self):
config = botocore.config.Config(retries={'max_attempts': 15})
return self.get_client_type('logs', config=config) | Establish an AWS connection for retrieving logs during training | def get_log_conn(self):
"""
Establish an AWS connection for retrieving logs during training
:rtype: CloudWatchLogs.Client
"""
config = botocore.config.Config(retries={'max_attempts': 15})
return self.get_client_type('logs', config=config) | airflow/contrib/hooks/sagemaker_hook.py |
apache/airflow | SageMakerHook.create_training_job | def create_training_job(self, config, wait_for_completion=True, print_log=True,
check_interval=30, max_ingestion_time=None):
self.check_training_config(config)
response = self.get_conn().create_training_job(**config)
if print_log:
self.check_training_status_with_log(config['TrainingJobName'],
self.non_terminal_states,
self.failed_states,
wait_for_completion,
check_interval, max_ingestion_time
)
elif wait_for_completion:
describe_response = self.check_status(config['TrainingJobName'],
'TrainingJobStatus',
self.describe_training_job,
check_interval, max_ingestion_time
)
billable_time = \
(describe_response['TrainingEndTime'] - describe_response['TrainingStartTime']) * \
describe_response['ResourceConfig']['InstanceCount']
self.log.info('Billable seconds:{}'.format(int(billable_time.total_seconds()) + 1))
return response | Create a training job | def create_training_job(self, config, wait_for_completion=True, print_log=True,
check_interval=30, max_ingestion_time=None):
"""
Create a training job
:param config: the config for training
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param print_log: if the hook should tail the CloudWatch log of the training job
:type print_log: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to training job creation
"""
self.check_training_config(config)
response = self.get_conn().create_training_job(**config)
if print_log:
self.check_training_status_with_log(config['TrainingJobName'],
self.non_terminal_states,
self.failed_states,
wait_for_completion,
check_interval, max_ingestion_time
)
elif wait_for_completion:
describe_response = self.check_status(config['TrainingJobName'],
'TrainingJobStatus',
self.describe_training_job,
check_interval, max_ingestion_time
)
billable_time = \
(describe_response['TrainingEndTime'] - describe_response['TrainingStartTime']) * \
describe_response['ResourceConfig']['InstanceCount']
self.log.info('Billable seconds:{}'.format(int(billable_time.total_seconds()) + 1))
return response | airflow/contrib/hooks/sagemaker_hook.py |
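A sketch of how ``create_training_job`` might be driven, assuming the caller already holds a complete ``CreateTrainingJob`` request dict (building that config is outside this sketch, and the connection id is an assumption):

```python
from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

def launch_training(config):
    """Submit a SageMaker training job and stream its CloudWatch logs."""
    hook = SageMakerHook(aws_conn_id='aws_default')  # conn id is an assumption
    return hook.create_training_job(
        config,
        wait_for_completion=True,
        print_log=True,               # tail the training logs while waiting
        check_interval=60,            # poll the job status once a minute
        max_ingestion_time=6 * 3600,  # give up on jobs running past 6 hours
    )
```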
apache/airflow | SageMakerHook.create_tuning_job | def create_tuning_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
self.check_tuning_config(config)
response = self.get_conn().create_hyper_parameter_tuning_job(**config)
if wait_for_completion:
self.check_status(config['HyperParameterTuningJobName'],
'HyperParameterTuningJobStatus',
self.describe_tuning_job,
check_interval, max_ingestion_time
)
return response | Create a tuning job | def create_tuning_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create a tuning job
:param config: the config for tuning
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to tuning job creation
"""
self.check_tuning_config(config)
response = self.get_conn().create_hyper_parameter_tuning_job(**config)
if wait_for_completion:
self.check_status(config['HyperParameterTuningJobName'],
'HyperParameterTuningJobStatus',
self.describe_tuning_job,
check_interval, max_ingestion_time
)
return response | airflow/contrib/hooks/sagemaker_hook.py |
apache/airflow | SageMakerHook.create_transform_job | def create_transform_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
self.check_s3_url(config['TransformInput']['DataSource']['S3DataSource']['S3Uri'])
response = self.get_conn().create_transform_job(**config)
if wait_for_completion:
self.check_status(config['TransformJobName'],
'TransformJobStatus',
self.describe_transform_job,
check_interval, max_ingestion_time
)
return response | Create a transform job | def create_transform_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create a transform job
:param config: the config for transform job
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to transform job creation
"""
self.check_s3_url(config['TransformInput']['DataSource']['S3DataSource']['S3Uri'])
response = self.get_conn().create_transform_job(**config)
if wait_for_completion:
self.check_status(config['TransformJobName'],
'TransformJobStatus',
self.describe_transform_job,
check_interval, max_ingestion_time
)
return response | airflow/contrib/hooks/sagemaker_hook.py |
apache/airflow | SageMakerHook.create_endpoint | def create_endpoint(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
response = self.get_conn().create_endpoint(**config)
if wait_for_completion:
self.check_status(config['EndpointName'],
'EndpointStatus',
self.describe_endpoint,
check_interval, max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states
)
return response | Create an endpoint | def create_endpoint(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create an endpoint
:param config: the config for endpoint
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to endpoint creation
"""
response = self.get_conn().create_endpoint(**config)
if wait_for_completion:
self.check_status(config['EndpointName'],
'EndpointStatus',
self.describe_endpoint,
check_interval, max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states
)
return response | airflow/contrib/hooks/sagemaker_hook.py |
apache/airflow | SageMakerHook.describe_training_job_with_log | def describe_training_job_with_log(self, job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call):
log_group = '/aws/sagemaker/TrainingJobs'
if len(stream_names) < instance_count:
logs_conn = self.get_log_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + '/',
orderBy='LogStreamName',
limit=instance_count
)
stream_names = [s['logStreamName'] for s in streams['logStreams']]
positions.update([(s, Position(timestamp=0, skip=0))
for s in stream_names if s not in positions])
except logs_conn.exceptions.ResourceNotFoundException:
pass
if len(stream_names) > 0:
for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
self.log.info(event['message'])
ts, count = positions[stream_names[idx]]
if event['timestamp'] == ts:
positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
else:
positions[stream_names[idx]] = Position(timestamp=event['timestamp'], skip=1)
if state == LogState.COMPLETE:
return state, last_description, last_describe_job_call
if state == LogState.JOB_COMPLETE:
state = LogState.COMPLETE
elif time.time() - last_describe_job_call >= 30:
description = self.describe_training_job(job_name)
last_describe_job_call = time.time()
if secondary_training_status_changed(description, last_description):
self.log.info(secondary_training_status_message(description, last_description))
last_description = description
status = description['TrainingJobStatus']
if status not in self.non_terminal_states:
state = LogState.JOB_COMPLETE
return state, last_description, last_describe_job_call | Return the training job info associated with job_name and print CloudWatch logs | def describe_training_job_with_log(self, job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call):
"""
Return the training job info associated with job_name and print CloudWatch logs
"""
log_group = '/aws/sagemaker/TrainingJobs'
if len(stream_names) < instance_count:
# Log streams are created whenever a container starts writing to stdout/err, so this list
# may be dynamic until we have a stream for every instance.
logs_conn = self.get_log_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + '/',
orderBy='LogStreamName',
limit=instance_count
)
stream_names = [s['logStreamName'] for s in streams['logStreams']]
positions.update([(s, Position(timestamp=0, skip=0))
for s in stream_names if s not in positions])
except logs_conn.exceptions.ResourceNotFoundException:
# On the very first training job run on an account, there's no log group until
# the container starts logging, so ignore any errors thrown about that
pass
if len(stream_names) > 0:
for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
self.log.info(event['message'])
ts, count = positions[stream_names[idx]]
if event['timestamp'] == ts:
positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
else:
positions[stream_names[idx]] = Position(timestamp=event['timestamp'], skip=1)
if state == LogState.COMPLETE:
return state, last_description, last_describe_job_call
if state == LogState.JOB_COMPLETE:
state = LogState.COMPLETE
elif time.time() - last_describe_job_call >= 30:
description = self.describe_training_job(job_name)
last_describe_job_call = time.time()
if secondary_training_status_changed(description, last_description):
self.log.info(secondary_training_status_message(description, last_description))
last_description = description
status = description['TrainingJobStatus']
if status not in self.non_terminal_states:
state = LogState.JOB_COMPLETE
return state, last_description, last_describe_job_call | airflow/contrib/hooks/sagemaker_hook.py |
apache/airflow | SageMakerHook.check_status | def check_status(self, job_name, key,
describe_function, check_interval,
max_ingestion_time,
non_terminal_states=None):
if not non_terminal_states:
non_terminal_states = self.non_terminal_states
sec = 0
running = True
while running:
time.sleep(check_interval)
sec = sec + check_interval
try:
response = describe_function(job_name)
status = response[key]
self.log.info('Job still running for %s seconds... '
'current status is %s' % (sec, status))
except KeyError:
raise AirflowException('Could not get status of the SageMaker job')
except ClientError:
raise AirflowException('AWS request failed, check logs for more info')
if status in non_terminal_states:
running = True
elif status in self.failed_states:
raise AirflowException('SageMaker job failed because %s' % response['FailureReason'])
else:
running = False
if max_ingestion_time and sec > max_ingestion_time:
                raise AirflowException('SageMaker job took more than %s seconds' % max_ingestion_time)
        self.log.info('SageMaker Job Completed')
response = describe_function(job_name)
return response | Check status of a SageMaker job | def check_status(self, job_name, key,
describe_function, check_interval,
max_ingestion_time,
non_terminal_states=None):
"""
Check status of a SageMaker job
:param job_name: name of the job to check status
:type job_name: str
:param key: the key of the response dict
that points to the state
:type key: str
:param describe_function: the function used to retrieve the status
:type describe_function: python callable
        :param check_interval: the time interval in seconds at which the operator
            will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:param non_terminal_states: the set of nonterminal states
:type non_terminal_states: set
:return: response of describe call after job is done
"""
if not non_terminal_states:
non_terminal_states = self.non_terminal_states
sec = 0
running = True
while running:
time.sleep(check_interval)
sec = sec + check_interval
try:
response = describe_function(job_name)
status = response[key]
self.log.info('Job still running for %s seconds... '
'current status is %s' % (sec, status))
except KeyError:
raise AirflowException('Could not get status of the SageMaker job')
except ClientError:
raise AirflowException('AWS request failed, check logs for more info')
if status in non_terminal_states:
running = True
elif status in self.failed_states:
raise AirflowException('SageMaker job failed because %s' % response['FailureReason'])
else:
running = False
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
                raise AirflowException('SageMaker job took more than %s seconds' % max_ingestion_time)
        self.log.info('SageMaker Job Completed')
response = describe_function(job_name)
return response | airflow/contrib/hooks/sagemaker_hook.py |
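An illustrative way to call this helper directly, shown as a minimal sketch; the connection id 'aws_default' and the job name 'my-training-job' are assumptions, not values from the source:

    from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

    hook = SageMakerHook(aws_conn_id='aws_default')
    # Poll the hypothetical training job every 30 seconds until it leaves a
    # non-terminal state, with no overall timeout.
    response = hook.check_status(
        job_name='my-training-job',
        key='TrainingJobStatus',
        describe_function=hook.describe_training_job,
        check_interval=30,
        max_ingestion_time=None,
    )
    print(response['TrainingJobStatus'])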
apache/airflow | SageMakerHook.check_training_status_with_log | def check_training_status_with_log(self, job_name, non_terminal_states, failed_states,
wait_for_completion, check_interval, max_ingestion_time):
sec = 0
description = self.describe_training_job(job_name)
self.log.info(secondary_training_status_message(description, None))
instance_count = description['ResourceConfig']['InstanceCount']
status = description['TrainingJobStatus']
stream_names = []
positions = {}
job_already_completed = status not in non_terminal_states
state = LogState.TAILING if wait_for_completion and not job_already_completed else LogState.COMPLETE
last_describe_job_call = time.time()
last_description = description
while True:
time.sleep(check_interval)
sec = sec + check_interval
state, last_description, last_describe_job_call = \
self.describe_training_job_with_log(job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call)
if state == LogState.COMPLETE:
break
if max_ingestion_time and sec > max_ingestion_time:
                raise AirflowException('SageMaker job took more than %s seconds' % max_ingestion_time)
if wait_for_completion:
status = last_description['TrainingJobStatus']
if status in failed_states:
reason = last_description.get('FailureReason', '(No reason provided)')
raise AirflowException('Error training {}: {} Reason: {}'.format(job_name, status, reason))
billable_time = (last_description['TrainingEndTime'] - last_description['TrainingStartTime']) \
* instance_count
self.log.info('Billable seconds:{}'.format(int(billable_time.total_seconds()) + 1)) | Display the logs for a given training job, optionally tailing them until the job is complete. | def check_training_status_with_log(self, job_name, non_terminal_states, failed_states,
wait_for_completion, check_interval, max_ingestion_time):
"""
Display the logs for a given training job, optionally tailing them until the
job is complete.
:param job_name: name of the training job to check status and display logs for
:type job_name: str
:param non_terminal_states: the set of non_terminal states
:type non_terminal_states: set
:param failed_states: the set of failed states
:type failed_states: set
:param wait_for_completion: Whether to keep looking for new log entries
until the job completes
:type wait_for_completion: bool
:param check_interval: The interval in seconds between polling for new log entries and job completion
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: None
"""
sec = 0
description = self.describe_training_job(job_name)
self.log.info(secondary_training_status_message(description, None))
instance_count = description['ResourceConfig']['InstanceCount']
status = description['TrainingJobStatus']
stream_names = [] # The list of log streams
positions = {} # The current position in each stream, map of stream name -> position
job_already_completed = status not in non_terminal_states
state = LogState.TAILING if wait_for_completion and not job_already_completed else LogState.COMPLETE
# The loop below implements a state machine that alternates between checking the job status and
# reading whatever is available in the logs at this point. Note, that if we were called with
# wait_for_completion == False, we never check the job status.
#
# If wait_for_completion == TRUE and job is not completed, the initial state is TAILING
# If wait_for_completion == FALSE, the initial state is COMPLETE
# (doesn't matter if the job really is complete).
#
# The state table:
#
# STATE ACTIONS CONDITION NEW STATE
# ---------------- ---------------- ----------------- ----------------
# TAILING Read logs, Pause, Get status Job complete JOB_COMPLETE
# Else TAILING
# JOB_COMPLETE Read logs, Pause Any COMPLETE
# COMPLETE Read logs, Exit N/A
#
# Notes:
# - The JOB_COMPLETE state forces us to do an extra pause and read any items that
# got to Cloudwatch after the job was marked complete.
last_describe_job_call = time.time()
last_description = description
while True:
time.sleep(check_interval)
sec = sec + check_interval
state, last_description, last_describe_job_call = \
self.describe_training_job_with_log(job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call)
if state == LogState.COMPLETE:
break
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
                raise AirflowException('SageMaker job took more than %s seconds' % max_ingestion_time)
if wait_for_completion:
status = last_description['TrainingJobStatus']
if status in failed_states:
reason = last_description.get('FailureReason', '(No reason provided)')
raise AirflowException('Error training {}: {} Reason: {}'.format(job_name, status, reason))
billable_time = (last_description['TrainingEndTime'] - last_description['TrainingStartTime']) \
* instance_count
self.log.info('Billable seconds:{}'.format(int(billable_time.total_seconds()) + 1)) | airflow/contrib/hooks/sagemaker_hook.py |
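A minimal sketch of tailing the CloudWatch logs of a training job with this method; the connection id and job name are hypothetical:

    from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

    hook = SageMakerHook(aws_conn_id='aws_default')
    # Stream logs and block until the hypothetical job reaches a terminal state.
    hook.check_training_status_with_log(
        job_name='my-training-job',
        non_terminal_states=hook.non_terminal_states,
        failed_states=hook.failed_states,
        wait_for_completion=True,
        check_interval=30,
        max_ingestion_time=None,
    )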
apache/airflow | DataFlowPythonOperator.execute | def execute(self, context):
bucket_helper = GoogleCloudBucketHelper(
self.gcp_conn_id, self.delegate_to)
self.py_file = bucket_helper.google_cloud_to_local(self.py_file)
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep)
dataflow_options = self.dataflow_default_options.copy()
dataflow_options.update(self.options)
camel_to_snake = lambda name: re.sub(
r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)
formatted_options = {camel_to_snake(key): dataflow_options[key]
for key in dataflow_options}
hook.start_python_dataflow(
self.job_name, formatted_options,
self.py_file, self.py_options) | Execute the python dataflow job. | def execute(self, context):
"""Execute the python dataflow job."""
bucket_helper = GoogleCloudBucketHelper(
self.gcp_conn_id, self.delegate_to)
self.py_file = bucket_helper.google_cloud_to_local(self.py_file)
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep)
dataflow_options = self.dataflow_default_options.copy()
dataflow_options.update(self.options)
# Convert argument names from lowerCamelCase to snake case.
camel_to_snake = lambda name: re.sub(
r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)
formatted_options = {camel_to_snake(key): dataflow_options[key]
for key in dataflow_options}
hook.start_python_dataflow(
self.job_name, formatted_options,
self.py_file, self.py_options) | airflow/contrib/operators/dataflow_operator.py |
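A standalone sketch of the lowerCamelCase-to-snake_case conversion used above; the option names are made up for illustration:

    import re

    camel_to_snake = lambda name: re.sub(
        r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)

    # Hypothetical Dataflow option names.
    camel_to_snake('tempLocation')  # -> 'temp_location'
    camel_to_snake('numWorkers')    # -> 'num_workers'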
apache/airflow | run_migrations_online | def run_migrations_online():
connectable = settings.engine
with connectable.connect() as connection:
context.configure(
connection=connection,
transaction_per_migration=True,
target_metadata=target_metadata,
compare_type=COMPARE_TYPE,
)
with context.begin_transaction():
context.run_migrations() | Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. | def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = settings.engine
with connectable.connect() as connection:
context.configure(
connection=connection,
transaction_per_migration=True,
target_metadata=target_metadata,
compare_type=COMPARE_TYPE,
)
with context.begin_transaction():
context.run_migrations() | airflow/migrations/env.py |
apache/airflow | BigtableHook.delete_instance | def delete_instance(self, instance_id, project_id=None):
instance = self.get_instance(instance_id=instance_id, project_id=project_id)
if instance:
instance.delete()
else:
self.log.info("The instance '%s' does not exist in project '%s'. Exiting", instance_id,
project_id) | Deletes the specified Cloud Bigtable instance. | def delete_instance(self, instance_id, project_id=None):
"""
Deletes the specified Cloud Bigtable instance.
Raises google.api_core.exceptions.NotFound if the Cloud Bigtable instance does
not exist.
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type instance_id: str
"""
instance = self.get_instance(instance_id=instance_id, project_id=project_id)
if instance:
instance.delete()
else:
self.log.info("The instance '%s' does not exist in project '%s'. Exiting", instance_id,
project_id) | airflow/contrib/hooks/gcp_bigtable_hook.py |
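A minimal usage sketch; the connection id, project ID and instance ID are assumptions:

    from airflow.contrib.hooks.gcp_bigtable_hook import BigtableHook

    hook = BigtableHook(gcp_conn_id='google_cloud_default')
    # Deletes the hypothetical instance, or logs and returns if it does not exist.
    hook.delete_instance(instance_id='my-bt-instance', project_id='my-gcp-project')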
apache/airflow | BigtableHook.create_instance | def create_instance(self,
instance_id,
main_cluster_id,
main_cluster_zone,
project_id=None,
replica_cluster_id=None,
replica_cluster_zone=None,
instance_display_name=None,
instance_type=enums.Instance.Type.TYPE_UNSPECIFIED,
instance_labels=None,
cluster_nodes=None,
cluster_storage_type=enums.StorageType.STORAGE_TYPE_UNSPECIFIED,
timeout=None):
cluster_storage_type = enums.StorageType(cluster_storage_type)
instance_type = enums.Instance.Type(instance_type)
instance = Instance(
instance_id,
self._get_client(project_id=project_id),
instance_display_name,
instance_type,
instance_labels,
)
clusters = [
instance.cluster(
main_cluster_id,
main_cluster_zone,
cluster_nodes,
cluster_storage_type
)
]
if replica_cluster_id and replica_cluster_zone:
clusters.append(instance.cluster(
replica_cluster_id,
replica_cluster_zone,
cluster_nodes,
cluster_storage_type
))
operation = instance.create(
clusters=clusters
)
operation.result(timeout)
return instance | Creates new instance. | def create_instance(self,
instance_id,
main_cluster_id,
main_cluster_zone,
project_id=None,
replica_cluster_id=None,
replica_cluster_zone=None,
instance_display_name=None,
instance_type=enums.Instance.Type.TYPE_UNSPECIFIED,
instance_labels=None,
cluster_nodes=None,
cluster_storage_type=enums.StorageType.STORAGE_TYPE_UNSPECIFIED,
timeout=None):
"""
Creates new instance.
:type instance_id: str
:param instance_id: The ID for the new instance.
:type main_cluster_id: str
:param main_cluster_id: The ID for main cluster for the new instance.
:type main_cluster_zone: str
:param main_cluster_zone: The zone for main cluster.
See https://cloud.google.com/bigtable/docs/locations for more details.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type replica_cluster_id: str
:param replica_cluster_id: (optional) The ID for replica cluster for the new
instance.
:type replica_cluster_zone: str
:param replica_cluster_zone: (optional) The zone for replica cluster.
:type instance_type: enums.Instance.Type
:param instance_type: (optional) The type of the instance.
:type instance_display_name: str
:param instance_display_name: (optional) Human-readable name of the instance.
Defaults to ``instance_id``.
:type instance_labels: dict
:param instance_labels: (optional) Dictionary of labels to associate with the
instance.
:type cluster_nodes: int
:param cluster_nodes: (optional) Number of nodes for cluster.
:type cluster_storage_type: enums.StorageType
:param cluster_storage_type: (optional) The type of storage.
:type timeout: int
:param timeout: (optional) timeout (in seconds) for instance creation.
            If not specified (None), the operator will wait indefinitely.
"""
cluster_storage_type = enums.StorageType(cluster_storage_type)
instance_type = enums.Instance.Type(instance_type)
instance = Instance(
instance_id,
self._get_client(project_id=project_id),
instance_display_name,
instance_type,
instance_labels,
)
clusters = [
instance.cluster(
main_cluster_id,
main_cluster_zone,
cluster_nodes,
cluster_storage_type
)
]
if replica_cluster_id and replica_cluster_zone:
clusters.append(instance.cluster(
replica_cluster_id,
replica_cluster_zone,
cluster_nodes,
cluster_storage_type
))
operation = instance.create(
clusters=clusters
)
operation.result(timeout)
return instance | airflow/contrib/hooks/gcp_bigtable_hook.py |
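A minimal sketch of creating an instance with a single three-node cluster; every identifier below is a placeholder:

    from airflow.contrib.hooks.gcp_bigtable_hook import BigtableHook

    hook = BigtableHook(gcp_conn_id='google_cloud_default')
    instance = hook.create_instance(
        instance_id='my-bt-instance',
        main_cluster_id='my-bt-cluster',
        main_cluster_zone='europe-west1-b',
        project_id='my-gcp-project',
        cluster_nodes=3,
        timeout=600,  # give up waiting after ten minutes
    )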
apache/airflow | BigtableHook.create_table | def create_table(instance,
table_id,
initial_split_keys=None,
column_families=None):
if column_families is None:
column_families = {}
if initial_split_keys is None:
initial_split_keys = []
table = Table(table_id, instance)
table.create(initial_split_keys, column_families) | Creates the specified Cloud Bigtable table. | def create_table(instance,
table_id,
initial_split_keys=None,
column_families=None):
"""
Creates the specified Cloud Bigtable table.
Raises ``google.api_core.exceptions.AlreadyExists`` if the table exists.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the table.
:type table_id: str
:param table_id: The ID of the table to create in Cloud Bigtable.
:type initial_split_keys: list
:param initial_split_keys: (Optional) A list of row keys in bytes to use to
initially split the table.
:type column_families: dict
:param column_families: (Optional) A map of columns to create. The key is the
column_id str, and the value is a
:class:`google.cloud.bigtable.column_family.GarbageCollectionRule`.
"""
if column_families is None:
column_families = {}
if initial_split_keys is None:
initial_split_keys = []
table = Table(table_id, instance)
table.create(initial_split_keys, column_families) | airflow/contrib/hooks/gcp_bigtable_hook.py |
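A minimal sketch combining this with get_instance; the IDs and the single-version garbage-collection rule are illustrative assumptions:

    from google.cloud.bigtable import column_family
    from airflow.contrib.hooks.gcp_bigtable_hook import BigtableHook

    hook = BigtableHook(gcp_conn_id='google_cloud_default')
    instance = hook.get_instance(instance_id='my-bt-instance', project_id='my-gcp-project')
    # Create a table with one column family keeping only the latest cell version.
    BigtableHook.create_table(
        instance,
        table_id='my-table',
        column_families={'cf1': column_family.MaxVersionsGCRule(1)},
    )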
apache/airflow | BigtableHook.delete_table | def delete_table(self, instance_id, table_id, project_id=None):
table = self.get_instance(instance_id=instance_id, project_id=project_id).table(table_id=table_id)
table.delete() | Deletes the specified table in Cloud Bigtable. | def delete_table(self, instance_id, table_id, project_id=None):
"""
Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type table_id: str
:param table_id: The ID of the table in Cloud Bigtable.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
"""
table = self.get_instance(instance_id=instance_id, project_id=project_id).table(table_id=table_id)
table.delete() | airflow/contrib/hooks/gcp_bigtable_hook.py |
apache/airflow | BigtableHook.update_cluster | def update_cluster(instance, cluster_id, nodes):
cluster = Cluster(cluster_id, instance)
cluster.serve_nodes = nodes
cluster.update() | Updates number of nodes in the specified Cloud Bigtable cluster. | def update_cluster(instance, cluster_id, nodes):
"""
Updates number of nodes in the specified Cloud Bigtable cluster.
Raises google.api_core.exceptions.NotFound if the cluster does not exist.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the cluster.
:type cluster_id: str
:param cluster_id: The ID of the cluster.
:type nodes: int
:param nodes: The desired number of nodes.
"""
cluster = Cluster(cluster_id, instance)
cluster.serve_nodes = nodes
cluster.update() | airflow/contrib/hooks/gcp_bigtable_hook.py |
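A minimal scaling sketch; instance, cluster and project IDs are placeholders:

    from airflow.contrib.hooks.gcp_bigtable_hook import BigtableHook

    hook = BigtableHook(gcp_conn_id='google_cloud_default')
    instance = hook.get_instance(instance_id='my-bt-instance', project_id='my-gcp-project')
    # Resize the hypothetical cluster to five nodes.
    BigtableHook.update_cluster(instance, cluster_id='my-bt-cluster', nodes=5)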
apache/airflow | HiveCliHook._prepare_cli_cmd | def _prepare_cli_cmd(self):
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
host=conn.host, port=conn.port, schema=conn.schema)
if configuration.conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = ""
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}".format(
template=template, proxy_user=proxy_user)
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list | This function creates the command list from available information | def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
host=conn.host, port=conn.port, schema=conn.schema)
if configuration.conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}".format(
template=template, proxy_user=proxy_user)
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list | airflow/hooks/hive_hooks.py |
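For illustration, with use_beeline enabled and a plain auth setting, the returned command list would look roughly like the following (host, schema and credentials are placeholders):

    ['beeline',
     '-u', '"jdbc:hive2://hive-server:10000/default;auth=NOSASL"',
     '-n', 'hive_user',
     '-p', 'hive_password']
    # ...followed by whatever extra flags are set in hive_cli_params.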
apache/airflow | HiveCliHook._prepare_hiveconf | def _prepare_hiveconf(d):
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
) | This function prepares a list of hiveconf params from a dictionary of key value pairs. | def _prepare_hiveconf(d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
) | airflow/hooks/hive_hooks.py |
apache/airflow | HiveCliHook.load_df | def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN',
'i': 'BIGINT',
'u': 'BIGINT',
'f': 'DOUBLE',
'c': 'STRING',
'M': 'TIMESTAMP',
'O': 'STRING',
'S': 'STRING',
'U': 'STRING',
'V': 'STRING'
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs) | Loads a pandas DataFrame into hive. Hive data types will be inferred if not passed but column names will not be sanitized. | def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs) | airflow/hooks/hive_hooks.py |
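A minimal sketch of loading a small DataFrame; the connection id and table name are assumptions:

    import pandas as pd
    from airflow.hooks.hive_hooks import HiveCliHook

    hook = HiveCliHook(hive_cli_conn_id='hive_cli_default')
    df = pd.DataFrame({'name': ['Ada', 'Grace'], 'score': [98, 99]})
    # Field types are inferred from the DataFrame; extra kwargs go to load_file.
    hook.load_df(df, table='analytics.scores', recreate=True)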
apache/airflow | HiveMetastoreHook.check_for_named_partition | def check_for_named_partition(self, schema, table, partition_name):
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name) | Checks whether a partition with a given name exists | def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
        :param table: Name of hive table @partition belongs to
        :type table: str
        :param partition_name: Name of the partition to check for (e.g. `a=b/c=d`)
        :type partition_name: str
        :rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name) | airflow/hooks/hive_hooks.py |
apache/airflow | HiveMetastoreHook.table_exists | def table_exists(self, table_name, db='default'):
try:
self.get_table(table_name, db)
return True
except Exception:
return False | Check if table exists | def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False | airflow/hooks/hive_hooks.py |
apache/airflow | HiveServer2Hook.get_results | def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results | Get results of the provided hql in target schema. | def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results | airflow/hooks/hive_hooks.py |
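A minimal sketch; the connection id and query are placeholders:

    from airflow.hooks.hive_hooks import HiveServer2Hook

    hook = HiveServer2Hook(hiveserver2_conn_id='hiveserver2_default')
    result = hook.get_results("SELECT COUNT(*) FROM airflow.static_babynames")
    header = result['header']  # cursor description tuples
    rows = result['data']      # list of result rows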
apache/airflow | HiveServer2Hook.to_csv | def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i) | Execute hql in target schema and write results to a csv file. | def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
:param schema: target schema, default to 'default'.
:type schema: str
:param delimiter: delimiter of the csv file, default to ','.
:type delimiter: str
:param lineterminator: lineterminator of the csv file.
:type lineterminator: str
:param output_header: header of the csv file, default to True.
:type output_header: bool
        :param fetch_size: number of result rows to fetch and write at a time, default to 1000.
:type fetch_size: int
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i) | airflow/hooks/hive_hooks.py |
apache/airflow | HiveServer2Hook.get_records | def get_records(self, hql, schema='default', hive_conf=None):
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data'] | Get a set of records from a Hive query. | def get_records(self, hql, schema='default', hive_conf=None):
"""
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data'] | airflow/hooks/hive_hooks.py |
apache/airflow | HiveServer2Hook.get_pandas_df | def get_pandas_df(self, hql, schema='default'):
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df | Get a pandas dataframe from a Hive query | def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:return: result of hql execution
:rtype: DataFrame
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df | airflow/hooks/hive_hooks.py |
apache/airflow | CloudVisionHook.get_conn | def get_conn(self):
if not self._client:
self._client = ProductSearchClient(credentials=self._get_credentials())
return self._client | Retrieves connection to Cloud Vision. | def get_conn(self):
"""
Retrieves connection to Cloud Vision.
:return: Google Cloud Vision client object.
:rtype: google.cloud.vision_v1.ProductSearchClient
"""
if not self._client:
self._client = ProductSearchClient(credentials=self._get_credentials())
return self._client | airflow/contrib/hooks/gcp_vision_hook.py |
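A minimal sketch of using the returned client; the connection id, project and location are assumptions about the caller's environment:

    from airflow.contrib.hooks.gcp_vision_hook import CloudVisionHook

    hook = CloudVisionHook(gcp_conn_id='google_cloud_default')
    client = hook.get_conn()
    parent = client.location_path('my-gcp-project', 'europe-west1')
    for product_set in client.list_product_sets(parent=parent):
        print(product_set.name)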