| column | type |
|---|---|
| repo | string (lengths 7 to 55) |
| path | string (lengths 4 to 127) |
| func_name | string (lengths 1 to 88) |
| original_string | string (lengths 75 to 19.8k) |
| language | string (1 class) |
| code | string (lengths 75 to 19.8k) |
| code_tokens | sequence |
| docstring | string (lengths 3 to 17.3k) |
| docstring_tokens | sequence |
| sha | string (length 40) |
| url | string (lengths 87 to 242) |
| partition | string (1 class) |
repo: alephdata/memorious | path: memorious/operations/fetch.py | func_name: session | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/fetch.py#L74-L103

```python
def session(context, data):
    """Set some HTTP parameters for all subsequent requests.

    This includes ``user`` and ``password`` for HTTP basic authentication,
    and ``user_agent`` as a header.
    """
    context.http.reset()

    user = context.get('user')
    password = context.get('password')
    if user is not None and password is not None:
        context.http.session.auth = (user, password)

    user_agent = context.get('user_agent')
    if user_agent is not None:
        context.http.session.headers['User-Agent'] = user_agent

    referer = context.get('url')
    if referer is not None:
        context.http.session.headers['Referer'] = referer

    proxy = context.get('proxy')
    if proxy is not None:
        proxies = {'http': proxy, 'https': proxy}
        context.http.session.proxies = proxies

    # Explicitly save the session because no actual HTTP requests were made.
    context.http.save()
    context.emit(data=data)
```
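For orientation, here is a hypothetical set of stage parameters that this operation could pick up through `context.get(...)`; the key names come from the function above, the values are made up, and the sketch assumes `context.get` resolves them from the stage's configured params:

```python
# Hypothetical stage params for the `session` operation (values are examples only).
params = {
    "user": "demo",                       # together with "password", enables HTTP basic auth
    "password": "s3cret",
    "user_agent": "memorious-demo/0.1",   # sent as the User-Agent header
    "url": "https://example.org/",        # sent as the Referer header
    "proxy": "http://127.0.0.1:3128",     # applied to both http and https traffic
}
```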
repo: alephdata/memorious | path: memorious/model/event.py | func_name: Event.save | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/model/event.py#L19-L35

```python
def save(cls, crawler, stage, level, run_id, error=None, message=None):
    """Create an event, possibly based on an exception."""
    event = {
        'stage': stage.name,
        'level': level,
        'timestamp': pack_now(),
        'error': error,
        'message': message
    }
    data = dump_json(event)
    conn.lpush(make_key(crawler, "events"), data)
    conn.lpush(make_key(crawler, "events", level), data)
    conn.lpush(make_key(crawler, "events", stage), data)
    conn.lpush(make_key(crawler, "events", stage, level), data)
    conn.lpush(make_key(crawler, "events", run_id), data)
    conn.lpush(make_key(crawler, "events", run_id, level), data)
    return event
```
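Because `make_key` (shown later in this dump, from memorious/util.py) joins its non-None parts with colons, one `Event.save` call fans the serialized event out onto six Redis lists. Schematically, with placeholders rather than literal values, and assuming the stage argument stringifies to something recognisable:

```python
# Redis list keys written by a single Event.save call (placeholders, not literal values):
#   <crawler>:events
#   <crawler>:events:<level>
#   <crawler>:events:<stage>            # uses the stage object's string form
#   <crawler>:events:<stage>:<level>
#   <crawler>:events:<run_id>
#   <crawler>:events:<run_id>:<level>
```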
repo: alephdata/memorious | path: memorious/model/event.py | func_name: Event.get_stage_events | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/model/event.py#L93-L96

```python
def get_stage_events(cls, crawler, stage_name, start, end, level=None):
    """events from a particular stage"""
    key = make_key(crawler, "events", stage_name, level)
    return cls.event_list(key, start, end)
```
repo: alephdata/memorious | path: memorious/model/event.py | func_name: Event.get_run_events | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/model/event.py#L99-L102

```python
def get_run_events(cls, crawler, run_id, start, end, level=None):
    """Events from a particular run"""
    key = make_key(crawler, "events", run_id, level)
    return cls.event_list(key, start, end)
```
repo: alephdata/memorious | path: memorious/helpers/__init__.py | func_name: soviet_checksum | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/helpers/__init__.py#L16-L30

```python
def soviet_checksum(code):
    """Courtesy of Sir Vlad Lavrov."""
    def sum_digits(code, offset=1):
        total = 0
        for digit, index in zip(code[:7], count(offset)):
            total += int(digit) * index
        summed = (total / 11 * 11)
        return total - summed

    check = sum_digits(code, 1)
    if check == 10:
        check = sum_digits(code, 3)
        if check == 10:
            return code + '0'
    return code + str(check)
```
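A hedged usage sketch: the function appends a check digit to a numeric code string. Note that `total / 11 * 11` only works as a floor-division remainder trick under Python 2; on Python 3 the equivalent would be `total % 11`, so this example assumes Python 2 division semantics.

```python
# Illustrative only (assumes Python 2 style integer division inside sum_digits).
code = "1234567"
print(soviet_checksum(code))  # the original seven digits plus one computed check digit
```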
repo: alephdata/memorious | path: memorious/helpers/__init__.py | func_name: search_results_total | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/helpers/__init__.py#L33-L39

```python
def search_results_total(html, xpath, check, delimiter):
    """ Get the total number of results from the DOM of a search index. """
    for container in html.findall(xpath):
        if check in container.findtext('.'):
            text = container.findtext('.').split(delimiter)
            total = int(text[-1].strip())
            return total
```
repo: alephdata/memorious | path: memorious/helpers/__init__.py | func_name: search_results_last_url | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/helpers/__init__.py#L42-L46

```python
def search_results_last_url(html, xpath, label):
    """ Get the URL of the 'last' button in a search results listing. """
    for container in html.findall(xpath):
        if container.text_content().strip() == label:
            return container.find('.//a').get('href')
```
repo: alephdata/memorious | path: memorious/model/crawl.py | func_name: Crawl.op_count | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/model/crawl.py#L21-L27

```python
def op_count(cls, crawler, stage=None):
    """Total operations performed for this crawler"""
    if stage:
        total_ops = conn.get(make_key(crawler, stage))
    else:
        total_ops = conn.get(make_key(crawler, "total_ops"))
    return unpack_int(total_ops)
```
repo: alephdata/memorious | path: memorious/ui/views.py | func_name: index | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/ui/views.py#L67-L77

```python
def index():
    """Generate a list of all crawlers, alphabetically, with op counts."""
    crawlers = []
    for crawler in manager:
        data = Event.get_counts(crawler)
        data['last_active'] = crawler.last_run
        data['total_ops'] = crawler.op_count
        data['running'] = crawler.is_running
        data['crawler'] = crawler
        crawlers.append(data)
    return render_template('index.html', crawlers=crawlers)
```
repo: alephdata/memorious | path: memorious/operations/clean.py | func_name: clean_html | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/clean.py#L11-L26

```python
def clean_html(context, data):
    """Clean an HTML DOM and store the changed version."""
    doc = _get_html_document(context, data)
    if doc is None:
        context.emit(data=data)
        return

    remove_paths = context.params.get('remove_paths')
    for path in ensure_list(remove_paths):
        for el in doc.findall(path):
            el.drop_tree()

    html_text = html.tostring(doc, pretty_print=True)
    content_hash = context.store_data(html_text)
    data['content_hash'] = content_hash
    context.emit(data=data)
```
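A hypothetical parameter block for this stage; the `remove_paths` entries are example element paths of the kind `doc.findall` accepts:

```python
# Hypothetical params for the clean_html operation (paths are examples only).
params = {
    "remove_paths": [
        ".//script",                 # strip inline scripts
        ".//div[@class='banner']",   # strip a site-specific banner block
    ]
}
```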
repo: alephdata/memorious | path: memorious/task_runner.py | func_name: TaskRunner.execute | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/task_runner.py#L19-L49

```python
def execute(cls, stage, state, data, next_allowed_exec_time=None):
    """Execute the operation, rate limiting allowing."""
    try:
        context = Context.from_state(state, stage)
        now = datetime.utcnow()
        if next_allowed_exec_time and now < next_allowed_exec_time:
            # task not allowed to run yet; put it back in the queue
            Queue.queue(stage, state, data, delay=next_allowed_exec_time)
        elif context.crawler.disabled:
            pass
        elif context.stage.rate_limit:
            try:
                with rate_limiter(context):
                    context.execute(data)
            except RateLimitException:
                delay = max(1, 1.0 / context.stage.rate_limit)
                delay = random.randint(1, int(delay))
                context.log.info(
                    "Rate limit exceeded, delaying %d sec.", delay
                )
                Queue.queue(stage, state, data, delay=delay)
        else:
            context.execute(data)
    except Exception:
        log.exception("Task failed to execute:")
    finally:
        # Decrease the pending task count after executing a task.
        Queue.decr_pending(context.crawler)
        # If we don't have any more tasks to execute, time to clean up.
        if not context.crawler.is_running:
            context.crawler.aggregate(context)
```
repo: alephdata/memorious | path: memorious/operations/db.py | func_name: _recursive_upsert | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/db.py#L21-L48

```python
def _recursive_upsert(context, params, data):
    """Insert or update nested dicts recursively into db tables"""
    children = params.get("children", {})
    nested_calls = []
    for child_params in children:
        key = child_params.get("key")
        child_data_list = ensure_list(data.pop(key))
        if isinstance(child_data_list, dict):
            child_data_list = [child_data_list]
        if not (isinstance(child_data_list, list) and
                all(isinstance(i, dict) for i in child_data_list)):
            context.log.warn(
                "Expecting a dict or a list of dicts as children for key", key
            )
            continue
        if child_data_list:
            table_suffix = child_params.get("table_suffix", key)
            child_params["table"] = params.get("table") + "_" + table_suffix
            # copy some properties over from parent to child
            inherit = child_params.get("inherit", {})
            for child_data in child_data_list:
                for dest, src in inherit.items():
                    child_data[dest] = data.get(src)
                nested_calls.append((child_params, child_data))
    # Insert or update data
    _upsert(context, params, data)
    for child_params, child_data in nested_calls:
        _recursive_upsert(context, child_params, child_data)
```
repo: alephdata/memorious | path: memorious/operations/db.py | func_name: db | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/db.py#L51-L56

```python
def db(context, data):
    """Insert or update `data` as a row into specified db table"""
    table = context.params.get("table", context.crawler.name)
    params = context.params
    params["table"] = table
    _recursive_upsert(context, params, data)
```
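To make the parameter shape concrete, here is a hypothetical `params` block using only the keys the two functions above actually read (`table`, plus `key`, `table_suffix` and `inherit` for each child); the table and column names are invented:

```python
# Hypothetical parameters for the db operation (names are examples only).
params = {
    "table": "companies",
    "children": [
        {
            "key": "officers",                 # popped from each parent row
            "table_suffix": "officers",        # child rows go to "companies_officers"
            "inherit": {"company_id": "id"},   # child["company_id"] = parent["id"]
        },
    ],
}
```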
repo: alephdata/memorious | path: memorious/cli.py | func_name: cli | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/cli.py#L21-L30

```python
def cli(debug, cache, incremental):
    """Crawler framework for documents and structured scrapers."""
    settings.HTTP_CACHE = cache
    settings.INCREMENTAL = incremental
    settings.DEBUG = debug
    if settings.DEBUG:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    init_memorious()
```
repo: alephdata/memorious | path: memorious/cli.py | func_name: run | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/cli.py#L43-L48

```python
def run(crawler):
    """Run a specified crawler."""
    crawler = get_crawler(crawler)
    crawler.run()
    if is_sync_mode():
        TaskRunner.run_sync()
```
repo: alephdata/memorious | path: memorious/cli.py | func_name: index | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/cli.py#L74-L87

```python
def index():
    """List the available crawlers."""
    crawler_list = []
    for crawler in manager:
        is_due = 'yes' if crawler.check_due() else 'no'
        if crawler.disabled:
            is_due = 'off'
        crawler_list.append([crawler.name,
                             crawler.description,
                             crawler.schedule,
                             is_due,
                             Queue.size(crawler)])
    headers = ['Name', 'Description', 'Schedule', 'Due', 'Pending']
    print(tabulate(crawler_list, headers=headers))
```
repo: alephdata/memorious | path: memorious/cli.py | func_name: scheduled | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/cli.py#L92-L98

```python
def scheduled(wait=False):
    """Run crawlers that are due."""
    manager.run_scheduled()
    while wait:
        # Loop and try to run scheduled crawlers at short intervals
        manager.run_scheduled()
        time.sleep(settings.SCHEDULER_INTERVAL)
```
repo: alephdata/memorious | path: memorious/operations/store.py | func_name: _get_directory_path | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/store.py#L9-L19

```python
def _get_directory_path(context):
    """Get the storage path for the output."""
    path = os.path.join(settings.BASE_PATH, 'store')
    path = context.params.get('path', path)
    path = os.path.join(path, context.crawler.name)
    path = os.path.abspath(os.path.expandvars(path))
    try:
        os.makedirs(path)
    except Exception:
        pass
    return path
```
repo: alephdata/memorious | path: memorious/operations/store.py | func_name: directory | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/store.py#L22-L46

```python
def directory(context, data):
    """Store the collected files to a given directory."""
    with context.http.rehash(data) as result:
        if not result.ok:
            return

        content_hash = data.get('content_hash')
        if content_hash is None:
            context.emit_warning("No content hash in data.")
            return

        path = _get_directory_path(context)
        file_name = data.get('file_name', result.file_name)
        file_name = safe_filename(file_name, default='raw')
        file_name = '%s.%s' % (content_hash, file_name)
        data['_file_name'] = file_name
        file_path = os.path.join(path, file_name)
        if not os.path.exists(file_path):
            shutil.copyfile(result.file_path, file_path)

        context.log.info("Store [directory]: %s", file_name)
        meta_path = os.path.join(path, '%s.json' % content_hash)
        with open(meta_path, 'w') as fh:
            json.dump(data, fh)
```
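A short sketch of the knobs involved, based only on what the two functions above read and write; the path value is an example:

```python
# Hypothetical stage params for the directory store operation.
params = {"path": "/data/store"}  # falls back to <BASE_PATH>/store when omitted

# For each stored result, two files appear under <path>/<crawler name>/:
#   <content_hash>.<file name>   - the raw payload copied from the fetch result
#   <content_hash>.json          - the crawler `data` dict serialized as JSON
```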
repo: alephdata/memorious | path: memorious/operations/initializers.py | func_name: seed | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/initializers.py#L5-L18

```python
def seed(context, data):
    """Initialize a crawler with a set of seed URLs.

    The URLs are given as a list or single value to the ``urls`` parameter.
    If this is called as a second stage in a crawler, the URL will be formatted
    against the supplied ``data`` values, e.g.:

        https://crawl.site/entries/%(number)s.html
    """
    for key in ('url', 'urls'):
        for url in ensure_list(context.params.get(key)):
            url = url % data
            context.emit(data={'url': url})
```
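As a hedged illustration of the parameter shape, a `seed` stage might receive params like the following; the URLs are placeholders, and the `%(number)s` substitution only makes sense when the stage runs after another stage that emits a `number` field:

```python
# Hypothetical params for the seed initializer.
params = {
    "urls": [
        "https://crawl.site/entries/%(number)s.html",  # filled from data["number"]
        "https://crawl.site/index.html",
    ]
}
```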
repo: alephdata/memorious | path: memorious/operations/initializers.py | func_name: enumerate | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/initializers.py#L21-L26

```python
def enumerate(context, data):
    """Iterate through a set of items and emit each one of them."""
    items = ensure_list(context.params.get('items'))
    for item in items:
        data['item'] = item
        context.emit(data=data)
```
repo: alephdata/memorious | path: memorious/operations/initializers.py | func_name: sequence | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/operations/initializers.py#L29-L67

```python
def sequence(context, data):
    """Generate a sequence of numbers.

    It is the memorious equivalent of the xrange function, accepting the
    ``start``, ``stop`` and ``step`` parameters.

    This can run in two ways:
    * As a single function generating all numbers in the given range.
    * Recursively, generating numbers one by one with an optional ``delay``.

    The latter mode is useful in order to generate very large sequences
    without completely clogging up the user queue.

    If an optional ``tag`` is given, each number will be emitted only once
    across multiple runs of the crawler.
    """
    number = data.get('number', context.params.get('start', 1))
    stop = context.params.get('stop')
    step = context.params.get('step', 1)
    delay = context.params.get('delay')
    prefix = context.params.get('tag')

    while True:
        tag = None if prefix is None else '%s:%s' % (prefix, number)
        if tag is None or not context.check_tag(tag):
            context.emit(data={'number': number})

        if tag is not None:
            context.set_tag(tag, True)

        number = number + step
        if step > 0 and number >= stop:
            break
        if step < 0 and number <= stop:
            break

        if delay is not None:
            context.recurse(data={'number': number}, delay=delay)
            break
```
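For example, a parameter set like the one below (values invented) would emit the odd numbers 1 through 99 one at a time, re-queueing itself with a two-second delay and skipping numbers already tagged in a previous run:

```python
# Hypothetical params for the sequence initializer.
params = {
    "start": 1,
    "stop": 100,    # exclusive upper bound for a positive step
    "step": 2,
    "delay": 2,     # seconds between recursive re-queues
    "tag": "page",  # emits each number at most once across runs
}
```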
repo: alephdata/memorious | path: memorious/logic/http.py | func_name: ContextHttpResponse.fetch | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/http.py#L162-L185

```python
def fetch(self):
    """Lazily trigger download of the data when requested."""
    if self._file_path is not None:
        return self._file_path
    temp_path = self.context.work_path
    if self._content_hash is not None:
        self._file_path = storage.load_file(self._content_hash,
                                            temp_path=temp_path)
        return self._file_path
    if self.response is not None:
        self._file_path = random_filename(temp_path)
        content_hash = sha1()
        with open(self._file_path, 'wb') as fh:
            for chunk in self.response.iter_content(chunk_size=8192):
                content_hash.update(chunk)
                fh.write(chunk)
        self._remove_file = True
        chash = content_hash.hexdigest()
        self._content_hash = storage.archive_file(self._file_path,
                                                  content_hash=chash)
        if self.http.cache and self.ok:
            self.context.set_tag(self.request_id, self.serialize())
        self.retrieved_at = datetime.utcnow().isoformat()
    return self._file_path
```
repo: alephdata/memorious | path: memorious/util.py | func_name: make_key | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/util.py#L6-L11

```python
def make_key(*criteria):
    """Make a string key out of many criteria."""
    criteria = [stringify(c) for c in criteria]
    criteria = [c for c in criteria if c is not None]
    if len(criteria):
        return ':'.join(criteria)
```
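Behaviour sketch, assuming `stringify` simply normalises each part to text and maps empty values to `None`:

```python
# Illustrative calls (not from the library's test suite).
make_key("occrp", "events", None, "error")  # -> "occrp:events:error" (None parts are dropped)
make_key(None, None)                        # -> None (nothing left to join)
```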
repo: alephdata/memorious | path: memorious/util.py | func_name: random_filename | language: python | partition: train
sha: b4033c5064447ed5f696f9c2bbbc6c12062d2fa4 | url: https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/util.py#L14-L20

```python
def random_filename(path=None):
    """Make a UUID-based file name which is extremely unlikely
    to exist already."""
    filename = uuid4().hex
    if path is not None:
        filename = os.path.join(path, filename)
    return filename
```
repo: jasonlaska/spherecluster | path: spherecluster/util.py | func_name: sample_vMF | language: python | partition: train
sha: 701b0b1909088a56e353b363b2672580d4fe9d93 | url: https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L16-L32

```python
def sample_vMF(mu, kappa, num_samples):
    """Generate num_samples N-dimensional samples from von Mises Fisher
    distribution around center mu \in R^N with concentration kappa.
    """
    dim = len(mu)
    result = np.zeros((num_samples, dim))
    for nn in range(num_samples):
        # sample offset from center (on sphere) with spread kappa
        w = _sample_weight(kappa, dim)

        # sample a point v on the unit sphere that's orthogonal to mu
        v = _sample_orthonormal_to(mu)

        # compute new point
        result[nn, :] = v * np.sqrt(1. - w ** 2) + w * mu

    return result
```
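A minimal usage sketch (parameter values are arbitrary): draw a few hundred unit vectors concentrated around a chosen mean direction and confirm they lie on the unit sphere.

```python
import numpy as np

mu = np.array([1.0, 0.0, 0.0])                 # mean direction, already unit length
samples = sample_vMF(mu, kappa=50.0, num_samples=500)
print(samples.shape)                           # (500, 3)
print(np.allclose(np.linalg.norm(samples, axis=1), 1.0))  # all points on the sphere
```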
repo: jasonlaska/spherecluster | path: spherecluster/util.py | func_name: _sample_weight | language: python | partition: train
sha: 701b0b1909088a56e353b363b2672580d4fe9d93 | url: https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L35-L49

```python
def _sample_weight(kappa, dim):
    """Rejection sampling scheme for sampling distance from center on
    surface of the sphere.
    """
    dim = dim - 1  # since S^{n-1}
    b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa)
    x = (1. - b) / (1. + b)
    c = kappa * x + dim * np.log(1 - x ** 2)

    while True:
        z = np.random.beta(dim / 2., dim / 2.)
        w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
        u = np.random.uniform(low=0, high=1)
        if kappa * w + dim * np.log(1. - x * w) - c >= np.log(u):
            return w
```
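Spelling out the acceptance rule in the code above as formulas (this is a direct transcription of the Python, with $m$ standing for the decremented `dim`):

$$
b = \frac{m}{\sqrt{4\kappa^2 + m^2} + 2\kappa}, \qquad
x_0 = \frac{1 - b}{1 + b}, \qquad
c = \kappa x_0 + m \log\left(1 - x_0^2\right),
$$

then with $z \sim \mathrm{Beta}(m/2,\, m/2)$ and $u \sim \mathrm{Uniform}(0, 1)$ the candidate

$$
w = \frac{1 - (1 + b)\,z}{1 - (1 - b)\,z}
$$

is returned as soon as $\kappa w + m \log(1 - x_0 w) - c \ge \log u$.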
repo: jasonlaska/spherecluster | path: spherecluster/util.py | func_name: _sample_orthonormal_to | language: python | partition: train
sha: 701b0b1909088a56e353b363b2672580d4fe9d93 | url: https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L52-L57

```python
def _sample_orthonormal_to(mu):
    """Sample point on sphere orthogonal to mu."""
    v = np.random.randn(mu.shape[0])
    proj_mu_v = mu * np.dot(mu, v) / np.linalg.norm(mu)
    orthto = v - proj_mu_v
    return orthto / np.linalg.norm(orthto)
```
jasonlaska/spherecluster | spherecluster/spherical_kmeans.py | _spherical_kmeans_single_lloyd | def _spherical_kmeans_single_lloyd(
X,
n_clusters,
sample_weight=None,
max_iter=300,
init="k-means++",
verbose=False,
x_squared_norms=None,
random_state=None,
tol=1e-4,
precompute_distances=True,
):
"""
Modified from sklearn.cluster.k_means_.k_means_single_lloyd.
"""
random_state = check_random_state(random_state)
sample_weight = _check_sample_weight(X, sample_weight)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(
X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms
)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment
# TODO: _labels_inertia should be done with cosine distance
# since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized
# this doesn't really matter.
labels, inertia = _labels_inertia(
X,
sample_weight,
x_squared_norms,
centers,
precompute_distances=precompute_distances,
distances=distances,
)
# computation of the means
if sp.issparse(X):
centers = _k_means._centers_sparse(
X, sample_weight, labels, n_clusters, distances
)
else:
centers = _k_means._centers_dense(
X, sample_weight, labels, n_clusters, distances
)
# l2-normalize centers (this is the main contribution here)
centers = normalize(centers)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print(
"Converged at iteration %d: "
"center shift %e within tolerance %e" % (i, center_shift_total, tol)
)
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = _labels_inertia(
X,
sample_weight,
x_squared_norms,
best_centers,
precompute_distances=precompute_distances,
distances=distances,
)
return best_labels, best_inertia, best_centers, i + 1 | python | def _spherical_kmeans_single_lloyd(
X,
n_clusters,
sample_weight=None,
max_iter=300,
init="k-means++",
verbose=False,
x_squared_norms=None,
random_state=None,
tol=1e-4,
precompute_distances=True,
):
"""
Modified from sklearn.cluster.k_means_.k_means_single_lloyd.
"""
random_state = check_random_state(random_state)
sample_weight = _check_sample_weight(X, sample_weight)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(
X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms
)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment
# TODO: _labels_inertia should be done with cosine distance
# since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized
# this doesn't really matter.
labels, inertia = _labels_inertia(
X,
sample_weight,
x_squared_norms,
centers,
precompute_distances=precompute_distances,
distances=distances,
)
# computation of the means
if sp.issparse(X):
centers = _k_means._centers_sparse(
X, sample_weight, labels, n_clusters, distances
)
else:
centers = _k_means._centers_dense(
X, sample_weight, labels, n_clusters, distances
)
# l2-normalize centers (this is the main contribution here)
centers = normalize(centers)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print(
"Converged at iteration %d: "
"center shift %e within tolerance %e" % (i, center_shift_total, tol)
)
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = _labels_inertia(
X,
sample_weight,
x_squared_norms,
best_centers,
precompute_distances=precompute_distances,
distances=distances,
)
return best_labels, best_inertia, best_centers, i + 1 | [
"def",
"_spherical_kmeans_single_lloyd",
"(",
"X",
",",
"n_clusters",
",",
"sample_weight",
"=",
"None",
",",
"max_iter",
"=",
"300",
",",
"init",
"=",
"\"k-means++\"",
",",
"verbose",
"=",
"False",
",",
"x_squared_norms",
"=",
"None",
",",
"random_state",
"=",
"None",
",",
"tol",
"=",
"1e-4",
",",
"precompute_distances",
"=",
"True",
",",
")",
":",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"sample_weight",
"=",
"_check_sample_weight",
"(",
"X",
",",
"sample_weight",
")",
"best_labels",
",",
"best_inertia",
",",
"best_centers",
"=",
"None",
",",
"None",
",",
"None",
"# init",
"centers",
"=",
"_init_centroids",
"(",
"X",
",",
"n_clusters",
",",
"init",
",",
"random_state",
"=",
"random_state",
",",
"x_squared_norms",
"=",
"x_squared_norms",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Initialization complete\"",
")",
"# Allocate memory to store the distances for each sample to its",
"# closer center for reallocation in case of ties",
"distances",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
")",
",",
"dtype",
"=",
"X",
".",
"dtype",
")",
"# iterations",
"for",
"i",
"in",
"range",
"(",
"max_iter",
")",
":",
"centers_old",
"=",
"centers",
".",
"copy",
"(",
")",
"# labels assignment",
"# TODO: _labels_inertia should be done with cosine distance",
"# since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized",
"# this doesn't really matter.",
"labels",
",",
"inertia",
"=",
"_labels_inertia",
"(",
"X",
",",
"sample_weight",
",",
"x_squared_norms",
",",
"centers",
",",
"precompute_distances",
"=",
"precompute_distances",
",",
"distances",
"=",
"distances",
",",
")",
"# computation of the means",
"if",
"sp",
".",
"issparse",
"(",
"X",
")",
":",
"centers",
"=",
"_k_means",
".",
"_centers_sparse",
"(",
"X",
",",
"sample_weight",
",",
"labels",
",",
"n_clusters",
",",
"distances",
")",
"else",
":",
"centers",
"=",
"_k_means",
".",
"_centers_dense",
"(",
"X",
",",
"sample_weight",
",",
"labels",
",",
"n_clusters",
",",
"distances",
")",
"# l2-normalize centers (this is the main contibution here)",
"centers",
"=",
"normalize",
"(",
"centers",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Iteration %2d, inertia %.3f\"",
"%",
"(",
"i",
",",
"inertia",
")",
")",
"if",
"best_inertia",
"is",
"None",
"or",
"inertia",
"<",
"best_inertia",
":",
"best_labels",
"=",
"labels",
".",
"copy",
"(",
")",
"best_centers",
"=",
"centers",
".",
"copy",
"(",
")",
"best_inertia",
"=",
"inertia",
"center_shift_total",
"=",
"squared_norm",
"(",
"centers_old",
"-",
"centers",
")",
"if",
"center_shift_total",
"<=",
"tol",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Converged at iteration %d: \"",
"\"center shift %e within tolerance %e\"",
"%",
"(",
"i",
",",
"center_shift_total",
",",
"tol",
")",
")",
"break",
"if",
"center_shift_total",
">",
"0",
":",
"# rerun E-step in case of non-convergence so that predicted labels",
"# match cluster centers",
"best_labels",
",",
"best_inertia",
"=",
"_labels_inertia",
"(",
"X",
",",
"sample_weight",
",",
"x_squared_norms",
",",
"best_centers",
",",
"precompute_distances",
"=",
"precompute_distances",
",",
"distances",
"=",
"distances",
",",
")",
"return",
"best_labels",
",",
"best_inertia",
",",
"best_centers",
",",
"i",
"+",
"1"
] | Modified from sklearn.cluster.k_means_.k_means_single_lloyd. | [
"Modified",
"from",
"sklearn",
".",
"cluster",
".",
"k_means_",
".",
"k_means_single_lloyd",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L22-L113 | train |
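The only step in the Lloyd iteration above that differs from plain k-means is the l2-normalization of the updated centers. A self-contained numpy illustration of that projection step (names are illustrative only, not taken from the package):

```python
# Sketch of the spherical centroid update: average the unit-norm members of a
# cluster, then project the mean back onto the unit sphere.
import numpy as np
from sklearn.preprocessing import normalize

members = normalize(np.random.randn(25, 8))          # unit-norm points in one cluster
center = members.mean(axis=0)                        # ordinary k-means centroid
spherical_center = center / np.linalg.norm(center)   # the effect of normalize(centers)
print(round(float(np.linalg.norm(center)), 3),       # < 1.0
      round(float(np.linalg.norm(spherical_center)), 3))  # 1.0
```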
jasonlaska/spherecluster | spherecluster/spherical_kmeans.py | spherical_k_means | def spherical_k_means(
X,
n_clusters,
sample_weight=None,
init="k-means++",
n_init=10,
max_iter=300,
verbose=False,
tol=1e-4,
random_state=None,
copy_x=True,
n_jobs=1,
algorithm="auto",
return_n_iter=False,
):
"""Modified from sklearn.cluster.k_means_.k_means.
"""
if n_init <= 0:
raise ValueError(
"Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init
)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError(
"Number of iterations should be a positive number,"
" got %d instead" % max_iter
)
best_inertia = np.infty
# avoid forcing order when copy_x=False
order = "C" if copy_x else None
X = check_array(
X, accept_sparse="csr", dtype=[np.float64, np.float32], order=order, copy=copy_x
)
# verify that the number of samples given is larger than k
if _num_samples(X) < n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d" % (_num_samples(X), n_clusters)
)
tol = _tolerance(X, tol)
if hasattr(init, "__array__"):
init = check_array(init, dtype=X.dtype.type, order="C", copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
"Explicit initial center position passed: "
"performing only one init in k-means instead of n_init=%d" % n_init,
RuntimeWarning,
stacklevel=2,
)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _spherical_kmeans_single_lloyd(
X,
n_clusters,
sample_weight,
max_iter=max_iter,
init=init,
verbose=verbose,
tol=tol,
x_squared_norms=x_squared_norms,
random_state=random_state,
)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_spherical_kmeans_single_lloyd)(
X,
n_clusters,
sample_weight,
max_iter=max_iter,
init=init,
verbose=verbose,
tol=tol,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed,
)
for seed in seeds
)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia | python | def spherical_k_means(
X,
n_clusters,
sample_weight=None,
init="k-means++",
n_init=10,
max_iter=300,
verbose=False,
tol=1e-4,
random_state=None,
copy_x=True,
n_jobs=1,
algorithm="auto",
return_n_iter=False,
):
"""Modified from sklearn.cluster.k_means_.k_means.
"""
if n_init <= 0:
raise ValueError(
"Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init
)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError(
"Number of iterations should be a positive number,"
" got %d instead" % max_iter
)
best_inertia = np.infty
# avoid forcing order when copy_x=False
order = "C" if copy_x else None
X = check_array(
X, accept_sparse="csr", dtype=[np.float64, np.float32], order=order, copy=copy_x
)
# verify that the number of samples given is larger than k
if _num_samples(X) < n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d" % (_num_samples(X), n_clusters)
)
tol = _tolerance(X, tol)
if hasattr(init, "__array__"):
init = check_array(init, dtype=X.dtype.type, order="C", copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
"Explicit initial center position passed: "
"performing only one init in k-means instead of n_init=%d" % n_init,
RuntimeWarning,
stacklevel=2,
)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _spherical_kmeans_single_lloyd(
X,
n_clusters,
sample_weight,
max_iter=max_iter,
init=init,
verbose=verbose,
tol=tol,
x_squared_norms=x_squared_norms,
random_state=random_state,
)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_spherical_kmeans_single_lloyd)(
X,
n_clusters,
sample_weight,
max_iter=max_iter,
init=init,
verbose=verbose,
tol=tol,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed,
)
for seed in seeds
)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia | [
"def",
"spherical_k_means",
"(",
"X",
",",
"n_clusters",
",",
"sample_weight",
"=",
"None",
",",
"init",
"=",
"\"k-means++\"",
",",
"n_init",
"=",
"10",
",",
"max_iter",
"=",
"300",
",",
"verbose",
"=",
"False",
",",
"tol",
"=",
"1e-4",
",",
"random_state",
"=",
"None",
",",
"copy_x",
"=",
"True",
",",
"n_jobs",
"=",
"1",
",",
"algorithm",
"=",
"\"auto\"",
",",
"return_n_iter",
"=",
"False",
",",
")",
":",
"if",
"n_init",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid number of initializations.\"",
"\" n_init=%d must be bigger than zero.\"",
"%",
"n_init",
")",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"if",
"max_iter",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Number of iterations should be a positive number,\"",
"\" got %d instead\"",
"%",
"max_iter",
")",
"best_inertia",
"=",
"np",
".",
"infty",
"# avoid forcing order when copy_x=False",
"order",
"=",
"\"C\"",
"if",
"copy_x",
"else",
"None",
"X",
"=",
"check_array",
"(",
"X",
",",
"accept_sparse",
"=",
"\"csr\"",
",",
"dtype",
"=",
"[",
"np",
".",
"float64",
",",
"np",
".",
"float32",
"]",
",",
"order",
"=",
"order",
",",
"copy",
"=",
"copy_x",
")",
"# verify that the number of samples given is larger than k",
"if",
"_num_samples",
"(",
"X",
")",
"<",
"n_clusters",
":",
"raise",
"ValueError",
"(",
"\"n_samples=%d should be >= n_clusters=%d\"",
"%",
"(",
"_num_samples",
"(",
"X",
")",
",",
"n_clusters",
")",
")",
"tol",
"=",
"_tolerance",
"(",
"X",
",",
"tol",
")",
"if",
"hasattr",
"(",
"init",
",",
"\"__array__\"",
")",
":",
"init",
"=",
"check_array",
"(",
"init",
",",
"dtype",
"=",
"X",
".",
"dtype",
".",
"type",
",",
"order",
"=",
"\"C\"",
",",
"copy",
"=",
"True",
")",
"_validate_center_shape",
"(",
"X",
",",
"n_clusters",
",",
"init",
")",
"if",
"n_init",
"!=",
"1",
":",
"warnings",
".",
"warn",
"(",
"\"Explicit initial center position passed: \"",
"\"performing only one init in k-means instead of n_init=%d\"",
"%",
"n_init",
",",
"RuntimeWarning",
",",
"stacklevel",
"=",
"2",
",",
")",
"n_init",
"=",
"1",
"# precompute squared norms of data points",
"x_squared_norms",
"=",
"row_norms",
"(",
"X",
",",
"squared",
"=",
"True",
")",
"if",
"n_jobs",
"==",
"1",
":",
"# For a single thread, less memory is needed if we just store one set",
"# of the best results (as opposed to one set per run per thread).",
"for",
"it",
"in",
"range",
"(",
"n_init",
")",
":",
"# run a k-means once",
"labels",
",",
"inertia",
",",
"centers",
",",
"n_iter_",
"=",
"_spherical_kmeans_single_lloyd",
"(",
"X",
",",
"n_clusters",
",",
"sample_weight",
",",
"max_iter",
"=",
"max_iter",
",",
"init",
"=",
"init",
",",
"verbose",
"=",
"verbose",
",",
"tol",
"=",
"tol",
",",
"x_squared_norms",
"=",
"x_squared_norms",
",",
"random_state",
"=",
"random_state",
",",
")",
"# determine if these results are the best so far",
"if",
"best_inertia",
"is",
"None",
"or",
"inertia",
"<",
"best_inertia",
":",
"best_labels",
"=",
"labels",
".",
"copy",
"(",
")",
"best_centers",
"=",
"centers",
".",
"copy",
"(",
")",
"best_inertia",
"=",
"inertia",
"best_n_iter",
"=",
"n_iter_",
"else",
":",
"# parallelisation of k-means runs",
"seeds",
"=",
"random_state",
".",
"randint",
"(",
"np",
".",
"iinfo",
"(",
"np",
".",
"int32",
")",
".",
"max",
",",
"size",
"=",
"n_init",
")",
"results",
"=",
"Parallel",
"(",
"n_jobs",
"=",
"n_jobs",
",",
"verbose",
"=",
"0",
")",
"(",
"delayed",
"(",
"_spherical_kmeans_single_lloyd",
")",
"(",
"X",
",",
"n_clusters",
",",
"sample_weight",
",",
"max_iter",
"=",
"max_iter",
",",
"init",
"=",
"init",
",",
"verbose",
"=",
"verbose",
",",
"tol",
"=",
"tol",
",",
"x_squared_norms",
"=",
"x_squared_norms",
",",
"# Change seed to ensure variety",
"random_state",
"=",
"seed",
",",
")",
"for",
"seed",
"in",
"seeds",
")",
"# Get results with the lowest inertia",
"labels",
",",
"inertia",
",",
"centers",
",",
"n_iters",
"=",
"zip",
"(",
"*",
"results",
")",
"best",
"=",
"np",
".",
"argmin",
"(",
"inertia",
")",
"best_labels",
"=",
"labels",
"[",
"best",
"]",
"best_inertia",
"=",
"inertia",
"[",
"best",
"]",
"best_centers",
"=",
"centers",
"[",
"best",
"]",
"best_n_iter",
"=",
"n_iters",
"[",
"best",
"]",
"if",
"return_n_iter",
":",
"return",
"best_centers",
",",
"best_labels",
",",
"best_inertia",
",",
"best_n_iter",
"else",
":",
"return",
"best_centers",
",",
"best_labels",
",",
"best_inertia"
] | Modified from sklearn.cluster.k_means_.k_means. | [
"Modified",
"from",
"sklearn",
".",
"cluster",
".",
"k_means_",
".",
"k_means",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L116-L228 | train |
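A hedged usage sketch of the module-level function above. It assumes spherecluster (with a compatible scikit-learn) is installed, and that X is already l2-normalized, since this helper does not normalize the data itself (the TODO in the estimator's fit method notes the missing check).

```python
# Hedged usage sketch; input is pre-normalized because spherical_k_means does
# not normalize X on its own.
import numpy as np
from sklearn.preprocessing import normalize
from spherecluster.spherical_kmeans import spherical_k_means

X = normalize(np.random.randn(100, 8))
centers, labels, inertia = spherical_k_means(X, n_clusters=3, random_state=0)
print(centers.shape, labels[:10], round(float(inertia), 3))
```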
jasonlaska/spherecluster | spherecluster/spherical_kmeans.py | SphericalKMeans.fit | def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
y : Ignored
not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None)
"""
if self.normalize:
X = normalize(X)
random_state = check_random_state(self.random_state)
# TODO: add check that all data is unit-normalized
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means(
X,
n_clusters=self.n_clusters,
sample_weight=sample_weight,
init=self.init,
n_init=self.n_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self.tol,
random_state=random_state,
copy_x=self.copy_x,
n_jobs=self.n_jobs,
return_n_iter=True,
)
return self | python | def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
y : Ignored
not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None)
"""
if self.normalize:
X = normalize(X)
random_state = check_random_state(self.random_state)
# TODO: add check that all data is unit-normalized
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means(
X,
n_clusters=self.n_clusters,
sample_weight=sample_weight,
init=self.init,
n_init=self.n_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self.tol,
random_state=random_state,
copy_x=self.copy_x,
n_jobs=self.n_jobs,
return_n_iter=True,
)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"if",
"self",
".",
"normalize",
":",
"X",
"=",
"normalize",
"(",
"X",
")",
"random_state",
"=",
"check_random_state",
"(",
"self",
".",
"random_state",
")",
"# TODO: add check that all data is unit-normalized",
"self",
".",
"cluster_centers_",
",",
"self",
".",
"labels_",
",",
"self",
".",
"inertia_",
",",
"self",
".",
"n_iter_",
"=",
"spherical_k_means",
"(",
"X",
",",
"n_clusters",
"=",
"self",
".",
"n_clusters",
",",
"sample_weight",
"=",
"sample_weight",
",",
"init",
"=",
"self",
".",
"init",
",",
"n_init",
"=",
"self",
".",
"n_init",
",",
"max_iter",
"=",
"self",
".",
"max_iter",
",",
"verbose",
"=",
"self",
".",
"verbose",
",",
"tol",
"=",
"self",
".",
"tol",
",",
"random_state",
"=",
"random_state",
",",
"copy_x",
"=",
"self",
".",
"copy_x",
",",
"n_jobs",
"=",
"self",
".",
"n_jobs",
",",
"return_n_iter",
"=",
"True",
",",
")",
"return",
"self"
] | Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
y : Ignored
not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None) | [
"Compute",
"k",
"-",
"means",
"clustering",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L329-L366 | train |
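A sketch of fitting the estimator itself. The import path `from spherecluster import SphericalKMeans` is an assumption about the package's public API; the behaviour shown follows the fit method above, which normalizes X whenever the estimator's normalize flag is truthy, so raw feature vectors can be passed directly in that case.

```python
# Hedged estimator usage sketch (assumed public import path).
import numpy as np
from spherecluster import SphericalKMeans

X = np.random.randn(200, 16)
skm = SphericalKMeans(n_clusters=5, random_state=0).fit(X)
print(skm.cluster_centers_.shape)                           # (5, 16)
print(np.round(np.linalg.norm(skm.cluster_centers_, axis=1), 3))  # each row ~1.0
print(round(float(skm.inertia_), 3), skm.n_iter_)
```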
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _inertia_from_labels | def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia) | python | def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia) | [
"def",
"_inertia_from_labels",
"(",
"X",
",",
"centers",
",",
"labels",
")",
":",
"n_examples",
",",
"n_features",
"=",
"X",
".",
"shape",
"inertia",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_examples",
",",
")",
")",
"for",
"ee",
"in",
"range",
"(",
"n_examples",
")",
":",
"inertia",
"[",
"ee",
"]",
"=",
"1",
"-",
"X",
"[",
"ee",
",",
":",
"]",
".",
"dot",
"(",
"centers",
"[",
"int",
"(",
"labels",
"[",
"ee",
"]",
")",
",",
":",
"]",
".",
"T",
")",
"return",
"np",
".",
"sum",
"(",
"inertia",
")"
] | Compute inertia with cosine distance using known labels. | [
"Compute",
"inertia",
"with",
"cosine",
"distance",
"using",
"known",
"labels",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L25-L33 | train |
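The loop above computes the cosine-distance inertia one sample at a time. A vectorized counterpart (a sketch assuming dense X and integer-castable labels) is handy as a cross-check and reads directly as the sum over samples of 1 - <x_i, center_{label_i}>:

```python
# Vectorized equivalent of _inertia_from_labels (dense X assumed).
import numpy as np

def inertia_from_labels_vectorized(X, centers, labels):
    assigned = centers[labels.astype(int)]                 # (n_examples, n_features)
    return np.sum(1.0 - np.einsum("ij,ij->i", X, assigned))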
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _labels_inertia | def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia) | python | def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia) | [
"def",
"_labels_inertia",
"(",
"X",
",",
"centers",
")",
":",
"n_examples",
",",
"n_features",
"=",
"X",
".",
"shape",
"n_clusters",
",",
"n_features",
"=",
"centers",
".",
"shape",
"labels",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_examples",
",",
")",
")",
"inertia",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_examples",
",",
")",
")",
"for",
"ee",
"in",
"range",
"(",
"n_examples",
")",
":",
"dists",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_clusters",
",",
")",
")",
"for",
"cc",
"in",
"range",
"(",
"n_clusters",
")",
":",
"dists",
"[",
"cc",
"]",
"=",
"1",
"-",
"X",
"[",
"ee",
",",
":",
"]",
".",
"dot",
"(",
"centers",
"[",
"cc",
",",
":",
"]",
".",
"T",
")",
"labels",
"[",
"ee",
"]",
"=",
"np",
".",
"argmin",
"(",
"dists",
")",
"inertia",
"[",
"ee",
"]",
"=",
"dists",
"[",
"int",
"(",
"labels",
"[",
"ee",
"]",
")",
"]",
"return",
"labels",
",",
"np",
".",
"sum",
"(",
"inertia",
")"
] | Compute labels and inertia with cosine distance. | [
"Compute",
"labels",
"and",
"inertia",
"with",
"cosine",
"distance",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L36-L53 | train |
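Similarly, the nested loops above can be collapsed into one matrix product that yields every cosine distance at once, followed by a per-row argmin. A sketch for dense X:

```python
# Vectorized counterpart of _labels_inertia (dense X assumed).
import numpy as np

def labels_inertia_vectorized(X, centers):
    dists = 1.0 - X.dot(centers.T)                         # (n_examples, n_clusters)
    labels = np.argmin(dists, axis=1)
    inertia = dists[np.arange(X.shape[0]), labels].sum()
    return labels.astype(float), inertia
```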
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _S | def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale | python | def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale | [
"def",
"_S",
"(",
"kappa",
",",
"alpha",
",",
"beta",
")",
":",
"kappa",
"=",
"1.",
"*",
"np",
".",
"abs",
"(",
"kappa",
")",
"alpha",
"=",
"1.",
"*",
"alpha",
"beta",
"=",
"1.",
"*",
"np",
".",
"abs",
"(",
"beta",
")",
"a_plus_b",
"=",
"alpha",
"+",
"beta",
"u",
"=",
"np",
".",
"sqrt",
"(",
"kappa",
"**",
"2",
"+",
"beta",
"**",
"2",
")",
"if",
"alpha",
"==",
"0",
":",
"alpha_scale",
"=",
"0",
"else",
":",
"alpha_scale",
"=",
"alpha",
"*",
"np",
".",
"log",
"(",
"(",
"alpha",
"+",
"u",
")",
"/",
"a_plus_b",
")",
"return",
"u",
"-",
"beta",
"-",
"alpha_scale"
] | Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html | [
"Compute",
"the",
"antiderivative",
"of",
"the",
"Amos",
"-",
"type",
"bound",
"G",
"on",
"the",
"modified",
"Bessel",
"function",
"ratio",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L105-L124 | train |
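A quick numeric check of the antiderivative above: when alpha == 0 the alpha_scale branch drops out, so S reduces to sqrt(kappa^2 + beta^2) - beta. The import path below is assumed from the record's file path.

```python
# Numeric sanity check of _S with alpha == 0 (assumed import path).
import numpy as np
from spherecluster.von_mises_fisher_mixture import _S

kappa, beta = 5.0, 2.0
print(_S(kappa, 0.0, beta), np.sqrt(kappa ** 2 + beta ** 2) - beta)  # equal values
```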
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _init_unit_centers | def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers | python | def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers | [
"def",
"_init_unit_centers",
"(",
"X",
",",
"n_clusters",
",",
"random_state",
",",
"init",
")",
":",
"n_examples",
",",
"n_features",
"=",
"np",
".",
"shape",
"(",
"X",
")",
"if",
"isinstance",
"(",
"init",
",",
"np",
".",
"ndarray",
")",
":",
"n_init_clusters",
",",
"n_init_features",
"=",
"init",
".",
"shape",
"assert",
"n_init_clusters",
"==",
"n_clusters",
"assert",
"n_init_features",
"==",
"n_features",
"# ensure unit normed centers",
"centers",
"=",
"init",
"for",
"cc",
"in",
"range",
"(",
"n_clusters",
")",
":",
"centers",
"[",
"cc",
",",
":",
"]",
"=",
"centers",
"[",
"cc",
",",
":",
"]",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"centers",
"[",
"cc",
",",
":",
"]",
")",
"return",
"centers",
"elif",
"init",
"==",
"\"spherical-k-means\"",
":",
"labels",
",",
"inertia",
",",
"centers",
",",
"iters",
"=",
"spherical_kmeans",
".",
"_spherical_kmeans_single_lloyd",
"(",
"X",
",",
"n_clusters",
",",
"x_squared_norms",
"=",
"np",
".",
"ones",
"(",
"(",
"n_examples",
",",
")",
")",
",",
"init",
"=",
"\"k-means++\"",
")",
"return",
"centers",
"elif",
"init",
"==",
"\"random\"",
":",
"centers",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"n_clusters",
",",
"n_features",
")",
"for",
"cc",
"in",
"range",
"(",
"n_clusters",
")",
":",
"centers",
"[",
"cc",
",",
":",
"]",
"=",
"centers",
"[",
"cc",
",",
":",
"]",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"centers",
"[",
"cc",
",",
":",
"]",
")",
"return",
"centers",
"elif",
"init",
"==",
"\"k-means++\"",
":",
"centers",
"=",
"_init_centroids",
"(",
"X",
",",
"n_clusters",
",",
"\"k-means++\"",
",",
"random_state",
"=",
"random_state",
",",
"x_squared_norms",
"=",
"np",
".",
"ones",
"(",
"(",
"n_examples",
",",
")",
")",
",",
")",
"for",
"cc",
"in",
"range",
"(",
"n_clusters",
")",
":",
"centers",
"[",
"cc",
",",
":",
"]",
"=",
"centers",
"[",
"cc",
",",
":",
"]",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"centers",
"[",
"cc",
",",
":",
"]",
")",
"return",
"centers",
"elif",
"init",
"==",
"\"random-orthonormal\"",
":",
"centers",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"n_clusters",
",",
"n_features",
")",
"q",
",",
"r",
"=",
"np",
".",
"linalg",
".",
"qr",
"(",
"centers",
".",
"T",
",",
"mode",
"=",
"\"reduced\"",
")",
"return",
"q",
".",
"T",
"elif",
"init",
"==",
"\"random-class\"",
":",
"centers",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_clusters",
",",
"n_features",
")",
")",
"for",
"cc",
"in",
"range",
"(",
"n_clusters",
")",
":",
"while",
"np",
".",
"linalg",
".",
"norm",
"(",
"centers",
"[",
"cc",
",",
":",
"]",
")",
"==",
"0",
":",
"labels",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"n_clusters",
",",
"n_examples",
")",
"centers",
"[",
"cc",
",",
":",
"]",
"=",
"X",
"[",
"labels",
"==",
"cc",
",",
":",
"]",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"for",
"cc",
"in",
"range",
"(",
"n_clusters",
")",
":",
"centers",
"[",
"cc",
",",
":",
"]",
"=",
"centers",
"[",
"cc",
",",
":",
"]",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"centers",
"[",
"cc",
",",
":",
"]",
")",
"return",
"centers"
] | Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers. | [
"Initializes",
"unit",
"norm",
"centers",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L171-L252 | train |
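Whatever init strategy is chosen above, the returned centers are unit-norm rows; the "random-orthonormal" option additionally makes the rows mutually orthogonal via the QR factorization. A hedged sketch using the assumed import path from the record:

```python
# Hedged sketch: 'random-orthonormal' init yields orthonormal unit-norm centers.
import numpy as np
from sklearn.preprocessing import normalize
from spherecluster.von_mises_fisher_mixture import _init_unit_centers

X = normalize(np.random.randn(60, 6))
rs = np.random.RandomState(0)
centers = _init_unit_centers(X, 4, rs, "random-orthonormal")
print(np.round(centers @ centers.T, 6))   # ~ 4x4 identity matrix
```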
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _expectation | def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
"""Compute the log-likelihood of each datapoint being in each cluster.
Parameters
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ]
Returns
----------
posterior : array, [n_centers, n_examples]
"""
n_examples, n_features = np.shape(X)
n_clusters, _ = centers.shape
if n_features <= 50: # works up to about 50 before becoming numerically unstable
vmf_f = _vmf_log
else:
vmf_f = _vmf_log_asymptotic
f_log = np.zeros((n_clusters, n_examples))
for cc in range(n_clusters):
f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])
posterior = np.zeros((n_clusters, n_examples))
if posterior_type == "soft":
weights_log = np.log(weights)
posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
for ee in range(n_examples):
posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))
elif posterior_type == "hard":
weights_log = np.log(weights)
weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
for ee in range(n_examples):
posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0
return posterior | python | def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
"""Compute the log-likelihood of each datapoint being in each cluster.
Parameters
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ]
Returns
----------
posterior : array, [n_centers, n_examples]
"""
n_examples, n_features = np.shape(X)
n_clusters, _ = centers.shape
if n_features <= 50: # works up to about 50 before becoming numerically unstable
vmf_f = _vmf_log
else:
vmf_f = _vmf_log_asymptotic
f_log = np.zeros((n_clusters, n_examples))
for cc in range(n_clusters):
f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])
posterior = np.zeros((n_clusters, n_examples))
if posterior_type == "soft":
weights_log = np.log(weights)
posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
for ee in range(n_examples):
posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))
elif posterior_type == "hard":
weights_log = np.log(weights)
weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
for ee in range(n_examples):
posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0
return posterior | [
"def",
"_expectation",
"(",
"X",
",",
"centers",
",",
"weights",
",",
"concentrations",
",",
"posterior_type",
"=",
"\"soft\"",
")",
":",
"n_examples",
",",
"n_features",
"=",
"np",
".",
"shape",
"(",
"X",
")",
"n_clusters",
",",
"_",
"=",
"centers",
".",
"shape",
"if",
"n_features",
"<=",
"50",
":",
"# works up to about 50 before numrically unstable",
"vmf_f",
"=",
"_vmf_log",
"else",
":",
"vmf_f",
"=",
"_vmf_log_asymptotic",
"f_log",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_clusters",
",",
"n_examples",
")",
")",
"for",
"cc",
"in",
"range",
"(",
"n_clusters",
")",
":",
"f_log",
"[",
"cc",
",",
":",
"]",
"=",
"vmf_f",
"(",
"X",
",",
"concentrations",
"[",
"cc",
"]",
",",
"centers",
"[",
"cc",
",",
":",
"]",
")",
"posterior",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_clusters",
",",
"n_examples",
")",
")",
"if",
"posterior_type",
"==",
"\"soft\"",
":",
"weights_log",
"=",
"np",
".",
"log",
"(",
"weights",
")",
"posterior",
"=",
"np",
".",
"tile",
"(",
"weights_log",
".",
"T",
",",
"(",
"n_examples",
",",
"1",
")",
")",
".",
"T",
"+",
"f_log",
"for",
"ee",
"in",
"range",
"(",
"n_examples",
")",
":",
"posterior",
"[",
":",
",",
"ee",
"]",
"=",
"np",
".",
"exp",
"(",
"posterior",
"[",
":",
",",
"ee",
"]",
"-",
"logsumexp",
"(",
"posterior",
"[",
":",
",",
"ee",
"]",
")",
")",
"elif",
"posterior_type",
"==",
"\"hard\"",
":",
"weights_log",
"=",
"np",
".",
"log",
"(",
"weights",
")",
"weighted_f_log",
"=",
"np",
".",
"tile",
"(",
"weights_log",
".",
"T",
",",
"(",
"n_examples",
",",
"1",
")",
")",
".",
"T",
"+",
"f_log",
"for",
"ee",
"in",
"range",
"(",
"n_examples",
")",
":",
"posterior",
"[",
"np",
".",
"argmax",
"(",
"weighted_f_log",
"[",
":",
",",
"ee",
"]",
")",
",",
"ee",
"]",
"=",
"1.0",
"return",
"posterior"
] | Compute the log-likelihood of each datapoint being in each cluster.
Parameters
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ]
Returns
----------
posterior : array, [n_centers, n_examples] | [
"Compute",
"the",
"log",
"-",
"likelihood",
"of",
"each",
"datapoint",
"being",
"in",
"each",
"cluster",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L255-L293 | train |
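With posterior_type="soft", every column of the returned (n_clusters, n_examples) matrix is a probability vector over clusters, since each column is exponentiated after subtracting its log-sum-exp. A small sketch, assuming the module import path from the record and its scipy/sklearn dependencies:

```python
# Sketch: soft posteriors from _expectation sum to 1 over clusters per example.
import numpy as np
from sklearn.preprocessing import normalize
from spherecluster.von_mises_fisher_mixture import _expectation

X = normalize(np.random.randn(20, 5))
centers = normalize(np.random.randn(3, 5))
weights = np.full(3, 1.0 / 3.0)
concentrations = np.full(3, 10.0)
posterior = _expectation(X, centers, weights, concentrations, posterior_type="soft")
print(posterior.shape, np.round(posterior.sum(axis=0)[:5], 6))  # (3, 20), ~1.0 each
```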
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _maximization | def _maximization(X, posterior, force_weights=None):
"""Estimate new centers, weights, and concentrations from
Parameters
----------
posterior : array, [n_centers, n_examples]
The posterior matrix from the expectation step.
force_weights : None or array, [n_centers, ]
If None is passed, will estimate weights.
If an array is passed, will use instead of estimating.
Returns
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ]
"""
n_examples, n_features = X.shape
n_clusters, n_examples = posterior.shape
concentrations = np.zeros((n_clusters,))
centers = np.zeros((n_clusters, n_features))
if force_weights is None:
weights = np.zeros((n_clusters,))
for cc in range(n_clusters):
# update weights (alpha)
if force_weights is None:
weights[cc] = np.mean(posterior[cc, :])
else:
weights = force_weights
# update centers (mu)
X_scaled = X.copy()
if sp.issparse(X):
X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
else:
for ee in range(n_examples):
X_scaled[ee, :] *= posterior[cc, ee]
centers[cc, :] = X_scaled.sum(axis=0)
# normalize centers
center_norm = np.linalg.norm(centers[cc, :])
if center_norm > 1e-8:
centers[cc, :] = centers[cc, :] / center_norm
# update concentration (kappa) [TODO: add other kappa approximations]
rbar = center_norm / (n_examples * weights[cc])
concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
if np.abs(rbar - 1.0) < 1e-10:
concentrations[cc] = MAX_CONTENTRATION
else:
concentrations[cc] /= 1. - np.power(rbar, 2.)
# let python know we can free this (good for large dense X)
del X_scaled
return centers, weights, concentrations | python | def _maximization(X, posterior, force_weights=None):
"""Estimate new centers, weights, and concentrations from
Parameters
----------
posterior : array, [n_centers, n_examples]
The posterior matrix from the expectation step.
force_weights : None or array, [n_centers, ]
If None is passed, will estimate weights.
If an array is passed, will use instead of estimating.
Returns
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ]
"""
n_examples, n_features = X.shape
n_clusters, n_examples = posterior.shape
concentrations = np.zeros((n_clusters,))
centers = np.zeros((n_clusters, n_features))
if force_weights is None:
weights = np.zeros((n_clusters,))
for cc in range(n_clusters):
# update weights (alpha)
if force_weights is None:
weights[cc] = np.mean(posterior[cc, :])
else:
weights = force_weights
# update centers (mu)
X_scaled = X.copy()
if sp.issparse(X):
X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
else:
for ee in range(n_examples):
X_scaled[ee, :] *= posterior[cc, ee]
centers[cc, :] = X_scaled.sum(axis=0)
# normalize centers
center_norm = np.linalg.norm(centers[cc, :])
if center_norm > 1e-8:
centers[cc, :] = centers[cc, :] / center_norm
# update concentration (kappa) [TODO: add other kappa approximations]
rbar = center_norm / (n_examples * weights[cc])
concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
if np.abs(rbar - 1.0) < 1e-10:
concentrations[cc] = MAX_CONTENTRATION
else:
concentrations[cc] /= 1. - np.power(rbar, 2.)
# let python know we can free this (good for large dense X)
del X_scaled
return centers, weights, concentrations | [
"def",
"_maximization",
"(",
"X",
",",
"posterior",
",",
"force_weights",
"=",
"None",
")",
":",
"n_examples",
",",
"n_features",
"=",
"X",
".",
"shape",
"n_clusters",
",",
"n_examples",
"=",
"posterior",
".",
"shape",
"concentrations",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_clusters",
",",
")",
")",
"centers",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_clusters",
",",
"n_features",
")",
")",
"if",
"force_weights",
"is",
"None",
":",
"weights",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_clusters",
",",
")",
")",
"for",
"cc",
"in",
"range",
"(",
"n_clusters",
")",
":",
"# update weights (alpha)",
"if",
"force_weights",
"is",
"None",
":",
"weights",
"[",
"cc",
"]",
"=",
"np",
".",
"mean",
"(",
"posterior",
"[",
"cc",
",",
":",
"]",
")",
"else",
":",
"weights",
"=",
"force_weights",
"# update centers (mu)",
"X_scaled",
"=",
"X",
".",
"copy",
"(",
")",
"if",
"sp",
".",
"issparse",
"(",
"X",
")",
":",
"X_scaled",
".",
"data",
"*=",
"posterior",
"[",
"cc",
",",
":",
"]",
".",
"repeat",
"(",
"np",
".",
"diff",
"(",
"X_scaled",
".",
"indptr",
")",
")",
"else",
":",
"for",
"ee",
"in",
"range",
"(",
"n_examples",
")",
":",
"X_scaled",
"[",
"ee",
",",
":",
"]",
"*=",
"posterior",
"[",
"cc",
",",
"ee",
"]",
"centers",
"[",
"cc",
",",
":",
"]",
"=",
"X_scaled",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"# normalize centers",
"center_norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"centers",
"[",
"cc",
",",
":",
"]",
")",
"if",
"center_norm",
">",
"1e-8",
":",
"centers",
"[",
"cc",
",",
":",
"]",
"=",
"centers",
"[",
"cc",
",",
":",
"]",
"/",
"center_norm",
"# update concentration (kappa) [TODO: add other kappa approximations]",
"rbar",
"=",
"center_norm",
"/",
"(",
"n_examples",
"*",
"weights",
"[",
"cc",
"]",
")",
"concentrations",
"[",
"cc",
"]",
"=",
"rbar",
"*",
"n_features",
"-",
"np",
".",
"power",
"(",
"rbar",
",",
"3.",
")",
"if",
"np",
".",
"abs",
"(",
"rbar",
"-",
"1.0",
")",
"<",
"1e-10",
":",
"concentrations",
"[",
"cc",
"]",
"=",
"MAX_CONTENTRATION",
"else",
":",
"concentrations",
"[",
"cc",
"]",
"/=",
"1.",
"-",
"np",
".",
"power",
"(",
"rbar",
",",
"2.",
")",
"# let python know we can free this (good for large dense X)",
"del",
"X_scaled",
"return",
"centers",
",",
"weights",
",",
"concentrations"
] | Estimate new centers, weights, and concentrations from the posterior.
Parameters
----------
posterior : array, [n_centers, n_examples]
The posterior matrix from the expectation step.
force_weights : None or array, [n_centers, ]
If None is passed, will estimate weights.
If an array is passed, will use instead of estimating.
Returns
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ] | [
"Estimate",
"new",
"centers",
"weights",
"and",
"concentrations",
"from"
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L296-L354 | train |
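The concentration update in the maximization step above is the Banerjee et al. (2005) closed-form approximation kappa ≈ (rbar * d - rbar^3) / (1 - rbar^2), with a cap when rbar is numerically 1. A standalone sketch of just that formula (the 1e10 value is an illustrative stand-in for the module's MAX_CONTENTRATION constant, whose actual value is not shown here):

```python
# Standalone sketch of the Banerjee et al. (2005) kappa approximation used above.
import numpy as np

def banerjee_kappa(rbar, n_features):
    if np.abs(rbar - 1.0) < 1e-10:
        return 1e10  # illustrative stand-in for the MAX_CONTENTRATION cap
    return (rbar * n_features - rbar ** 3) / (1.0 - rbar ** 2)

print(round(banerjee_kappa(0.9, 50), 2))  # large kappa for a tightly concentrated cluster
```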
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _movMF | def _movMF(
X,
n_clusters,
posterior_type="soft",
force_weights=None,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
):
"""Mixture of von Mises Fisher clustering.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations; see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in the expectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
"""
random_state = check_random_state(random_state)
n_examples, n_features = np.shape(X)
# init centers (mus)
centers = _init_unit_centers(X, n_clusters, random_state, init)
# init weights (alphas)
if force_weights is None:
weights = np.ones((n_clusters,))
weights = weights / np.sum(weights)
else:
weights = force_weights
# init concentrations (kappas)
concentrations = np.ones((n_clusters,))
if verbose:
print("Initialization complete")
for iter in range(max_iter):
centers_prev = centers.copy()
# expectation step
posterior = _expectation(
X, centers, weights, concentrations, posterior_type=posterior_type
)
# maximization step
centers, weights, concentrations = _maximization(
X, posterior, force_weights=force_weights
)
# check convergence
tolcheck = squared_norm(centers_prev - centers)
if tolcheck <= tol:
if verbose:
print(
"Converged at iteration %d: "
"center shift %e within tolerance %e" % (iter, tolcheck, tol)
)
break
# labels come for free via posterior
labels = np.zeros((n_examples,))
for ee in range(n_examples):
labels[ee] = np.argmax(posterior[:, ee])
inertia = _inertia_from_labels(X, centers, labels)
return centers, weights, concentrations, posterior, labels, inertia | python | def _movMF(
X,
n_clusters,
posterior_type="soft",
force_weights=None,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
):
"""Mixture of von Mises Fisher clustering.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations; see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in the expectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
"""
random_state = check_random_state(random_state)
n_examples, n_features = np.shape(X)
# init centers (mus)
centers = _init_unit_centers(X, n_clusters, random_state, init)
# init weights (alphas)
if force_weights is None:
weights = np.ones((n_clusters,))
weights = weights / np.sum(weights)
else:
weights = force_weights
# init concentrations (kappas)
concentrations = np.ones((n_clusters,))
if verbose:
print("Initialization complete")
for iter in range(max_iter):
centers_prev = centers.copy()
# expectation step
posterior = _expectation(
X, centers, weights, concentrations, posterior_type=posterior_type
)
# maximization step
centers, weights, concentrations = _maximization(
X, posterior, force_weights=force_weights
)
# check convergence
tolcheck = squared_norm(centers_prev - centers)
if tolcheck <= tol:
if verbose:
print(
"Converged at iteration %d: "
"center shift %e within tolerance %e" % (iter, tolcheck, tol)
)
break
# labels come for free via posterior
labels = np.zeros((n_examples,))
for ee in range(n_examples):
labels[ee] = np.argmax(posterior[:, ee])
inertia = _inertia_from_labels(X, centers, labels)
return centers, weights, concentrations, posterior, labels, inertia | [
"def",
"_movMF",
"(",
"X",
",",
"n_clusters",
",",
"posterior_type",
"=",
"\"soft\"",
",",
"force_weights",
"=",
"None",
",",
"max_iter",
"=",
"300",
",",
"verbose",
"=",
"False",
",",
"init",
"=",
"\"random-class\"",
",",
"random_state",
"=",
"None",
",",
"tol",
"=",
"1e-6",
",",
")",
":",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"n_examples",
",",
"n_features",
"=",
"np",
".",
"shape",
"(",
"X",
")",
"# init centers (mus)",
"centers",
"=",
"_init_unit_centers",
"(",
"X",
",",
"n_clusters",
",",
"random_state",
",",
"init",
")",
"# init weights (alphas)",
"if",
"force_weights",
"is",
"None",
":",
"weights",
"=",
"np",
".",
"ones",
"(",
"(",
"n_clusters",
",",
")",
")",
"weights",
"=",
"weights",
"/",
"np",
".",
"sum",
"(",
"weights",
")",
"else",
":",
"weights",
"=",
"force_weights",
"# init concentrations (kappas)",
"concentrations",
"=",
"np",
".",
"ones",
"(",
"(",
"n_clusters",
",",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Initialization complete\"",
")",
"for",
"iter",
"in",
"range",
"(",
"max_iter",
")",
":",
"centers_prev",
"=",
"centers",
".",
"copy",
"(",
")",
"# expectation step",
"posterior",
"=",
"_expectation",
"(",
"X",
",",
"centers",
",",
"weights",
",",
"concentrations",
",",
"posterior_type",
"=",
"posterior_type",
")",
"# maximization step",
"centers",
",",
"weights",
",",
"concentrations",
"=",
"_maximization",
"(",
"X",
",",
"posterior",
",",
"force_weights",
"=",
"force_weights",
")",
"# check convergence",
"tolcheck",
"=",
"squared_norm",
"(",
"centers_prev",
"-",
"centers",
")",
"if",
"tolcheck",
"<=",
"tol",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Converged at iteration %d: \"",
"\"center shift %e within tolerance %e\"",
"%",
"(",
"iter",
",",
"tolcheck",
",",
"tol",
")",
")",
"break",
"# labels come for free via posterior",
"labels",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_examples",
",",
")",
")",
"for",
"ee",
"in",
"range",
"(",
"n_examples",
")",
":",
"labels",
"[",
"ee",
"]",
"=",
"np",
".",
"argmax",
"(",
"posterior",
"[",
":",
",",
"ee",
"]",
")",
"inertia",
"=",
"_inertia_from_labels",
"(",
"X",
",",
"centers",
",",
"labels",
")",
"return",
"centers",
",",
"weights",
",",
"concentrations",
",",
"posterior",
",",
"labels",
",",
"inertia"
] | Mixture of von Mises Fisher clustering.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in expectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean. | [
"Mixture",
"of",
"von",
"Mises",
"Fisher",
"clustering",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L357-L497 | train |
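The record above shows only the EM loop; the `_expectation` helper it calls is not included. The following is a rough, self-contained sketch of what a soft E-step for a von Mises-Fisher mixture looks like, assuming the standard density with the Bessel-function normalizer. The helper names `log_vmf_density` and `soft_responsibilities` are illustrative and not part of spherecluster.

```python
import numpy as np
from scipy.special import ive  # exponentially scaled modified Bessel function I_nu

def log_vmf_density(X, mu, kappa):
    # log f(x; mu, kappa) = log C_d(kappa) + kappa * mu^T x, where
    # C_d(kappa) = kappa^(d/2 - 1) / ((2*pi)^(d/2) * I_{d/2-1}(kappa))
    d = X.shape[1]
    nu = d / 2.0 - 1.0
    # log I_nu(kappa) = log ive(nu, kappa) + kappa (ive is numerically safer)
    log_c = nu * np.log(kappa) - (d / 2.0) * np.log(2 * np.pi) - (np.log(ive(nu, kappa)) + kappa)
    return log_c + kappa * X.dot(mu)

def soft_responsibilities(X, centers, weights, kappas):
    # posterior[k, i] = P(cluster k | x_i); rows of X must be unit-norm
    log_post = np.array([
        np.log(w) + log_vmf_density(X, mu, k)
        for w, mu, k in zip(weights, centers, kappas)
    ])
    log_post -= log_post.max(axis=0)   # stabilize before exponentiating
    post = np.exp(log_post)
    return post / post.sum(axis=0)

# tiny demo on the 2-sphere
X = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
mus = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
print(np.round(soft_responsibilities(X, mus, [0.5, 0.5], [5.0, 5.0]), 3))
```

A hard posterior (posterior_type="hard") would replace each column with a one-hot vector at its argmax, which is consistent with how labels are read off the posterior at the end of the loop above.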
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | movMF | def movMF(
X,
n_clusters,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
):
"""Wrapper for parallelization of _movMF and running n_init times.
"""
if n_init <= 0:
raise ValueError(
"Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init
)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError(
"Number of iterations should be a positive number,"
" got %d instead" % max_iter
)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
if hasattr(init, "__array__"):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
"Explicit initial center position passed: "
"performing only one init in k-means instead of n_init=%d" % n_init,
RuntimeWarning,
stacklevel=2,
)
n_init = 1
# defaults
best_centers = None
best_labels = None
best_weights = None
best_concentrations = None
best_posterior = None
best_inertia = None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# cluster on the sphere
(centers, weights, concentrations, posterior, labels, inertia) = _movMF(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_centers = centers.copy()
best_labels = labels.copy()
best_weights = weights.copy()
best_concentrations = concentrations.copy()
best_posterior = posterior.copy()
best_inertia = inertia
else:
# parallelisation of movMF runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_movMF)(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
for seed in seeds
)
# Get results with the lowest inertia
centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_concentrations = concentrations[best]
best_posterior = posteriors[best]
best_weights = weights[best]
return (
best_centers,
best_labels,
best_inertia,
best_weights,
best_concentrations,
best_posterior,
) | python | def movMF(
X,
n_clusters,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
):
"""Wrapper for parallelization of _movMF and running n_init times.
"""
if n_init <= 0:
raise ValueError(
"Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init
)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError(
"Number of iterations should be a positive number,"
" got %d instead" % max_iter
)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
if hasattr(init, "__array__"):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
"Explicit initial center position passed: "
"performing only one init in k-means instead of n_init=%d" % n_init,
RuntimeWarning,
stacklevel=2,
)
n_init = 1
# defaults
best_centers = None
best_labels = None
best_weights = None
best_concentrations = None
best_posterior = None
best_inertia = None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# cluster on the sphere
(centers, weights, concentrations, posterior, labels, inertia) = _movMF(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_centers = centers.copy()
best_labels = labels.copy()
best_weights = weights.copy()
best_concentrations = concentrations.copy()
best_posterior = posterior.copy()
best_inertia = inertia
else:
# parallelisation of movMF runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_movMF)(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
for seed in seeds
)
# Get results with the lowest inertia
centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_concentrations = concentrations[best]
best_posterior = posteriors[best]
best_weights = weights[best]
return (
best_centers,
best_labels,
best_inertia,
best_weights,
best_concentrations,
best_posterior,
) | [
"def",
"movMF",
"(",
"X",
",",
"n_clusters",
",",
"posterior_type",
"=",
"\"soft\"",
",",
"force_weights",
"=",
"None",
",",
"n_init",
"=",
"10",
",",
"n_jobs",
"=",
"1",
",",
"max_iter",
"=",
"300",
",",
"verbose",
"=",
"False",
",",
"init",
"=",
"\"random-class\"",
",",
"random_state",
"=",
"None",
",",
"tol",
"=",
"1e-6",
",",
"copy_x",
"=",
"True",
",",
")",
":",
"if",
"n_init",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid number of initializations.\"",
"\" n_init=%d must be bigger than zero.\"",
"%",
"n_init",
")",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"if",
"max_iter",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Number of iterations should be a positive number,\"",
"\" got %d instead\"",
"%",
"max_iter",
")",
"best_inertia",
"=",
"np",
".",
"infty",
"X",
"=",
"as_float_array",
"(",
"X",
",",
"copy",
"=",
"copy_x",
")",
"tol",
"=",
"_tolerance",
"(",
"X",
",",
"tol",
")",
"if",
"hasattr",
"(",
"init",
",",
"\"__array__\"",
")",
":",
"init",
"=",
"check_array",
"(",
"init",
",",
"dtype",
"=",
"X",
".",
"dtype",
".",
"type",
",",
"copy",
"=",
"True",
")",
"_validate_center_shape",
"(",
"X",
",",
"n_clusters",
",",
"init",
")",
"if",
"n_init",
"!=",
"1",
":",
"warnings",
".",
"warn",
"(",
"\"Explicit initial center position passed: \"",
"\"performing only one init in k-means instead of n_init=%d\"",
"%",
"n_init",
",",
"RuntimeWarning",
",",
"stacklevel",
"=",
"2",
",",
")",
"n_init",
"=",
"1",
"# defaults",
"best_centers",
"=",
"None",
"best_labels",
"=",
"None",
"best_weights",
"=",
"None",
"best_concentrations",
"=",
"None",
"best_posterior",
"=",
"None",
"best_inertia",
"=",
"None",
"if",
"n_jobs",
"==",
"1",
":",
"# For a single thread, less memory is needed if we just store one set",
"# of the best results (as opposed to one set per run per thread).",
"for",
"it",
"in",
"range",
"(",
"n_init",
")",
":",
"# cluster on the sphere",
"(",
"centers",
",",
"weights",
",",
"concentrations",
",",
"posterior",
",",
"labels",
",",
"inertia",
")",
"=",
"_movMF",
"(",
"X",
",",
"n_clusters",
",",
"posterior_type",
"=",
"posterior_type",
",",
"force_weights",
"=",
"force_weights",
",",
"max_iter",
"=",
"max_iter",
",",
"verbose",
"=",
"verbose",
",",
"init",
"=",
"init",
",",
"random_state",
"=",
"random_state",
",",
"tol",
"=",
"tol",
",",
")",
"# determine if these results are the best so far",
"if",
"best_inertia",
"is",
"None",
"or",
"inertia",
"<",
"best_inertia",
":",
"best_centers",
"=",
"centers",
".",
"copy",
"(",
")",
"best_labels",
"=",
"labels",
".",
"copy",
"(",
")",
"best_weights",
"=",
"weights",
".",
"copy",
"(",
")",
"best_concentrations",
"=",
"concentrations",
".",
"copy",
"(",
")",
"best_posterior",
"=",
"posterior",
".",
"copy",
"(",
")",
"best_inertia",
"=",
"inertia",
"else",
":",
"# parallelisation of movMF runs",
"seeds",
"=",
"random_state",
".",
"randint",
"(",
"np",
".",
"iinfo",
"(",
"np",
".",
"int32",
")",
".",
"max",
",",
"size",
"=",
"n_init",
")",
"results",
"=",
"Parallel",
"(",
"n_jobs",
"=",
"n_jobs",
",",
"verbose",
"=",
"0",
")",
"(",
"delayed",
"(",
"_movMF",
")",
"(",
"X",
",",
"n_clusters",
",",
"posterior_type",
"=",
"posterior_type",
",",
"force_weights",
"=",
"force_weights",
",",
"max_iter",
"=",
"max_iter",
",",
"verbose",
"=",
"verbose",
",",
"init",
"=",
"init",
",",
"random_state",
"=",
"random_state",
",",
"tol",
"=",
"tol",
",",
")",
"for",
"seed",
"in",
"seeds",
")",
"# Get results with the lowest inertia",
"centers",
",",
"weights",
",",
"concentrations",
",",
"posteriors",
",",
"labels",
",",
"inertia",
"=",
"zip",
"(",
"*",
"results",
")",
"best",
"=",
"np",
".",
"argmin",
"(",
"inertia",
")",
"best_labels",
"=",
"labels",
"[",
"best",
"]",
"best_inertia",
"=",
"inertia",
"[",
"best",
"]",
"best_centers",
"=",
"centers",
"[",
"best",
"]",
"best_concentrations",
"=",
"concentrations",
"[",
"best",
"]",
"best_posterior",
"=",
"posteriors",
"[",
"best",
"]",
"best_weights",
"=",
"weights",
"[",
"best",
"]",
"return",
"(",
"best_centers",
",",
"best_labels",
",",
"best_inertia",
",",
"best_weights",
",",
"best_concentrations",
",",
"best_posterior",
",",
")"
] | Wrapper for parallelization of _movMF and running n_init times. | [
"Wrapper",
"for",
"parallelization",
"of",
"_movMF",
"and",
"running",
"n_init",
"times",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L500-L614 | train |
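A minimal usage sketch for the wrapper, assuming the spherecluster package is installed. The import path is inferred from the file path in this record, and the unpacking order follows the tuple returned above.

```python
import numpy as np
from sklearn.preprocessing import normalize
from spherecluster.von_mises_fisher_mixture import movMF  # assumed import path

rng = np.random.RandomState(0)
X = normalize(rng.randn(200, 10))   # movMF expects unit-norm rows

centers, labels, inertia, weights, concentrations, posterior = movMF(
    X, n_clusters=3, posterior_type="soft", n_init=4, random_state=0
)
print(centers.shape, labels.shape, inertia)
```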
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | VonMisesFisherMixture._check_fit_data | def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X | python | def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X | [
"def",
"_check_fit_data",
"(",
"self",
",",
"X",
")",
":",
"X",
"=",
"check_array",
"(",
"X",
",",
"accept_sparse",
"=",
"\"csr\"",
",",
"dtype",
"=",
"[",
"np",
".",
"float64",
",",
"np",
".",
"float32",
"]",
")",
"n_samples",
",",
"n_features",
"=",
"X",
".",
"shape",
"if",
"X",
".",
"shape",
"[",
"0",
"]",
"<",
"self",
".",
"n_clusters",
":",
"raise",
"ValueError",
"(",
"\"n_samples=%d should be >= n_clusters=%d\"",
"%",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"n_clusters",
")",
")",
"for",
"ee",
"in",
"range",
"(",
"n_samples",
")",
":",
"if",
"sp",
".",
"issparse",
"(",
"X",
")",
":",
"n",
"=",
"sp",
".",
"linalg",
".",
"norm",
"(",
"X",
"[",
"ee",
",",
":",
"]",
")",
"else",
":",
"n",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"X",
"[",
"ee",
",",
":",
"]",
")",
"if",
"np",
".",
"abs",
"(",
"n",
"-",
"1.",
")",
">",
"1e-4",
":",
"raise",
"ValueError",
"(",
"\"Data l2-norm must be 1, found {}\"",
".",
"format",
"(",
"n",
")",
")",
"return",
"X"
] | Verify that the number of samples given is larger than k | [
"Verify",
"that",
"the",
"number",
"of",
"samples",
"given",
"is",
"larger",
"than",
"k"
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L772-L791 | train |
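The check above requires every row to have unit L2 norm (within 1e-4). Normalizing beforehand, as the estimator's own normalize option also does, satisfies it; a quick illustration:

```python
import numpy as np
from sklearn.preprocessing import normalize

X = np.random.RandomState(0).randn(6, 4)
X_unit = normalize(X)                            # project each row onto the unit sphere
row_norms = np.linalg.norm(X_unit, axis=1)
print(np.all(np.abs(row_norms - 1.0) <= 1e-4))   # True: would pass _check_fit_data
```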
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | VonMisesFisherMixture.fit | def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self | python | def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"if",
"self",
".",
"normalize",
":",
"X",
"=",
"normalize",
"(",
"X",
")",
"self",
".",
"_check_force_weights",
"(",
")",
"random_state",
"=",
"check_random_state",
"(",
"self",
".",
"random_state",
")",
"X",
"=",
"self",
".",
"_check_fit_data",
"(",
"X",
")",
"(",
"self",
".",
"cluster_centers_",
",",
"self",
".",
"labels_",
",",
"self",
".",
"inertia_",
",",
"self",
".",
"weights_",
",",
"self",
".",
"concentrations_",
",",
"self",
".",
"posterior_",
",",
")",
"=",
"movMF",
"(",
"X",
",",
"self",
".",
"n_clusters",
",",
"posterior_type",
"=",
"self",
".",
"posterior_type",
",",
"force_weights",
"=",
"self",
".",
"force_weights",
",",
"n_init",
"=",
"self",
".",
"n_init",
",",
"n_jobs",
"=",
"self",
".",
"n_jobs",
",",
"max_iter",
"=",
"self",
".",
"max_iter",
",",
"verbose",
"=",
"self",
".",
"verbose",
",",
"init",
"=",
"self",
".",
"init",
",",
"random_state",
"=",
"random_state",
",",
"tol",
"=",
"self",
".",
"tol",
",",
"copy_x",
"=",
"self",
".",
"copy_x",
",",
")",
"return",
"self"
] | Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features) | [
"Compute",
"mixture",
"of",
"von",
"Mises",
"Fisher",
"clustering",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L814-L850 | train |
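A hedged estimator-style sketch of fitting: it assumes VonMisesFisherMixture is importable from the package top level and that the constructor accepts the parameters referenced as attributes in fit above.

```python
import numpy as np
from sklearn.preprocessing import normalize
from spherecluster import VonMisesFisherMixture   # assumed top-level export

X = normalize(np.random.RandomState(1).randn(300, 20))
vmf = VonMisesFisherMixture(n_clusters=4, posterior_type="soft", n_init=5, random_state=1)
vmf.fit(X)
print(vmf.cluster_centers_.shape)   # (4, 20)
print(vmf.labels_[:10])
print(vmf.concentrations_)          # one estimated kappa per cluster
```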
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | VonMisesFisherMixture.transform | def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X) | python | def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X) | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"if",
"self",
".",
"normalize",
":",
"X",
"=",
"normalize",
"(",
"X",
")",
"check_is_fitted",
"(",
"self",
",",
"\"cluster_centers_\"",
")",
"X",
"=",
"self",
".",
"_check_test_data",
"(",
"X",
")",
"return",
"self",
".",
"_transform",
"(",
"X",
")"
] | Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space. | [
"Transform",
"X",
"to",
"a",
"cluster",
"-",
"distance",
"space",
".",
"In",
"the",
"new",
"space",
"each",
"dimension",
"is",
"the",
"cosine",
"distance",
"to",
"the",
"cluster",
"centers",
".",
"Note",
"that",
"even",
"if",
"X",
"is",
"sparse",
"the",
"array",
"returned",
"by",
"transform",
"will",
"typically",
"be",
"dense",
"."
] | 701b0b1909088a56e353b363b2672580d4fe9d93 | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L869-L890 | train |
skggm/skggm | inverse_covariance/metrics.py | log_likelihood | def log_likelihood(covariance, precision):
"""Computes the log-likelihood between the covariance and precision
estimate.
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
log-likelihood
"""
assert covariance.shape == precision.shape
dim, _ = precision.shape
log_likelihood_ = (
-np.sum(covariance * precision)
+ fast_logdet(precision)
- dim * np.log(2 * np.pi)
)
log_likelihood_ /= 2.
return log_likelihood_ | python | def log_likelihood(covariance, precision):
"""Computes the log-likelihood between the covariance and precision
estimate.
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
log-likelihood
"""
assert covariance.shape == precision.shape
dim, _ = precision.shape
log_likelihood_ = (
-np.sum(covariance * precision)
+ fast_logdet(precision)
- dim * np.log(2 * np.pi)
)
log_likelihood_ /= 2.
return log_likelihood_ | [
"def",
"log_likelihood",
"(",
"covariance",
",",
"precision",
")",
":",
"assert",
"covariance",
".",
"shape",
"==",
"precision",
".",
"shape",
"dim",
",",
"_",
"=",
"precision",
".",
"shape",
"log_likelihood_",
"=",
"(",
"-",
"np",
".",
"sum",
"(",
"covariance",
"*",
"precision",
")",
"+",
"fast_logdet",
"(",
"precision",
")",
"-",
"dim",
"*",
"np",
".",
"log",
"(",
"2",
"*",
"np",
".",
"pi",
")",
")",
"log_likelihood_",
"/=",
"2.",
"return",
"log_likelihood_"
] | Computes the log-likelihood between the covariance and precision
estimate.
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
log-likelihood | [
"Computes",
"the",
"log",
"-",
"likelihood",
"between",
"the",
"covariance",
"and",
"precision",
"estimate",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/metrics.py#L6-L30 | train |
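A small numeric check of the formula, with sklearn's fast_logdet replaced by numpy's slogdet so the snippet stands on its own:

```python
import numpy as np

S = np.array([[2.0, 0.3],
              [0.3, 1.0]])          # sample covariance
Theta = np.linalg.inv(S)            # candidate precision (here the exact inverse)

dim = S.shape[0]
_, logdet = np.linalg.slogdet(Theta)
ll = 0.5 * (-np.sum(S * Theta) + logdet - dim * np.log(2 * np.pi))
print(ll)                           # the value log_likelihood(S, Theta) would return
```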
skggm/skggm | inverse_covariance/metrics.py | kl_loss | def kl_loss(covariance, precision):
"""Computes the KL divergence between precision estimate and
reference covariance.
The loss is computed as:
0.5 * (Trace(Theta_1 * Sigma_0) - log det(Theta_1 * Sigma_0) - dim(Sigma))
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
KL-divergence
"""
assert covariance.shape == precision.shape
dim, _ = precision.shape
logdet_p_dot_c = fast_logdet(np.dot(precision, covariance))
return 0.5 * (np.sum(precision * covariance) - logdet_p_dot_c - dim) | python | def kl_loss(covariance, precision):
"""Computes the KL divergence between precision estimate and
reference covariance.
The loss is computed as:
0.5 * (Trace(Theta_1 * Sigma_0) - log det(Theta_1 * Sigma_0) - dim(Sigma))
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
KL-divergence
"""
assert covariance.shape == precision.shape
dim, _ = precision.shape
logdet_p_dot_c = fast_logdet(np.dot(precision, covariance))
return 0.5 * (np.sum(precision * covariance) - logdet_p_dot_c - dim) | [
"def",
"kl_loss",
"(",
"covariance",
",",
"precision",
")",
":",
"assert",
"covariance",
".",
"shape",
"==",
"precision",
".",
"shape",
"dim",
",",
"_",
"=",
"precision",
".",
"shape",
"logdet_p_dot_c",
"=",
"fast_logdet",
"(",
"np",
".",
"dot",
"(",
"precision",
",",
"covariance",
")",
")",
"return",
"0.5",
"*",
"(",
"np",
".",
"sum",
"(",
"precision",
"*",
"covariance",
")",
"-",
"logdet_p_dot_c",
"-",
"dim",
")"
] | Computes the KL divergence between precision estimate and
reference covariance.
The loss is computed as:
0.5 * (Trace(Theta_1 * Sigma_0) - log det(Theta_1 * Sigma_0) - dim(Sigma))
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
KL-divergence | [
"Computes",
"the",
"KL",
"divergence",
"between",
"precision",
"estimate",
"and",
"reference",
"covariance",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/metrics.py#L33-L56 | train |
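A quick sanity check: the loss is zero when the precision is the exact inverse of the reference covariance and positive when structure is thrown away. Again fast_logdet is swapped for numpy's slogdet so the snippet is self-contained.

```python
import numpy as np

S = np.array([[1.5, 0.4],
              [0.4, 1.0]])
Theta_exact = np.linalg.inv(S)
Theta_diag = np.diag(1.0 / np.diag(S))     # cruder model that drops the off-diagonal

def kl(cov, prec):
    dim = cov.shape[0]
    _, logdet = np.linalg.slogdet(prec.dot(cov))
    return 0.5 * (np.sum(prec * cov) - logdet - dim)

print(kl(S, Theta_exact))   # ~0.0
print(kl(S, Theta_diag))    # > 0
```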
skggm/skggm | inverse_covariance/metrics.py | ebic | def ebic(covariance, precision, n_samples, n_features, gamma=0):
"""
Extended Bayesian Information Criteria for model selection.
When using path mode, use this as an alternative to cross-validation for
finding lambda.
See:
"Extended Bayesian Information Criteria for Gaussian Graphical Models"
R. Foygel and M. Drton, NIPS 2010
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance (sample covariance)
precision : 2D ndarray (n_features, n_features)
The precision matrix of the model to be tested
n_samples : int
Number of examples.
n_features : int
Dimension of an example.
lam: (float)
Threshold value for precision matrix. This should be lambda scaling
used to obtain this estimate.
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
ebic score (float). Caller should minimize this score.
"""
l_theta = -np.sum(covariance * precision) + fast_logdet(precision)
l_theta *= n_features / 2.
# if something goes wrong with fast_logdet, return large value
if np.isinf(l_theta) or np.isnan(l_theta):
return 1e10
mask = np.abs(precision.flat) > np.finfo(precision.dtype).eps
precision_nnz = (np.sum(mask) - n_features) / 2.0 # lower off diagonal tri
return (
-2.0 * l_theta
+ precision_nnz * np.log(n_samples)
+ 4.0 * precision_nnz * np.log(n_features) * gamma
) | python | def ebic(covariance, precision, n_samples, n_features, gamma=0):
"""
Extended Bayesian Information Criteria for model selection.
When using path mode, use this as an alternative to cross-validation for
finding lambda.
See:
"Extended Bayesian Information Criteria for Gaussian Graphical Models"
R. Foygel and M. Drton, NIPS 2010
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance (sample covariance)
precision : 2D ndarray (n_features, n_features)
The precision matrix of the model to be tested
n_samples : int
Number of examples.
n_features : int
Dimension of an example.
lam: (float)
Threshold value for precision matrix. This should be lambda scaling
used to obtain this estimate.
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
ebic score (float). Caller should minimize this score.
"""
l_theta = -np.sum(covariance * precision) + fast_logdet(precision)
l_theta *= n_features / 2.
# is something goes wrong with fast_logdet, return large value
if np.isinf(l_theta) or np.isnan(l_theta):
return 1e10
mask = np.abs(precision.flat) > np.finfo(precision.dtype).eps
precision_nnz = (np.sum(mask) - n_features) / 2.0 # lower off diagonal tri
return (
-2.0 * l_theta
+ precision_nnz * np.log(n_samples)
+ 4.0 * precision_nnz * np.log(n_features) * gamma
) | [
"def",
"ebic",
"(",
"covariance",
",",
"precision",
",",
"n_samples",
",",
"n_features",
",",
"gamma",
"=",
"0",
")",
":",
"l_theta",
"=",
"-",
"np",
".",
"sum",
"(",
"covariance",
"*",
"precision",
")",
"+",
"fast_logdet",
"(",
"precision",
")",
"l_theta",
"*=",
"n_features",
"/",
"2.",
"# is something goes wrong with fast_logdet, return large value",
"if",
"np",
".",
"isinf",
"(",
"l_theta",
")",
"or",
"np",
".",
"isnan",
"(",
"l_theta",
")",
":",
"return",
"1e10",
"mask",
"=",
"np",
".",
"abs",
"(",
"precision",
".",
"flat",
")",
">",
"np",
".",
"finfo",
"(",
"precision",
".",
"dtype",
")",
".",
"eps",
"precision_nnz",
"=",
"(",
"np",
".",
"sum",
"(",
"mask",
")",
"-",
"n_features",
")",
"/",
"2.0",
"# lower off diagonal tri",
"return",
"(",
"-",
"2.0",
"*",
"l_theta",
"+",
"precision_nnz",
"*",
"np",
".",
"log",
"(",
"n_samples",
")",
"+",
"4.0",
"*",
"precision_nnz",
"*",
"np",
".",
"log",
"(",
"n_features",
")",
"*",
"gamma",
")"
] | Extended Bayesian Information Criteria for model selection.
When using path mode, use this as an alternative to cross-validation for
finding lambda.
See:
"Extended Bayesian Information Criteria for Gaussian Graphical Models"
R. Foygel and M. Drton, NIPS 2010
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance (sample covariance)
precision : 2D ndarray (n_features, n_features)
The precision matrix of the model to be tested
n_samples : int
Number of examples.
n_features : int
Dimension of an example.
lam: (float)
Threshold value for precision matrix. This should be lambda scaling
used to obtain this estimate.
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
ebic score (float). Caller should minimize this score. | [
"Extended",
"Bayesian",
"Information",
"Criteria",
"for",
"model",
"selection",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/metrics.py#L79-L130 | train |
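The penalty side of the criterion depends only on how many off-diagonal nonzeros (edges) the precision estimate keeps. A small sketch of that count and penalty, mirroring the masking logic above (gamma=0 reduces it to the classical BIC penalty):

```python
import numpy as np

n_samples, n_features, gamma = 100, 10, 0.5

Theta = np.eye(n_features)
Theta[0, 1] = Theta[1, 0] = 0.4     # keep a single off-diagonal edge

mask = np.abs(Theta.flat) > np.finfo(Theta.dtype).eps
n_edges = (np.sum(mask) - n_features) / 2.0      # lower-triangular nonzeros -> 1.0
penalty = n_edges * np.log(n_samples) + 4.0 * n_edges * np.log(n_features) * gamma
print(n_edges, penalty)
```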
skggm/skggm | inverse_covariance/profiling/graphs.py | lattice | def lattice(prng, n_features, alpha, random_sign=False, low=0.3, high=0.7):
"""Returns the adjacency matrix for a lattice network.
The resulting network is a Toeplitz matrix with random values summing
between -1 and 1 and zeros along the diagonal.
The range of the values can be controlled via the parameters low and high.
If random_sign is false, all entries will be negative, otherwise their sign
will be modulated at random with probability 1/2.
Each row has maximum edges of np.ceil(alpha * n_features).
Parameters
-----------
n_features : int
alpha : float (0, 1)
The complexity / sparsity factor.
random_sign : bool (default=False)
Randomly modulate each entry by 1 or -1 with probability of 1/2.
low : float (0, 1) (default=0.3)
Lower bound for np.random.RandomState.uniform before normalization.
high : float (0, 1) > low (default=0.7)
Upper bound for np.random.RandomState.uniform before normalization.
"""
degree = int(1 + np.round(alpha * n_features / 2.))
if random_sign:
sign_row = -1.0 * np.ones(degree) + 2 * (
prng.uniform(low=0, high=1, size=degree) > .5
)
else:
sign_row = -1.0 * np.ones(degree)
# in the *very unlikely* event that we draw a bad row that sums to zero
# (which is only possible when random_sign=True), we try again up to
# MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of
# values something is probably wrong and we raise.
MAX_ATTEMPTS = 5
attempt = 0
row = np.zeros((n_features,))
while np.sum(row) == 0 and attempt < MAX_ATTEMPTS:
row = np.zeros((n_features,))
row[1 : 1 + degree] = sign_row * prng.uniform(low=low, high=high, size=degree)
attempt += 1
if np.sum(row) == 0:
raise Exception("InvalidLattice", "Rows sum to 0.")
return
# sum-normalize and keep signs
row /= np.abs(np.sum(row))
return sp.linalg.toeplitz(c=row, r=row) | python | def lattice(prng, n_features, alpha, random_sign=False, low=0.3, high=0.7):
"""Returns the adjacency matrix for a lattice network.
The resulting network is a Toeplitz matrix with random values summing
between -1 and 1 and zeros along the diagonal.
The range of the values can be controlled via the parameters low and high.
If random_sign is false, all entries will be negative, otherwise their sign
will be modulated at random with probability 1/2.
Each row has maximum edges of np.ceil(alpha * n_features).
Parameters
-----------
n_features : int
alpha : float (0, 1)
The complexity / sparsity factor.
random_sign : bool (default=False)
Randomly modulate each entry by 1 or -1 with probability of 1/2.
low : float (0, 1) (default=0.3)
Lower bound for np.random.RandomState.uniform before normalization.
high : float (0, 1) > low (default=0.7)
Upper bound for np.random.RandomState.uniform before normalization.
"""
degree = int(1 + np.round(alpha * n_features / 2.))
if random_sign:
sign_row = -1.0 * np.ones(degree) + 2 * (
prng.uniform(low=0, high=1, size=degree) > .5
)
else:
sign_row = -1.0 * np.ones(degree)
# in the *very unlikely* event that we draw a bad row that sums to zero
# (which is only possible when random_sign=True), we try again up to
# MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of
# values something is probably wrong and we raise.
MAX_ATTEMPTS = 5
attempt = 0
row = np.zeros((n_features,))
while np.sum(row) == 0 and attempt < MAX_ATTEMPTS:
row = np.zeros((n_features,))
row[1 : 1 + degree] = sign_row * prng.uniform(low=low, high=high, size=degree)
attempt += 1
if np.sum(row) == 0:
raise Exception("InvalidLattice", "Rows sum to 0.")
return
# sum-normalize and keep signs
row /= np.abs(np.sum(row))
return sp.linalg.toeplitz(c=row, r=row) | [
"def",
"lattice",
"(",
"prng",
",",
"n_features",
",",
"alpha",
",",
"random_sign",
"=",
"False",
",",
"low",
"=",
"0.3",
",",
"high",
"=",
"0.7",
")",
":",
"degree",
"=",
"int",
"(",
"1",
"+",
"np",
".",
"round",
"(",
"alpha",
"*",
"n_features",
"/",
"2.",
")",
")",
"if",
"random_sign",
":",
"sign_row",
"=",
"-",
"1.0",
"*",
"np",
".",
"ones",
"(",
"degree",
")",
"+",
"2",
"*",
"(",
"prng",
".",
"uniform",
"(",
"low",
"=",
"0",
",",
"high",
"=",
"1",
",",
"size",
"=",
"degree",
")",
">",
".5",
")",
"else",
":",
"sign_row",
"=",
"-",
"1.0",
"*",
"np",
".",
"ones",
"(",
"degree",
")",
"# in the *very unlikely* event that we draw a bad row that sums to zero",
"# (which is only possible when random_sign=True), we try again up to",
"# MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of",
"# values something is probably wrong and we raise.",
"MAX_ATTEMPTS",
"=",
"5",
"attempt",
"=",
"0",
"row",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_features",
",",
")",
")",
"while",
"np",
".",
"sum",
"(",
"row",
")",
"==",
"0",
"and",
"attempt",
"<",
"MAX_ATTEMPTS",
":",
"row",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_features",
",",
")",
")",
"row",
"[",
"1",
":",
"1",
"+",
"degree",
"]",
"=",
"sign_row",
"*",
"prng",
".",
"uniform",
"(",
"low",
"=",
"low",
",",
"high",
"=",
"high",
",",
"size",
"=",
"degree",
")",
"attempt",
"+=",
"1",
"if",
"np",
".",
"sum",
"(",
"row",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"\"InvalidLattice\"",
",",
"\"Rows sum to 0.\"",
")",
"return",
"# sum-normalize and keep signs",
"row",
"/=",
"np",
".",
"abs",
"(",
"np",
".",
"sum",
"(",
"row",
")",
")",
"return",
"sp",
".",
"linalg",
".",
"toeplitz",
"(",
"c",
"=",
"row",
",",
"r",
"=",
"row",
")"
] | Returns the adjacency matrix for a lattice network.
The resulting network is a Toeplitz matrix with random values summing
between -1 and 1 and zeros along the diagonal.
The range of the values can be controlled via the parameters low and high.
If random_sign is false, all entries will be negative, otherwise their sign
will be modulated at random with probability 1/2.
Each row has maximum edges of np.ceil(alpha * n_features).
Parameters
-----------
n_features : int
alpha : float (0, 1)
The complexity / sparsity factor.
random_sign : bool (default=False)
Randomly modulate each entry by 1 or -1 with probability of 1/2.
low : float (0, 1) (default=0.3)
Lower bound for np.random.RandomState.uniform before normalization.
high : float (0, 1) > low (default=0.7)
Upper bound for np.random.RandomState.uniform before normalization. | [
"Returns",
"the",
"adjacency",
"matrix",
"for",
"a",
"lattice",
"network",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L5-L61 | train |
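A toy version of the banded Toeplitz construction above, with fixed values in place of the random draws so the resulting structure is easy to inspect:

```python
import numpy as np
from scipy.linalg import toeplitz

n_features, degree = 6, 2
row = np.zeros(n_features)
row[1:1 + degree] = -np.array([0.5, 0.4])   # negative entries in the band
row /= np.abs(row.sum())                    # sum-normalize, keeping signs
adjacency = toeplitz(c=row, r=row)
print(np.round(adjacency, 2))               # symmetric, zero diagonal, banded
```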
skggm/skggm | inverse_covariance/profiling/graphs.py | _to_diagonally_dominant | def _to_diagonally_dominant(mat):
"""Make matrix unweighted diagonally dominant using the Laplacian."""
mat += np.diag(np.sum(mat != 0, axis=1) + 0.01)
return mat | python | def _to_diagonally_dominant(mat):
"""Make matrix unweighted diagonally dominant using the Laplacian."""
mat += np.diag(np.sum(mat != 0, axis=1) + 0.01)
return mat | [
"def",
"_to_diagonally_dominant",
"(",
"mat",
")",
":",
"mat",
"+=",
"np",
".",
"diag",
"(",
"np",
".",
"sum",
"(",
"mat",
"!=",
"0",
",",
"axis",
"=",
"1",
")",
"+",
"0.01",
")",
"return",
"mat"
] | Make matrix unweighted diagonally dominant using the Laplacian. | [
"Make",
"matrix",
"unweighted",
"diagonally",
"dominant",
"using",
"the",
"Laplacian",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L103-L106 | train |
skggm/skggm | inverse_covariance/profiling/graphs.py | _to_diagonally_dominant_weighted | def _to_diagonally_dominant_weighted(mat):
"""Make matrix weighted diagonally dominant using the Laplacian."""
mat += np.diag(np.sum(np.abs(mat), axis=1) + 0.01)
return mat | python | def _to_diagonally_dominant_weighted(mat):
"""Make matrix weighted diagonally dominant using the Laplacian."""
mat += np.diag(np.sum(np.abs(mat), axis=1) + 0.01)
return mat | [
"def",
"_to_diagonally_dominant_weighted",
"(",
"mat",
")",
":",
"mat",
"+=",
"np",
".",
"diag",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"mat",
")",
",",
"axis",
"=",
"1",
")",
"+",
"0.01",
")",
"return",
"mat"
] | Make matrix weighted diagonally dominant using the Laplacian. | [
"Make",
"matrix",
"weighted",
"diagonally",
"dominant",
"using",
"the",
"Laplacian",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L109-L112 | train |
skggm/skggm | inverse_covariance/profiling/graphs.py | _rescale_to_unit_diagonals | def _rescale_to_unit_diagonals(mat):
"""Rescale matrix to have unit diagonals.
Note: Call only after diagonal dominance is ensured.
"""
d = np.sqrt(np.diag(mat))
mat /= d
mat /= d[:, np.newaxis]
return mat | python | def _rescale_to_unit_diagonals(mat):
"""Rescale matrix to have unit diagonals.
Note: Call only after diagonal dominance is ensured.
"""
d = np.sqrt(np.diag(mat))
mat /= d
mat /= d[:, np.newaxis]
return mat | [
"def",
"_rescale_to_unit_diagonals",
"(",
"mat",
")",
":",
"d",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"diag",
"(",
"mat",
")",
")",
"mat",
"/=",
"d",
"mat",
"/=",
"d",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"return",
"mat"
] | Rescale matrix to have unit diagonals.
Note: Call only after diagonal dominance is ensured. | [
"Rescale",
"matrix",
"to",
"have",
"unit",
"diagonals",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L115-L123 | train |
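Taken together, the three helpers above turn an adjacency matrix into a unit-diagonal, positive-definite precision-like matrix. A compact sketch of that two-step recipe on a tiny chain graph, using the same operations as the helpers:

```python
import numpy as np

adj = np.array([[ 0.0, -0.5,  0.0],
                [-0.5,  0.0, -0.5],
                [ 0.0, -0.5,  0.0]])

mat = adj.copy()
mat += np.diag(np.sum(np.abs(mat), axis=1) + 0.01)   # weighted diagonal dominance
d = np.sqrt(np.diag(mat))
mat = mat / d / d[:, np.newaxis]                      # rescale to unit diagonal

print(np.round(mat, 3))
print(np.all(np.linalg.eigvalsh(mat) > 0))            # True: positive definite
```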
skggm/skggm | inverse_covariance/profiling/graphs.py | Graph.create | def create(self, n_features, alpha):
"""Build a new graph with block structure.
Parameters
-----------
n_features : int
alpha : float (0,1)
The complexity / sparsity factor for each graph type.
Returns
-----------
(n_features, n_features) matrices: covariance, precision, adjacency
"""
n_block_features = int(np.floor(1. * n_features / self.n_blocks))
if n_block_features * self.n_blocks != n_features:
raise ValueError(
(
"Error: n_features {} not divisible by n_blocks {}."
"Use n_features = n_blocks * int"
).format(n_features, self.n_blocks)
)
return
block_adj = self.prototype_adjacency(n_block_features, alpha)
adjacency = blocks(
self.prng, block_adj, n_blocks=self.n_blocks, chain_blocks=self.chain_blocks
)
precision = self.to_precision(adjacency)
covariance = self.to_covariance(precision)
return covariance, precision, adjacency | python | def create(self, n_features, alpha):
"""Build a new graph with block structure.
Parameters
-----------
n_features : int
alpha : float (0,1)
The complexity / sparsity factor for each graph type.
Returns
-----------
(n_features, n_features) matrices: covariance, precision, adjacency
"""
n_block_features = int(np.floor(1. * n_features / self.n_blocks))
if n_block_features * self.n_blocks != n_features:
raise ValueError(
(
"Error: n_features {} not divisible by n_blocks {}."
"Use n_features = n_blocks * int"
).format(n_features, self.n_blocks)
)
return
block_adj = self.prototype_adjacency(n_block_features, alpha)
adjacency = blocks(
self.prng, block_adj, n_blocks=self.n_blocks, chain_blocks=self.chain_blocks
)
precision = self.to_precision(adjacency)
covariance = self.to_covariance(precision)
return covariance, precision, adjacency | [
"def",
"create",
"(",
"self",
",",
"n_features",
",",
"alpha",
")",
":",
"n_block_features",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"1.",
"*",
"n_features",
"/",
"self",
".",
"n_blocks",
")",
")",
"if",
"n_block_features",
"*",
"self",
".",
"n_blocks",
"!=",
"n_features",
":",
"raise",
"ValueError",
"(",
"(",
"\"Error: n_features {} not divisible by n_blocks {}.\"",
"\"Use n_features = n_blocks * int\"",
")",
".",
"format",
"(",
"n_features",
",",
"self",
".",
"n_blocks",
")",
")",
"return",
"block_adj",
"=",
"self",
".",
"prototype_adjacency",
"(",
"n_block_features",
",",
"alpha",
")",
"adjacency",
"=",
"blocks",
"(",
"self",
".",
"prng",
",",
"block_adj",
",",
"n_blocks",
"=",
"self",
".",
"n_blocks",
",",
"chain_blocks",
"=",
"self",
".",
"chain_blocks",
")",
"precision",
"=",
"self",
".",
"to_precision",
"(",
"adjacency",
")",
"covariance",
"=",
"self",
".",
"to_covariance",
"(",
"precision",
")",
"return",
"covariance",
",",
"precision",
",",
"adjacency"
] | Build a new graph with block structure.
Parameters
-----------
n_features : int
alpha : float (0,1)
The complexity / sparsity factor for each graph type.
Returns
-----------
(n_features, n_features) matrices: covariance, precision, adjacency | [
"Build",
"a",
"new",
"graph",
"with",
"block",
"structure",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L176-L207 | train |
skggm/skggm | inverse_covariance/profiling/monte_carlo_profile.py | _sample_mvn | def _sample_mvn(n_samples, cov, prng):
"""Draw a multivariate normal sample from the graph defined by cov.
Parameters
-----------
n_samples : int
cov : matrix of shape (n_features, n_features)
Covariance matrix of the graph.
prng : np.random.RandomState instance.
"""
n_features, _ = cov.shape
return prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples) | python | def _sample_mvn(n_samples, cov, prng):
"""Draw a multivariate normal sample from the graph defined by cov.
Parameters
-----------
n_samples : int
cov : matrix of shape (n_features, n_features)
Covariance matrix of the graph.
prng : np.random.RandomState instance.
"""
n_features, _ = cov.shape
return prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples) | [
"def",
"_sample_mvn",
"(",
"n_samples",
",",
"cov",
",",
"prng",
")",
":",
"n_features",
",",
"_",
"=",
"cov",
".",
"shape",
"return",
"prng",
".",
"multivariate_normal",
"(",
"np",
".",
"zeros",
"(",
"n_features",
")",
",",
"cov",
",",
"size",
"=",
"n_samples",
")"
] | Draw a multivariate normal sample from the graph defined by cov.
Parameters
-----------
n_samples : int
cov : matrix of shape (n_features, n_features)
Covariance matrix of the graph.
prng : np.random.RandomState instance. | [
"Draw",
"a",
"multivariate",
"normal",
"sample",
"from",
"the",
"graph",
"defined",
"by",
"cov",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/monte_carlo_profile.py#L13-L26 | train |
skggm/skggm | inverse_covariance/model_average.py | _fully_random_weights | def _fully_random_weights(n_features, lam_scale, prng):
"""Generate a symmetric random matrix with zeros along the diagonal."""
weights = np.zeros((n_features, n_features))
n_off_diag = int((n_features ** 2 - n_features) / 2)
weights[np.triu_indices(n_features, k=1)] = 0.1 * lam_scale * prng.randn(
n_off_diag
) + (0.25 * lam_scale)
weights[weights < 0] = 0
weights = weights + weights.T
return weights | python | def _fully_random_weights(n_features, lam_scale, prng):
"""Generate a symmetric random matrix with zeros along the diagonal."""
weights = np.zeros((n_features, n_features))
n_off_diag = int((n_features ** 2 - n_features) / 2)
weights[np.triu_indices(n_features, k=1)] = 0.1 * lam_scale * prng.randn(
n_off_diag
) + (0.25 * lam_scale)
weights[weights < 0] = 0
weights = weights + weights.T
return weights | [
"def",
"_fully_random_weights",
"(",
"n_features",
",",
"lam_scale",
",",
"prng",
")",
":",
"weights",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_features",
",",
"n_features",
")",
")",
"n_off_diag",
"=",
"int",
"(",
"(",
"n_features",
"**",
"2",
"-",
"n_features",
")",
"/",
"2",
")",
"weights",
"[",
"np",
".",
"triu_indices",
"(",
"n_features",
",",
"k",
"=",
"1",
")",
"]",
"=",
"0.1",
"*",
"lam_scale",
"*",
"prng",
".",
"randn",
"(",
"n_off_diag",
")",
"+",
"(",
"0.25",
"*",
"lam_scale",
")",
"weights",
"[",
"weights",
"<",
"0",
"]",
"=",
"0",
"weights",
"=",
"weights",
"+",
"weights",
".",
"T",
"return",
"weights"
] | Generate a symmetric random matrix with zeros along the diagonal. | [
"Generate",
"a",
"symmetric",
"random",
"matrix",
"with",
"zeros",
"along",
"the",
"diagonal",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/model_average.py#L17-L26 | train |
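The same construction, written as a stand-alone snippet with the negative draws clipped to zero (the code above achieves the clipping with a boolean mask):

```python
import numpy as np

prng = np.random.RandomState(0)
n_features, lam_scale = 4, 0.5

weights = np.zeros((n_features, n_features))
n_off_diag = (n_features ** 2 - n_features) // 2
vals = 0.1 * lam_scale * prng.randn(n_off_diag) + 0.25 * lam_scale
weights[np.triu_indices(n_features, k=1)] = np.clip(vals, 0, None)
weights = weights + weights.T               # symmetric, zero diagonal
print(np.round(weights, 3))
```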
skggm/skggm | inverse_covariance/model_average.py | _fix_weights | def _fix_weights(weight_fun, *args):
"""Ensure random weight matrix is valid.
TODO: The diagonally dominant tuning currently doesn't make sense.
Our weight matrix has zeros along the diagonal, so multiplying by
a diagonal matrix results in a zero-matrix.
"""
weights = weight_fun(*args)
# TODO: fix this
# disable checks for now
return weights
# if positive semidefinite, then we're good as is
if _check_psd(weights):
return weights
# make diagonally dominant
off_diag_sums = np.sum(weights, axis=1) # NOTE: assumes diag is zero
mod_mat = np.linalg.inv(np.sqrt(np.diag(off_diag_sums)))
return np.dot(mod_mat, weights, mod_mat) | python | def _fix_weights(weight_fun, *args):
"""Ensure random weight matrix is valid.
TODO: The diagonally dominant tuning currently doesn't make sense.
Our weight matrix has zeros along the diagonal, so multiplying by
a diagonal matrix results in a zero-matrix.
"""
weights = weight_fun(*args)
# TODO: fix this
# disable checks for now
return weights
# if positive semidefinite, then we're good as is
if _check_psd(weights):
return weights
# make diagonally dominant
off_diag_sums = np.sum(weights, axis=1) # NOTE: assumes diag is zero
mod_mat = np.linalg.inv(np.sqrt(np.diag(off_diag_sums)))
return np.dot(mod_mat, weights, mod_mat) | [
"def",
"_fix_weights",
"(",
"weight_fun",
",",
"*",
"args",
")",
":",
"weights",
"=",
"weight_fun",
"(",
"*",
"args",
")",
"# TODO: fix this",
"# disable checks for now",
"return",
"weights",
"# if positive semidefinite, then we're good as is",
"if",
"_check_psd",
"(",
"weights",
")",
":",
"return",
"weights",
"# make diagonally dominant",
"off_diag_sums",
"=",
"np",
".",
"sum",
"(",
"weights",
",",
"axis",
"=",
"1",
")",
"# NOTE: assumes diag is zero",
"mod_mat",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"np",
".",
"sqrt",
"(",
"np",
".",
"diag",
"(",
"off_diag_sums",
")",
")",
")",
"return",
"np",
".",
"dot",
"(",
"mod_mat",
",",
"weights",
",",
"mod_mat",
")"
] | Ensure random weight matrix is valid.
TODO: The diagonally dominant tuning currently doesn't make sense.
Our weight matrix has zeros along the diagonal, so multiplying by
a diagonal matrix results in a zero-matrix. | [
"Ensure",
"random",
"weight",
"matrix",
"is",
"valid",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/model_average.py#L46-L66 | train |
skggm/skggm | inverse_covariance/model_average.py | _fit | def _fit(
indexed_params,
penalization,
lam,
lam_perturb,
lam_scale_,
estimator,
penalty_name,
subsample,
bootstrap,
prng,
X=None,
):
"""Wrapper function outside of instance for fitting a single model average
trial.
If X is None, then we assume we are using a broadcast spark object. Else,
we expect X to get passed into this function.
"""
index = indexed_params
if isinstance(X, np.ndarray):
local_X = X
else:
local_X = X.value
n_samples, n_features = local_X.shape
prec_is_real = False
while not prec_is_real:
boot_lam = None
if penalization == "subsampling":
pass
elif penalization == "random":
boot_lam = _fix_weights(_random_weights, n_features, lam, lam_perturb, prng)
elif penalization == "fully-random":
boot_lam = _fix_weights(_fully_random_weights, n_features, lam_scale_, prng)
else:
raise NotImplementedError(
(
"Only penalization = 'subsampling', "
"'random', and 'fully-random' have "
"been implemented. Found {}.".format(penalization)
)
)
# new instance of estimator
new_estimator = clone(estimator)
if boot_lam is not None:
new_estimator.set_params(**{penalty_name: boot_lam})
# fit estimator
num_subsamples = int(subsample * n_samples)
rp = bootstrap(n_samples, num_subsamples, prng)
new_estimator.fit(local_X[rp, :])
# check that new_estimator.precision_ is real
# if not, skip this boot_lam and try again
if isinstance(new_estimator.precision_, list):
prec_real_bools = []
for prec in new_estimator.precision_:
prec_real_bools.append(np.all(np.isreal(prec)))
prec_is_real = np.all(np.array(prec_real_bools) is True)
elif isinstance(new_estimator.precision_, np.ndarray):
prec_is_real = np.all(np.isreal(new_estimator.precision_))
else:
raise ValueError("Estimator returned invalid precision_.")
return index, (boot_lam, rp, new_estimator) | python | def _fit(
indexed_params,
penalization,
lam,
lam_perturb,
lam_scale_,
estimator,
penalty_name,
subsample,
bootstrap,
prng,
X=None,
):
"""Wrapper function outside of instance for fitting a single model average
trial.
If X is None, then we assume we are using a broadcast spark object. Else,
we expect X to get passed into this function.
"""
index = indexed_params
if isinstance(X, np.ndarray):
local_X = X
else:
local_X = X.value
n_samples, n_features = local_X.shape
prec_is_real = False
while not prec_is_real:
boot_lam = None
if penalization == "subsampling":
pass
elif penalization == "random":
boot_lam = _fix_weights(_random_weights, n_features, lam, lam_perturb, prng)
elif penalization == "fully-random":
boot_lam = _fix_weights(_fully_random_weights, n_features, lam_scale_, prng)
else:
raise NotImplementedError(
(
"Only penalization = 'subsampling', "
"'random', and 'fully-random' have "
"been implemented. Found {}.".format(penalization)
)
)
# new instance of estimator
new_estimator = clone(estimator)
if boot_lam is not None:
new_estimator.set_params(**{penalty_name: boot_lam})
# fit estimator
num_subsamples = int(subsample * n_samples)
rp = bootstrap(n_samples, num_subsamples, prng)
new_estimator.fit(local_X[rp, :])
# check that new_estimator.precision_ is real
# if not, skip this boot_lam and try again
if isinstance(new_estimator.precision_, list):
prec_real_bools = []
for prec in new_estimator.precision_:
prec_real_bools.append(np.all(np.isreal(prec)))
prec_is_real = np.all(np.array(prec_real_bools) is True)
elif isinstance(new_estimator.precision_, np.ndarray):
prec_is_real = np.all(np.isreal(new_estimator.precision_))
else:
raise ValueError("Estimator returned invalid precision_.")
return index, (boot_lam, rp, new_estimator) | [
"def",
"_fit",
"(",
"indexed_params",
",",
"penalization",
",",
"lam",
",",
"lam_perturb",
",",
"lam_scale_",
",",
"estimator",
",",
"penalty_name",
",",
"subsample",
",",
"bootstrap",
",",
"prng",
",",
"X",
"=",
"None",
",",
")",
":",
"index",
"=",
"indexed_params",
"if",
"isinstance",
"(",
"X",
",",
"np",
".",
"ndarray",
")",
":",
"local_X",
"=",
"X",
"else",
":",
"local_X",
"=",
"X",
".",
"value",
"n_samples",
",",
"n_features",
"=",
"local_X",
".",
"shape",
"prec_is_real",
"=",
"False",
"while",
"not",
"prec_is_real",
":",
"boot_lam",
"=",
"None",
"if",
"penalization",
"==",
"\"subsampling\"",
":",
"pass",
"elif",
"penalization",
"==",
"\"random\"",
":",
"boot_lam",
"=",
"_fix_weights",
"(",
"_random_weights",
",",
"n_features",
",",
"lam",
",",
"lam_perturb",
",",
"prng",
")",
"elif",
"penalization",
"==",
"\"fully-random\"",
":",
"boot_lam",
"=",
"_fix_weights",
"(",
"_fully_random_weights",
",",
"n_features",
",",
"lam_scale_",
",",
"prng",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"(",
"\"Only penalization = 'subsampling', \"",
"\"'random', and 'fully-random' have \"",
"\"been implemented. Found {}.\"",
".",
"format",
"(",
"penalization",
")",
")",
")",
"# new instance of estimator",
"new_estimator",
"=",
"clone",
"(",
"estimator",
")",
"if",
"boot_lam",
"is",
"not",
"None",
":",
"new_estimator",
".",
"set_params",
"(",
"*",
"*",
"{",
"penalty_name",
":",
"boot_lam",
"}",
")",
"# fit estimator",
"num_subsamples",
"=",
"int",
"(",
"subsample",
"*",
"n_samples",
")",
"rp",
"=",
"bootstrap",
"(",
"n_samples",
",",
"num_subsamples",
",",
"prng",
")",
"new_estimator",
".",
"fit",
"(",
"local_X",
"[",
"rp",
",",
":",
"]",
")",
"# check that new_estimator.precision_ is real",
"# if not, skip this boot_lam and try again",
"if",
"isinstance",
"(",
"new_estimator",
".",
"precision_",
",",
"list",
")",
":",
"prec_real_bools",
"=",
"[",
"]",
"for",
"prec",
"in",
"new_estimator",
".",
"precision_",
":",
"prec_real_bools",
".",
"append",
"(",
"np",
".",
"all",
"(",
"np",
".",
"isreal",
"(",
"prec",
")",
")",
")",
"prec_is_real",
"=",
"np",
".",
"all",
"(",
"np",
".",
"array",
"(",
"prec_real_bools",
")",
"is",
"True",
")",
"elif",
"isinstance",
"(",
"new_estimator",
".",
"precision_",
",",
"np",
".",
"ndarray",
")",
":",
"prec_is_real",
"=",
"np",
".",
"all",
"(",
"np",
".",
"isreal",
"(",
"new_estimator",
".",
"precision_",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Estimator returned invalid precision_.\"",
")",
"return",
"index",
",",
"(",
"boot_lam",
",",
"rp",
",",
"new_estimator",
")"
] | Wrapper function outside of instance for fitting a single model average
trial.
If X is None, then we assume we are using a broadcast spark object. Else,
we expect X to get passed into this function. | [
"Wrapper",
"function",
"outside",
"of",
"instance",
"for",
"fitting",
"a",
"single",
"model",
"average",
"trial",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/model_average.py#L74-L145 | train |
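A minimal usage sketch for _fit (hedged: it assumes the module-level imports of model_average.py, such as clone and numpy, are in place, and it uses scikit-learn's GraphicalLasso plus a hand-rolled subsampler purely as stand-ins for the estimator and the bootstrap callable that ModelAverage would normally supply):
import numpy as np
from sklearn.covariance import GraphicalLasso  # stand-in estimator with a scalar "alpha" penalty

def subsampler(n_samples, num_subsamples, prng):
    # stand-in for the bootstrap callable: a random subset of row indices
    return prng.permutation(n_samples)[:num_subsamples]

X = np.random.RandomState(0).randn(60, 5)
index, (boot_lam, rp, fitted) = _fit(
    indexed_params=0,
    penalization="subsampling",          # rows are subsampled, the penalty is left untouched
    lam=0.5, lam_perturb=0.1, lam_scale_=1.0,
    estimator=GraphicalLasso(alpha=0.5),
    penalty_name="alpha",
    subsample=0.75,
    bootstrap=subsampler,
    prng=np.random.RandomState(1),
    X=X,
)
# boot_lam is None in "subsampling" mode; the "random" / "fully-random" modes build a
# matrix-valued penalty, which needs an estimator that accepts one (e.g. QuicGraphicalLasso).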
skggm/skggm | inverse_covariance/model_average.py | _spark_map | def _spark_map(fun, indexed_param_grid, sc, seed, X_bc):
"""We cannot pass a RandomState instance to each spark worker since it will
behave identically across partitions. Instead, we explicitly handle the
partitions with a newly seeded instance.
The seed for each partition will be the "seed" (MonteCarloProfile.seed) +
"split_index" which is the partition index.
Following this trick:
https://wegetsignal.wordpress.com/2015/05/08/
generating-random-numbers-for-rdd-in-spark/
"""
def _wrap_random_state(split_index, partition):
prng = np.random.RandomState(seed + split_index)
yield map(partial(fun, prng=prng, X=X_bc), partition)
par_param_grid = sc.parallelize(indexed_param_grid)
indexed_results = par_param_grid.mapPartitionsWithIndex(
_wrap_random_state
).collect()
return [item for sublist in indexed_results for item in sublist] | python | def _spark_map(fun, indexed_param_grid, sc, seed, X_bc):
"""We cannot pass a RandomState instance to each spark worker since it will
behave identically across partitions. Instead, we explicitly handle the
partitions with a newly seeded instance.
The seed for each partition will be the "seed" (MonteCarloProfile.seed) +
"split_index" which is the partition index.
Following this trick:
https://wegetsignal.wordpress.com/2015/05/08/
generating-random-numbers-for-rdd-in-spark/
"""
def _wrap_random_state(split_index, partition):
prng = np.random.RandomState(seed + split_index)
yield map(partial(fun, prng=prng, X=X_bc), partition)
par_param_grid = sc.parallelize(indexed_param_grid)
indexed_results = par_param_grid.mapPartitionsWithIndex(
_wrap_random_state
).collect()
return [item for sublist in indexed_results for item in sublist] | [
"def",
"_spark_map",
"(",
"fun",
",",
"indexed_param_grid",
",",
"sc",
",",
"seed",
",",
"X_bc",
")",
":",
"def",
"_wrap_random_state",
"(",
"split_index",
",",
"partition",
")",
":",
"prng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
"+",
"split_index",
")",
"yield",
"map",
"(",
"partial",
"(",
"fun",
",",
"prng",
"=",
"prng",
",",
"X",
"=",
"X_bc",
")",
",",
"partition",
")",
"par_param_grid",
"=",
"sc",
".",
"parallelize",
"(",
"indexed_param_grid",
")",
"indexed_results",
"=",
"par_param_grid",
".",
"mapPartitionsWithIndex",
"(",
"_wrap_random_state",
")",
".",
"collect",
"(",
")",
"return",
"[",
"item",
"for",
"sublist",
"in",
"indexed_results",
"for",
"item",
"in",
"sublist",
"]"
] | We cannot pass a RandomState instance to each spark worker since it will
behave identically across partitions. Instead, we explicitly handle the
partitions with a newly seeded instance.
The seed for each partition will be the "seed" (MonteCarloProfile.seed) +
"split_index" which is the partition index.
Following this trick:
https://wegetsignal.wordpress.com/2015/05/08/
generating-random-numbers-for-rdd-in-spark/ | [
"We",
"cannot",
"pass",
"a",
"RandomState",
"instance",
"to",
"each",
"spark",
"worker",
"since",
"it",
"will",
"behave",
"identically",
"across",
"partitions",
".",
"Instead",
"we",
"explictly",
"handle",
"the",
"partitions",
"with",
"a",
"newly",
"seeded",
"instance",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/model_average.py#L156-L177 | train |
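A Spark-free sketch of the seeding trick above (hedged: plain Python stands in for mapPartitionsWithIndex, only to show why seeding with seed + split_index gives streams that differ across partitions yet stay reproducible):
import numpy as np

seed = 42
partitions = [list(range(3)), list(range(3))]  # stand-ins for two RDD partitions

def _wrap_random_state(split_index, partition):
    prng = np.random.RandomState(seed + split_index)
    return [prng.rand() for _ in partition]

draws = [_wrap_random_state(i, part) for i, part in enumerate(partitions)]
# draws[0] != draws[1] because the seeds differ, but re-running reproduces both lists exactly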
skggm/skggm | examples/estimator_suite_spark.py | quic_graph_lasso_ebic_manual | def quic_graph_lasso_ebic_manual(X, gamma=0):
"""Run QuicGraphicalLasso with mode='path' and gamma; use EBIC criteria for model
selection.
The EBIC criteria is built into InverseCovarianceEstimator base class
so we demonstrate those utilities here.
"""
print("QuicGraphicalLasso (manual EBIC) with:")
print(" mode: path")
print(" gamma: {}".format(gamma))
model = QuicGraphicalLasso(
lam=1.0,
mode="path",
init_method="cov",
path=np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True),
)
model.fit(X)
ebic_index = model.ebic_select(gamma=gamma)
covariance_ = model.covariance_[ebic_index]
precision_ = model.precision_[ebic_index]
lam_ = model.lam_at_index(ebic_index)
print(" len(path lams): {}".format(len(model.path_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(lam_))
print(" ebic_index: {}".format(ebic_index))
return covariance_, precision_, lam_ | python | def quic_graph_lasso_ebic_manual(X, gamma=0):
"""Run QuicGraphicalLasso with mode='path' and gamma; use EBIC criteria for model
selection.
The EBIC criteria is built into InverseCovarianceEstimator base class
so we demonstrate those utilities here.
"""
print("QuicGraphicalLasso (manual EBIC) with:")
print(" mode: path")
print(" gamma: {}".format(gamma))
model = QuicGraphicalLasso(
lam=1.0,
mode="path",
init_method="cov",
path=np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True),
)
model.fit(X)
ebic_index = model.ebic_select(gamma=gamma)
covariance_ = model.covariance_[ebic_index]
precision_ = model.precision_[ebic_index]
lam_ = model.lam_at_index(ebic_index)
print(" len(path lams): {}".format(len(model.path_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(lam_))
print(" ebic_index: {}".format(ebic_index))
return covariance_, precision_, lam_ | [
"def",
"quic_graph_lasso_ebic_manual",
"(",
"X",
",",
"gamma",
"=",
"0",
")",
":",
"print",
"(",
"\"QuicGraphicalLasso (manual EBIC) with:\"",
")",
"print",
"(",
"\" mode: path\"",
")",
"print",
"(",
"\" gamma: {}\"",
".",
"format",
"(",
"gamma",
")",
")",
"model",
"=",
"QuicGraphicalLasso",
"(",
"lam",
"=",
"1.0",
",",
"mode",
"=",
"\"path\"",
",",
"init_method",
"=",
"\"cov\"",
",",
"path",
"=",
"np",
".",
"logspace",
"(",
"np",
".",
"log10",
"(",
"0.01",
")",
",",
"np",
".",
"log10",
"(",
"1.0",
")",
",",
"num",
"=",
"100",
",",
"endpoint",
"=",
"True",
")",
",",
")",
"model",
".",
"fit",
"(",
"X",
")",
"ebic_index",
"=",
"model",
".",
"ebic_select",
"(",
"gamma",
"=",
"gamma",
")",
"covariance_",
"=",
"model",
".",
"covariance_",
"[",
"ebic_index",
"]",
"precision_",
"=",
"model",
".",
"precision_",
"[",
"ebic_index",
"]",
"lam_",
"=",
"model",
".",
"lam_at_index",
"(",
"ebic_index",
")",
"print",
"(",
"\" len(path lams): {}\"",
".",
"format",
"(",
"len",
"(",
"model",
".",
"path_",
")",
")",
")",
"print",
"(",
"\" lam_scale_: {}\"",
".",
"format",
"(",
"model",
".",
"lam_scale_",
")",
")",
"print",
"(",
"\" lam_: {}\"",
".",
"format",
"(",
"lam_",
")",
")",
"print",
"(",
"\" ebic_index: {}\"",
".",
"format",
"(",
"ebic_index",
")",
")",
"return",
"covariance_",
",",
"precision_",
",",
"lam_"
] | Run QuicGraphicalLasso with mode='path' and gamma; use EBIC criteria for model
selection.
The EBIC criteria is built into InverseCovarianceEstimator base class
so we demonstrate those utilities here. | [
"Run",
"QuicGraphicalLasso",
"with",
"mode",
"=",
"path",
"and",
"gamma",
";",
"use",
"EBIC",
"criteria",
"for",
"model",
"selection",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite_spark.py#L110-L135 | train |
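A hedged usage sketch of the helper above on toy data (it assumes the script's imports, in particular QuicGraphicalLasso and numpy, are available; the gamma values are only illustrative):
import numpy as np

X = np.random.RandomState(0).randn(200, 10)
cov0, prec0, lam0 = quic_graph_lasso_ebic_manual(X, gamma=0)    # classical BIC
cov1, prec1, lam1 = quic_graph_lasso_ebic_manual(X, gamma=0.1)  # stricter EBIC
# A larger gamma penalizes dense graphs more heavily, so lam1 is typically at least lam0.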
skggm/skggm | examples/estimator_suite_spark.py | quic_graph_lasso_ebic | def quic_graph_lasso_ebic(X, gamma=0):
"""Run QuicGraphicalLassoEBIC with gamma.
QuicGraphicalLassoEBIC is a convenience class. Results should be identical to
those obtained via quic_graph_lasso_ebic_manual.
"""
print("QuicGraphicalLassoEBIC with:")
print(" mode: path")
print(" gamma: {}".format(gamma))
model = QuicGraphicalLassoEBIC(lam=1.0, init_method="cov", gamma=gamma)
model.fit(X)
print(" len(path lams): {}".format(len(model.path_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(model.lam_))
return model.covariance_, model.precision_, model.lam_ | python | def quic_graph_lasso_ebic(X, gamma=0):
"""Run QuicGraphicalLassoEBIC with gamma.
QuicGraphicalLassoEBIC is a convenience class. Results should be identical to
those obtained via quic_graph_lasso_ebic_manual.
"""
print("QuicGraphicalLassoEBIC with:")
print(" mode: path")
print(" gamma: {}".format(gamma))
model = QuicGraphicalLassoEBIC(lam=1.0, init_method="cov", gamma=gamma)
model.fit(X)
print(" len(path lams): {}".format(len(model.path_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(model.lam_))
return model.covariance_, model.precision_, model.lam_ | [
"def",
"quic_graph_lasso_ebic",
"(",
"X",
",",
"gamma",
"=",
"0",
")",
":",
"print",
"(",
"\"QuicGraphicalLassoEBIC with:\"",
")",
"print",
"(",
"\" mode: path\"",
")",
"print",
"(",
"\" gamma: {}\"",
".",
"format",
"(",
"gamma",
")",
")",
"model",
"=",
"QuicGraphicalLassoEBIC",
"(",
"lam",
"=",
"1.0",
",",
"init_method",
"=",
"\"cov\"",
",",
"gamma",
"=",
"gamma",
")",
"model",
".",
"fit",
"(",
"X",
")",
"print",
"(",
"\" len(path lams): {}\"",
".",
"format",
"(",
"len",
"(",
"model",
".",
"path_",
")",
")",
")",
"print",
"(",
"\" lam_scale_: {}\"",
".",
"format",
"(",
"model",
".",
"lam_scale_",
")",
")",
"print",
"(",
"\" lam_: {}\"",
".",
"format",
"(",
"model",
".",
"lam_",
")",
")",
"return",
"model",
".",
"covariance_",
",",
"model",
".",
"precision_",
",",
"model",
".",
"lam_"
] | Run QuicGraphicalLassoEBIC with gamma.
QuicGraphicalLassoEBIC is a convenience class. Results should be identical to
those obtained via quic_graph_lasso_ebic_manual. | [
"Run",
"QuicGraphicalLassoEBIC",
"with",
"gamma",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite_spark.py#L138-L152 | train |
skggm/skggm | examples/estimator_suite_spark.py | empirical | def empirical(X):
"""Compute empirical covariance as baseline estimator.
"""
print("Empirical")
cov = np.dot(X.T, X) / n_samples
return cov, np.linalg.inv(cov) | python | def empirical(X):
"""Compute empirical covariance as baseline estimator.
"""
print("Empirical")
cov = np.dot(X.T, X) / n_samples
return cov, np.linalg.inv(cov) | [
"def",
"empirical",
"(",
"X",
")",
":",
"print",
"(",
"\"Empirical\"",
")",
"cov",
"=",
"np",
".",
"dot",
"(",
"X",
".",
"T",
",",
"X",
")",
"/",
"n_samples",
"return",
"cov",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"cov",
")"
] | Compute empirical covariance as baseline estimator. | [
"Compute",
"empirical",
"covariance",
"as",
"baseline",
"estimator",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite_spark.py#L232-L237 | train |
skggm/skggm | examples/estimator_suite_spark.py | sk_ledoit_wolf | def sk_ledoit_wolf(X):
"""Estimate inverse covariance via scikit-learn ledoit_wolf function.
"""
print("Ledoit-Wolf (sklearn)")
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = np.linalg.inv(lw_cov_)
return lw_cov_, lw_prec_ | python | def sk_ledoit_wolf(X):
"""Estimate inverse covariance via scikit-learn ledoit_wolf function.
"""
print("Ledoit-Wolf (sklearn)")
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = np.linalg.inv(lw_cov_)
return lw_cov_, lw_prec_ | [
"def",
"sk_ledoit_wolf",
"(",
"X",
")",
":",
"print",
"(",
"\"Ledoit-Wolf (sklearn)\"",
")",
"lw_cov_",
",",
"_",
"=",
"ledoit_wolf",
"(",
"X",
")",
"lw_prec_",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"lw_cov_",
")",
"return",
"lw_cov_",
",",
"lw_prec_"
] | Estimate inverse covariance via scikit-learn ledoit_wolf function. | [
"Estimate",
"inverse",
"covariance",
"via",
"scikit",
"-",
"learn",
"ledoit_wolf",
"function",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite_spark.py#L240-L246 | train |
skggm/skggm | inverse_covariance/profiling/metrics.py | _nonzero_intersection | def _nonzero_intersection(m, m_hat):
"""Count the number of nonzeros in and between m and m_hat.
Returns
----------
m_nnz : number of nonzeros in m (w/o diagonal)
m_hat_nnz : number of nonzeros in m_hat (w/o diagonal)
intersection_nnz : number of nonzeros in intersection of m/m_hat
(w/o diagonal)
"""
n_features, _ = m.shape
m_no_diag = m.copy()
m_no_diag[np.diag_indices(n_features)] = 0
m_hat_no_diag = m_hat.copy()
m_hat_no_diag[np.diag_indices(n_features)] = 0
m_hat_nnz = len(np.nonzero(m_hat_no_diag.flat)[0])
m_nnz = len(np.nonzero(m_no_diag.flat)[0])
intersection_nnz = len(
np.intersect1d(np.nonzero(m_no_diag.flat)[0], np.nonzero(m_hat_no_diag.flat)[0])
)
return m_nnz, m_hat_nnz, intersection_nnz | python | def _nonzero_intersection(m, m_hat):
"""Count the number of nonzeros in and between m and m_hat.
Returns
----------
m_nnz : number of nonzeros in m (w/o diagonal)
m_hat_nnz : number of nonzeros in m_hat (w/o diagonal)
intersection_nnz : number of nonzeros in intersection of m/m_hat
(w/o diagonal)
"""
n_features, _ = m.shape
m_no_diag = m.copy()
m_no_diag[np.diag_indices(n_features)] = 0
m_hat_no_diag = m_hat.copy()
m_hat_no_diag[np.diag_indices(n_features)] = 0
m_hat_nnz = len(np.nonzero(m_hat_no_diag.flat)[0])
m_nnz = len(np.nonzero(m_no_diag.flat)[0])
intersection_nnz = len(
np.intersect1d(np.nonzero(m_no_diag.flat)[0], np.nonzero(m_hat_no_diag.flat)[0])
)
return m_nnz, m_hat_nnz, intersection_nnz | [
"def",
"_nonzero_intersection",
"(",
"m",
",",
"m_hat",
")",
":",
"n_features",
",",
"_",
"=",
"m",
".",
"shape",
"m_no_diag",
"=",
"m",
".",
"copy",
"(",
")",
"m_no_diag",
"[",
"np",
".",
"diag_indices",
"(",
"n_features",
")",
"]",
"=",
"0",
"m_hat_no_diag",
"=",
"m_hat",
".",
"copy",
"(",
")",
"m_hat_no_diag",
"[",
"np",
".",
"diag_indices",
"(",
"n_features",
")",
"]",
"=",
"0",
"m_hat_nnz",
"=",
"len",
"(",
"np",
".",
"nonzero",
"(",
"m_hat_no_diag",
".",
"flat",
")",
"[",
"0",
"]",
")",
"m_nnz",
"=",
"len",
"(",
"np",
".",
"nonzero",
"(",
"m_no_diag",
".",
"flat",
")",
"[",
"0",
"]",
")",
"intersection_nnz",
"=",
"len",
"(",
"np",
".",
"intersect1d",
"(",
"np",
".",
"nonzero",
"(",
"m_no_diag",
".",
"flat",
")",
"[",
"0",
"]",
",",
"np",
".",
"nonzero",
"(",
"m_hat_no_diag",
".",
"flat",
")",
"[",
"0",
"]",
")",
")",
"return",
"m_nnz",
",",
"m_hat_nnz",
",",
"intersection_nnz"
] | Count the number of nonzeros in and between m and m_hat.
Returns
----------
m_nnz : number of nonzeros in m (w/o diagonal)
m_hat_nnz : number of nonzeros in m_hat (w/o diagonal)
intersection_nnz : number of nonzeros in intersection of m/m_hat
(w/o diagonal) | [
"Count",
"the",
"number",
"of",
"nonzeros",
"in",
"and",
"between",
"m",
"and",
"m_hat",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L4-L30 | train |
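A small worked example for the helper above (hedged, with illustrative matrices). Diagonals are ignored and every off-diagonal edge is counted twice, once per triangle, which is why the counters defined below divide differences by two:
import numpy as np

m = np.array([[1., 1., 0.],
              [1., 1., 1.],
              [0., 1., 1.]])      # true support: edges (0, 1) and (1, 2)
m_hat = np.array([[1., 1., 0.],
                  [1., 1., 0.],
                  [0., 0., 1.]])  # estimated support: edge (0, 1) only
print(_nonzero_intersection(m, m_hat))  # (4, 2, 2): two true edges, one estimated, one shared, all doubled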
skggm/skggm | inverse_covariance/profiling/metrics.py | support_false_positive_count | def support_false_positive_count(m, m_hat):
"""Count the number of false positive support elements in
m_hat in one triangle, not including the diagonal.
"""
m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
return int((m_hat_nnz - intersection_nnz) / 2.0) | python | def support_false_positive_count(m, m_hat):
"""Count the number of false positive support elements in
m_hat in one triangle, not including the diagonal.
"""
m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
return int((m_hat_nnz - intersection_nnz) / 2.0) | [
"def",
"support_false_positive_count",
"(",
"m",
",",
"m_hat",
")",
":",
"m_nnz",
",",
"m_hat_nnz",
",",
"intersection_nnz",
"=",
"_nonzero_intersection",
"(",
"m",
",",
"m_hat",
")",
"return",
"int",
"(",
"(",
"m_hat_nnz",
"-",
"intersection_nnz",
")",
"/",
"2.0",
")"
] | Count the number of false positive support elements in
m_hat in one triangle, not including the diagonal. | [
"Count",
"the",
"number",
"of",
"false",
"positive",
"support",
"elements",
"in",
"m_hat",
"in",
"one",
"triangle",
"not",
"including",
"the",
"diagonal",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L33-L38 | train |
skggm/skggm | inverse_covariance/profiling/metrics.py | support_false_negative_count | def support_false_negative_count(m, m_hat):
"""Count the number of false negative support elements in
m_hat in one triangle, not including the diagonal.
"""
m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
return int((m_nnz - intersection_nnz) / 2.0) | python | def support_false_negative_count(m, m_hat):
"""Count the number of false negative support elements in
m_hat in one triangle, not including the diagonal.
"""
m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
return int((m_nnz - intersection_nnz) / 2.0) | [
"def",
"support_false_negative_count",
"(",
"m",
",",
"m_hat",
")",
":",
"m_nnz",
",",
"m_hat_nnz",
",",
"intersection_nnz",
"=",
"_nonzero_intersection",
"(",
"m",
",",
"m_hat",
")",
"return",
"int",
"(",
"(",
"m_nnz",
"-",
"intersection_nnz",
")",
"/",
"2.0",
")"
] | Count the number of false negative support elements in
m_hat in one triangle, not including the diagonal. | [
"Count",
"the",
"number",
"of",
"false",
"negative",
"support",
"elements",
"in",
"m_hat",
"in",
"one",
"triangle",
"not",
"including",
"the",
"diagonal",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L41-L46 | train |
skggm/skggm | inverse_covariance/profiling/metrics.py | support_difference_count | def support_difference_count(m, m_hat):
"""Count the number of different elements in the support in one triangle,
not including the diagonal.
"""
m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) / 2.0) | python | def support_difference_count(m, m_hat):
"""Count the number of different elements in the support in one triangle,
not including the diagonal.
"""
m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) / 2.0) | [
"def",
"support_difference_count",
"(",
"m",
",",
"m_hat",
")",
":",
"m_nnz",
",",
"m_hat_nnz",
",",
"intersection_nnz",
"=",
"_nonzero_intersection",
"(",
"m",
",",
"m_hat",
")",
"return",
"int",
"(",
"(",
"m_nnz",
"+",
"m_hat_nnz",
"-",
"(",
"2",
"*",
"intersection_nnz",
")",
")",
"/",
"2.0",
")"
] | Count the number of different elements in the support in one triangle,
not including the diagonal. | [
"Count",
"the",
"number",
"of",
"different",
"elements",
"in",
"the",
"support",
"in",
"one",
"triangle",
"not",
"including",
"the",
"diagonal",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L49-L54 | train |
skggm/skggm | inverse_covariance/profiling/metrics.py | has_exact_support | def has_exact_support(m, m_hat):
"""Returns 1 if support_difference_count is zero, 0 else.
"""
m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) == 0) | python | def has_exact_support(m, m_hat):
"""Returns 1 if support_difference_count is zero, 0 else.
"""
m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) == 0) | [
"def",
"has_exact_support",
"(",
"m",
",",
"m_hat",
")",
":",
"m_nnz",
",",
"m_hat_nnz",
",",
"intersection_nnz",
"=",
"_nonzero_intersection",
"(",
"m",
",",
"m_hat",
")",
"return",
"int",
"(",
"(",
"m_nnz",
"+",
"m_hat_nnz",
"-",
"(",
"2",
"*",
"intersection_nnz",
")",
")",
"==",
"0",
")"
] | Returns 1 if support_difference_count is zero, 0 else. | [
"Returns",
"1",
"if",
"support_difference_count",
"is",
"zero",
"0",
"else",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L57-L61 | train |
skggm/skggm | inverse_covariance/profiling/metrics.py | has_approx_support | def has_approx_support(m, m_hat, prob=0.01):
"""Returns 1 if model selection error is less than or equal to prob rate,
0 else.
NOTE: why does np.nonzero/np.flatnonzero create so many problems?
"""
m_nz = np.flatnonzero(np.triu(m, 1))
m_hat_nz = np.flatnonzero(np.triu(m_hat, 1))
upper_diagonal_mask = np.flatnonzero(np.triu(np.ones(m.shape), 1))
not_m_nz = np.setdiff1d(upper_diagonal_mask, m_nz)
intersection = np.in1d(m_hat_nz, m_nz) # true positives
not_intersection = np.in1d(m_hat_nz, not_m_nz) # false positives
true_positive_rate = 0.0
if len(m_nz):
true_positive_rate = 1. * np.sum(intersection) / len(m_nz)
true_negative_rate = 1. - true_positive_rate
false_positive_rate = 0.0
if len(not_m_nz):
false_positive_rate = 1. * np.sum(not_intersection) / len(not_m_nz)
return int(np.less_equal(true_negative_rate + false_positive_rate, prob)) | python | def has_approx_support(m, m_hat, prob=0.01):
"""Returns 1 if model selection error is less than or equal to prob rate,
0 else.
NOTE: why does np.nonzero/np.flatnonzero create so many problems?
"""
m_nz = np.flatnonzero(np.triu(m, 1))
m_hat_nz = np.flatnonzero(np.triu(m_hat, 1))
upper_diagonal_mask = np.flatnonzero(np.triu(np.ones(m.shape), 1))
not_m_nz = np.setdiff1d(upper_diagonal_mask, m_nz)
intersection = np.in1d(m_hat_nz, m_nz) # true positives
not_intersection = np.in1d(m_hat_nz, not_m_nz) # false positives
true_positive_rate = 0.0
if len(m_nz):
true_positive_rate = 1. * np.sum(intersection) / len(m_nz)
true_negative_rate = 1. - true_positive_rate
false_positive_rate = 0.0
if len(not_m_nz):
false_positive_rate = 1. * np.sum(not_intersection) / len(not_m_nz)
return int(np.less_equal(true_negative_rate + false_positive_rate, prob)) | [
"def",
"has_approx_support",
"(",
"m",
",",
"m_hat",
",",
"prob",
"=",
"0.01",
")",
":",
"m_nz",
"=",
"np",
".",
"flatnonzero",
"(",
"np",
".",
"triu",
"(",
"m",
",",
"1",
")",
")",
"m_hat_nz",
"=",
"np",
".",
"flatnonzero",
"(",
"np",
".",
"triu",
"(",
"m_hat",
",",
"1",
")",
")",
"upper_diagonal_mask",
"=",
"np",
".",
"flatnonzero",
"(",
"np",
".",
"triu",
"(",
"np",
".",
"ones",
"(",
"m",
".",
"shape",
")",
",",
"1",
")",
")",
"not_m_nz",
"=",
"np",
".",
"setdiff1d",
"(",
"upper_diagonal_mask",
",",
"m_nz",
")",
"intersection",
"=",
"np",
".",
"in1d",
"(",
"m_hat_nz",
",",
"m_nz",
")",
"# true positives",
"not_intersection",
"=",
"np",
".",
"in1d",
"(",
"m_hat_nz",
",",
"not_m_nz",
")",
"# false positives",
"true_positive_rate",
"=",
"0.0",
"if",
"len",
"(",
"m_nz",
")",
":",
"true_positive_rate",
"=",
"1.",
"*",
"np",
".",
"sum",
"(",
"intersection",
")",
"/",
"len",
"(",
"m_nz",
")",
"true_negative_rate",
"=",
"1.",
"-",
"true_positive_rate",
"false_positive_rate",
"=",
"0.0",
"if",
"len",
"(",
"not_m_nz",
")",
":",
"false_positive_rate",
"=",
"1.",
"*",
"np",
".",
"sum",
"(",
"not_intersection",
")",
"/",
"len",
"(",
"not_m_nz",
")",
"return",
"int",
"(",
"np",
".",
"less_equal",
"(",
"true_negative_rate",
"+",
"false_positive_rate",
",",
"prob",
")",
")"
] | Returns 1 if model selection error is less than or equal to prob rate,
0 else.
NOTE: why does np.nonzero/np.flatnonzero create so many problems?
"Returns",
"1",
"if",
"model",
"selection",
"error",
"is",
"less",
"than",
"or",
"equal",
"to",
"prob",
"rate",
"0",
"else",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L64-L88 | train |
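A hedged worked example for has_approx_support (illustrative matrices): missing one of two true edges, with no false positives, gives a combined error rate of 0.5, so only a loose threshold accepts the estimate:
import numpy as np

m = np.array([[1., 1., 1.],
              [1., 1., 0.],
              [1., 0., 1.]])      # true edges: (0, 1) and (0, 2)
m_hat = np.array([[1., 1., 0.],
                  [1., 1., 0.],
                  [0., 0., 1.]])  # recovered: (0, 1) only
print(has_approx_support(m, m_hat, prob=0.01))  # 0, the 0.5 error rate exceeds the budget
print(has_approx_support(m, m_hat, prob=0.5))   # 1, just inside the 50% budget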
skggm/skggm | inverse_covariance/inverse_covariance.py | _validate_path | def _validate_path(path):
"""Sorts path values from largest to smallest.
Will warn if path parameter was not already sorted.
"""
if path is None:
return None
new_path = np.array(sorted(set(path), reverse=True))
if new_path[0] != path[0]:
print("Warning: Path must be sorted largest to smallest.")
return new_path | python | def _validate_path(path):
"""Sorts path values from largest to smallest.
Will warn if path parameter was not already sorted.
"""
if path is None:
return None
new_path = np.array(sorted(set(path), reverse=True))
if new_path[0] != path[0]:
print("Warning: Path must be sorted largest to smallest.")
return new_path | [
"def",
"_validate_path",
"(",
"path",
")",
":",
"if",
"path",
"is",
"None",
":",
"return",
"None",
"new_path",
"=",
"np",
".",
"array",
"(",
"sorted",
"(",
"set",
"(",
"path",
")",
",",
"reverse",
"=",
"True",
")",
")",
"if",
"new_path",
"[",
"0",
"]",
"!=",
"path",
"[",
"0",
"]",
":",
"print",
"(",
"\"Warning: Path must be sorted largest to smallest.\"",
")",
"return",
"new_path"
] | Sorts path values from largest to smallest.
Will warn if path parameter was not already sorted. | [
"Sorts",
"path",
"values",
"from",
"largest",
"to",
"smallest",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/inverse_covariance.py#L77-L89 | train |
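A quick hedged illustration of the helper above: an unsorted path is de-duplicated, re-ordered from largest to smallest, and the warning is printed because the first element changed:
new_path = _validate_path([0.01, 1.0, 0.1])
# prints: Warning: Path must be sorted largest to smallest.
# new_path is now array([1.0, 0.1, 0.01])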
skggm/skggm | inverse_covariance/inverse_covariance.py | InverseCovarianceEstimator.ebic | def ebic(self, gamma=0):
"""Compute EBIC scores for each model. If model is not "path" then
returns a scalar score value.
May require self.path_
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Scalar ebic score or list of ebic scores.
"""
if not self.is_fitted_:
return
if not isinstance(self.precision_, list):
return metrics.ebic(
self.sample_covariance_,
self.precision_,
self.n_samples_,
self.n_features_,
gamma=gamma,
)
ebic_scores = []
for lidx, lam in enumerate(self.path_):
ebic_scores.append(
metrics.ebic(
self.sample_covariance_,
self.precision_[lidx],
self.n_samples_,
self.n_features_,
gamma=gamma,
)
)
return np.array(ebic_scores) | python | def ebic(self, gamma=0):
"""Compute EBIC scores for each model. If model is not "path" then
returns a scalar score value.
May require self.path_
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Scalar ebic score or list of ebic scores.
"""
if not self.is_fitted_:
return
if not isinstance(self.precision_, list):
return metrics.ebic(
self.sample_covariance_,
self.precision_,
self.n_samples_,
self.n_features_,
gamma=gamma,
)
ebic_scores = []
for lidx, lam in enumerate(self.path_):
ebic_scores.append(
metrics.ebic(
self.sample_covariance_,
self.precision_[lidx],
self.n_samples_,
self.n_features_,
gamma=gamma,
)
)
return np.array(ebic_scores) | [
"def",
"ebic",
"(",
"self",
",",
"gamma",
"=",
"0",
")",
":",
"if",
"not",
"self",
".",
"is_fitted_",
":",
"return",
"if",
"not",
"isinstance",
"(",
"self",
".",
"precision_",
",",
"list",
")",
":",
"return",
"metrics",
".",
"ebic",
"(",
"self",
".",
"sample_covariance_",
",",
"self",
".",
"precision_",
",",
"self",
".",
"n_samples_",
",",
"self",
".",
"n_features_",
",",
"gamma",
"=",
"gamma",
",",
")",
"ebic_scores",
"=",
"[",
"]",
"for",
"lidx",
",",
"lam",
"in",
"enumerate",
"(",
"self",
".",
"path_",
")",
":",
"ebic_scores",
".",
"append",
"(",
"metrics",
".",
"ebic",
"(",
"self",
".",
"sample_covariance_",
",",
"self",
".",
"precision_",
"[",
"lidx",
"]",
",",
"self",
".",
"n_samples_",
",",
"self",
".",
"n_features_",
",",
"gamma",
"=",
"gamma",
",",
")",
")",
"return",
"np",
".",
"array",
"(",
"ebic_scores",
")"
] | Compute EBIC scores for each model. If model is not "path" then
returns a scalar score value.
May require self.path_
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Scalar ebic score or list of ebic scores. | [
"Compute",
"EBIC",
"scores",
"for",
"each",
"model",
".",
"If",
"model",
"is",
"not",
"path",
"then",
"returns",
"a",
"scalar",
"score",
"value",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/inverse_covariance.py#L268-L313 | train |
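For reference, the criterion from Foygel and Drton (2010) that the metrics.ebic call above evaluates, stated here from the paper rather than transcribed from skggm's code:
$$\mathrm{EBIC}_{\gamma}(\hat{\Theta}) = -2\,\ell_{n}(\hat{\Theta}) + |E(\hat{\Theta})|\,\log n + 4\,\gamma\,|E(\hat{\Theta})|\,\log p$$
where \ell_n is the Gaussian log-likelihood of the estimated precision matrix \hat{\Theta} on n samples, |E(\hat{\Theta})| is its number of edges (off-diagonal nonzeros counted once), p is the number of features, and \gamma = 0 recovers the classical BIC.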
skggm/skggm | inverse_covariance/inverse_covariance.py | InverseCovarianceEstimator.ebic_select | def ebic_select(self, gamma=0):
"""Uses Extended Bayesian Information Criteria for model selection.
Can only be used in path mode (doesn't really make sense otherwise).
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Lambda index with best ebic score. When multiple ebic scores are the
same, returns the smallest lambda (largest index) with minimum score.
"""
if not isinstance(self.precision_, list):
raise ValueError("EBIC requires multiple models to select from.")
if not self.is_fitted_:
return
ebic_scores = self.ebic(gamma=gamma)
min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
return np.max(min_indices) | python | def ebic_select(self, gamma=0):
"""Uses Extended Bayesian Information Criteria for model selection.
Can only be used in path mode (doesn't really make sense otherwise).
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Lambda index with best ebic score. When multiple ebic scores are the
same, returns the smallest lambda (largest index) with minimum score.
"""
if not isinstance(self.precision_, list):
raise ValueError("EBIC requires multiple models to select from.")
if not self.is_fitted_:
return
ebic_scores = self.ebic(gamma=gamma)
min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
return np.max(min_indices) | [
"def",
"ebic_select",
"(",
"self",
",",
"gamma",
"=",
"0",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"precision_",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"EBIC requires multiple models to select from.\"",
")",
"return",
"if",
"not",
"self",
".",
"is_fitted_",
":",
"return",
"ebic_scores",
"=",
"self",
".",
"ebic",
"(",
"gamma",
"=",
"gamma",
")",
"min_indices",
"=",
"np",
".",
"where",
"(",
"np",
".",
"abs",
"(",
"ebic_scores",
"-",
"ebic_scores",
".",
"min",
"(",
")",
")",
"<",
"1e-10",
")",
"return",
"np",
".",
"max",
"(",
"min_indices",
")"
] | Uses Extended Bayesian Information Criteria for model selection.
Can only be used in path mode (doesn't really make sense otherwise).
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Lambda index with best ebic score. When multiple ebic scores are the
same, returns the smallest lambda (largest index) with minimum score. | [
"Uses",
"Extended",
"Bayesian",
"Information",
"Criteria",
"for",
"model",
"selection",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/inverse_covariance.py#L315-L345 | train |
skggm/skggm | examples/estimator_suite.py | quic_graph_lasso | def quic_graph_lasso(X, num_folds, metric):
"""Run QuicGraphicalLasso with mode='default' and use standard scikit
GridSearchCV to find the best lambda.
Primarily demonstrates compatibility with existing scikit tooling.
"""
print("QuicGraphicalLasso + GridSearchCV with:")
print(" metric: {}".format(metric))
search_grid = {
"lam": np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True),
"init_method": ["cov"],
"score_metric": [metric],
}
model = GridSearchCV(QuicGraphicalLasso(), search_grid, cv=num_folds, refit=True)
model.fit(X)
bmodel = model.best_estimator_
print(" len(cv_lams): {}".format(len(search_grid["lam"])))
print(" cv-lam: {}".format(model.best_params_["lam"]))
print(" lam_scale_: {}".format(bmodel.lam_scale_))
print(" lam_: {}".format(bmodel.lam_))
return bmodel.covariance_, bmodel.precision_, bmodel.lam_ | python | def quic_graph_lasso(X, num_folds, metric):
"""Run QuicGraphicalLasso with mode='default' and use standard scikit
GridSearchCV to find the best lambda.
Primarily demonstrates compatibility with existing scikit tooling.
"""
print("QuicGraphicalLasso + GridSearchCV with:")
print(" metric: {}".format(metric))
search_grid = {
"lam": np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True),
"init_method": ["cov"],
"score_metric": [metric],
}
model = GridSearchCV(QuicGraphicalLasso(), search_grid, cv=num_folds, refit=True)
model.fit(X)
bmodel = model.best_estimator_
print(" len(cv_lams): {}".format(len(search_grid["lam"])))
print(" cv-lam: {}".format(model.best_params_["lam"]))
print(" lam_scale_: {}".format(bmodel.lam_scale_))
print(" lam_: {}".format(bmodel.lam_))
return bmodel.covariance_, bmodel.precision_, bmodel.lam_ | [
"def",
"quic_graph_lasso",
"(",
"X",
",",
"num_folds",
",",
"metric",
")",
":",
"print",
"(",
"\"QuicGraphicalLasso + GridSearchCV with:\"",
")",
"print",
"(",
"\" metric: {}\"",
".",
"format",
"(",
"metric",
")",
")",
"search_grid",
"=",
"{",
"\"lam\"",
":",
"np",
".",
"logspace",
"(",
"np",
".",
"log10",
"(",
"0.01",
")",
",",
"np",
".",
"log10",
"(",
"1.0",
")",
",",
"num",
"=",
"100",
",",
"endpoint",
"=",
"True",
")",
",",
"\"init_method\"",
":",
"[",
"\"cov\"",
"]",
",",
"\"score_metric\"",
":",
"[",
"metric",
"]",
",",
"}",
"model",
"=",
"GridSearchCV",
"(",
"QuicGraphicalLasso",
"(",
")",
",",
"search_grid",
",",
"cv",
"=",
"num_folds",
",",
"refit",
"=",
"True",
")",
"model",
".",
"fit",
"(",
"X",
")",
"bmodel",
"=",
"model",
".",
"best_estimator_",
"print",
"(",
"\" len(cv_lams): {}\"",
".",
"format",
"(",
"len",
"(",
"search_grid",
"[",
"\"lam\"",
"]",
")",
")",
")",
"print",
"(",
"\" cv-lam: {}\"",
".",
"format",
"(",
"model",
".",
"best_params_",
"[",
"\"lam\"",
"]",
")",
")",
"print",
"(",
"\" lam_scale_: {}\"",
".",
"format",
"(",
"bmodel",
".",
"lam_scale_",
")",
")",
"print",
"(",
"\" lam_: {}\"",
".",
"format",
"(",
"bmodel",
".",
"lam_",
")",
")",
"return",
"bmodel",
".",
"covariance_",
",",
"bmodel",
".",
"precision_",
",",
"bmodel",
".",
"lam_"
] | Run QuicGraphicalLasso with mode='default' and use standard scikit
GridSearchCV to find the best lambda.
Primarily demonstrates compatibility with existing scikit tooling. | [
"Run",
"QuicGraphicalLasso",
"with",
"mode",
"=",
"default",
"and",
"use",
"standard",
"scikit",
"GridSearchCV",
"to",
"find",
"the",
"best",
"lambda",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite.py#L97-L117 | train |
skggm/skggm | examples/estimator_suite.py | quic_graph_lasso_cv | def quic_graph_lasso_cv(X, metric):
"""Run QuicGraphicalLassoCV on data with metric of choice.
Compare results with GridSearchCV + quic_graph_lasso. The number of
lambdas tested should be much lower with similar final lam_ selected.
"""
print("QuicGraphicalLassoCV with:")
print(" metric: {}".format(metric))
model = QuicGraphicalLassoCV(
cv=2,  # can't deal with more folds at this small sample size
n_refinements=6,
n_jobs=1,
init_method="cov",
score_metric=metric,
)
model.fit(X)
print(" len(cv_lams): {}".format(len(model.cv_lams_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(model.lam_))
return model.covariance_, model.precision_, model.lam_ | python | def quic_graph_lasso_cv(X, metric):
"""Run QuicGraphicalLassoCV on data with metric of choice.
Compare results with GridSearchCV + quic_graph_lasso. The number of
lambdas tested should be much lower with similar final lam_ selected.
"""
print("QuicGraphicalLassoCV with:")
print(" metric: {}".format(metric))
model = QuicGraphicalLassoCV(
cv=2,  # can't deal with more folds at this small sample size
n_refinements=6,
n_jobs=1,
init_method="cov",
score_metric=metric,
)
model.fit(X)
print(" len(cv_lams): {}".format(len(model.cv_lams_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(model.lam_))
return model.covariance_, model.precision_, model.lam_ | [
"def",
"quic_graph_lasso_cv",
"(",
"X",
",",
"metric",
")",
":",
"print",
"(",
"\"QuicGraphicalLassoCV with:\"",
")",
"print",
"(",
"\" metric: {}\"",
".",
"format",
"(",
"metric",
")",
")",
"model",
"=",
"QuicGraphicalLassoCV",
"(",
"cv",
"=",
"2",
",",
"# cant deal w more folds at small size",
"n_refinements",
"=",
"6",
",",
"n_jobs",
"=",
"1",
",",
"init_method",
"=",
"\"cov\"",
",",
"score_metric",
"=",
"metric",
",",
")",
"model",
".",
"fit",
"(",
"X",
")",
"print",
"(",
"\" len(cv_lams): {}\"",
".",
"format",
"(",
"len",
"(",
"model",
".",
"cv_lams_",
")",
")",
")",
"print",
"(",
"\" lam_scale_: {}\"",
".",
"format",
"(",
"model",
".",
"lam_scale_",
")",
")",
"print",
"(",
"\" lam_: {}\"",
".",
"format",
"(",
"model",
".",
"lam_",
")",
")",
"return",
"model",
".",
"covariance_",
",",
"model",
".",
"precision_",
",",
"model",
".",
"lam_"
] | Run QuicGraphicalLassoCV on data with metric of choice.
Compare results with GridSearchCV + quic_graph_lasso. The number of
lambdas tested should be much lower with similar final lam_ selected. | [
"Run",
"QuicGraphicalLassoCV",
"on",
"data",
"with",
"metric",
"of",
"choice",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite.py#L120-L139 | train |
skggm/skggm | examples/estimator_suite.py | graph_lasso | def graph_lasso(X, num_folds):
"""Estimate inverse covariance via scikit-learn GraphLassoCV class.
"""
print("GraphLasso (sklearn)")
model = GraphLassoCV(cv=num_folds)
model.fit(X)
print(" lam_: {}".format(model.alpha_))
return model.covariance_, model.precision_, model.alpha_ | python | def graph_lasso(X, num_folds):
"""Estimate inverse covariance via scikit-learn GraphLassoCV class.
"""
print("GraphLasso (sklearn)")
model = GraphLassoCV(cv=num_folds)
model.fit(X)
print(" lam_: {}".format(model.alpha_))
return model.covariance_, model.precision_, model.alpha_ | [
"def",
"graph_lasso",
"(",
"X",
",",
"num_folds",
")",
":",
"print",
"(",
"\"GraphLasso (sklearn)\"",
")",
"model",
"=",
"GraphLassoCV",
"(",
"cv",
"=",
"num_folds",
")",
"model",
".",
"fit",
"(",
"X",
")",
"print",
"(",
"\" lam_: {}\"",
".",
"format",
"(",
"model",
".",
"alpha_",
")",
")",
"return",
"model",
".",
"covariance_",
",",
"model",
".",
"precision_",
",",
"model",
".",
"alpha_"
] | Estimate inverse covariance via scikit-learn GraphLassoCV class. | [
"Estimate",
"inverse",
"covariance",
"via",
"scikit",
"-",
"learn",
"GraphLassoCV",
"class",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite.py#L295-L302 | train |
skggm/skggm | inverse_covariance/quic_graph_lasso.py | _quic_path | def _quic_path(
X,
path,
X_test=None,
lam=0.5,
tol=1e-6,
max_iter=1000,
Theta0=None,
Sigma0=None,
method="quic",
verbose=0,
score_metric="log_likelihood",
init_method="corrcoef",
):
"""Wrapper to compute path for example X.
"""
S, lam_scale_ = _init_coefs(X, method=init_method)
path = path.copy(order="C")
if method == "quic":
(precisions_, covariances_, opt_, cputime_, iters_, duality_gap_) = quic(
S,
lam,
mode="path",
tol=tol,
max_iter=max_iter,
Theta0=Theta0,
Sigma0=Sigma0,
path=path,
msg=verbose,
)
else:
raise NotImplementedError("Only method='quic' has been implemented.")
if X_test is not None:
S_test, lam_scale_test = _init_coefs(X_test, method=init_method)
path_errors = []
for lidx, lam in enumerate(path):
path_errors.append(
_compute_error(
S_test,
covariances_[lidx],
precisions_[lidx],
score_metric=score_metric,
)
)
scores_ = [-e for e in path_errors]
return covariances_, precisions_, scores_
return covariances_, precisions_ | python | def _quic_path(
X,
path,
X_test=None,
lam=0.5,
tol=1e-6,
max_iter=1000,
Theta0=None,
Sigma0=None,
method="quic",
verbose=0,
score_metric="log_likelihood",
init_method="corrcoef",
):
"""Wrapper to compute path for example X.
"""
S, lam_scale_ = _init_coefs(X, method=init_method)
path = path.copy(order="C")
if method == "quic":
(precisions_, covariances_, opt_, cputime_, iters_, duality_gap_) = quic(
S,
lam,
mode="path",
tol=tol,
max_iter=max_iter,
Theta0=Theta0,
Sigma0=Sigma0,
path=path,
msg=verbose,
)
else:
raise NotImplementedError("Only method='quic' has been implemented.")
if X_test is not None:
S_test, lam_scale_test = _init_coefs(X_test, method=init_method)
path_errors = []
for lidx, lam in enumerate(path):
path_errors.append(
_compute_error(
S_test,
covariances_[lidx],
precisions_[lidx],
score_metric=score_metric,
)
)
scores_ = [-e for e in path_errors]
return covariances_, precisions_, scores_
return covariances_, precisions_ | [
"def",
"_quic_path",
"(",
"X",
",",
"path",
",",
"X_test",
"=",
"None",
",",
"lam",
"=",
"0.5",
",",
"tol",
"=",
"1e-6",
",",
"max_iter",
"=",
"1000",
",",
"Theta0",
"=",
"None",
",",
"Sigma0",
"=",
"None",
",",
"method",
"=",
"\"quic\"",
",",
"verbose",
"=",
"0",
",",
"score_metric",
"=",
"\"log_likelihood\"",
",",
"init_method",
"=",
"\"corrcoef\"",
",",
")",
":",
"S",
",",
"lam_scale_",
"=",
"_init_coefs",
"(",
"X",
",",
"method",
"=",
"init_method",
")",
"path",
"=",
"path",
".",
"copy",
"(",
"order",
"=",
"\"C\"",
")",
"if",
"method",
"==",
"\"quic\"",
":",
"(",
"precisions_",
",",
"covariances_",
",",
"opt_",
",",
"cputime_",
",",
"iters_",
",",
"duality_gap_",
")",
"=",
"quic",
"(",
"S",
",",
"lam",
",",
"mode",
"=",
"\"path\"",
",",
"tol",
"=",
"tol",
",",
"max_iter",
"=",
"max_iter",
",",
"Theta0",
"=",
"Theta0",
",",
"Sigma0",
"=",
"Sigma0",
",",
"path",
"=",
"path",
",",
"msg",
"=",
"verbose",
",",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Only method='quic' has been implemented.\"",
")",
"if",
"X_test",
"is",
"not",
"None",
":",
"S_test",
",",
"lam_scale_test",
"=",
"_init_coefs",
"(",
"X_test",
",",
"method",
"=",
"init_method",
")",
"path_errors",
"=",
"[",
"]",
"for",
"lidx",
",",
"lam",
"in",
"enumerate",
"(",
"path",
")",
":",
"path_errors",
".",
"append",
"(",
"_compute_error",
"(",
"S_test",
",",
"covariances_",
"[",
"lidx",
"]",
",",
"precisions_",
"[",
"lidx",
"]",
",",
"score_metric",
"=",
"score_metric",
",",
")",
")",
"scores_",
"=",
"[",
"-",
"e",
"for",
"e",
"in",
"path_errors",
"]",
"return",
"covariances_",
",",
"precisions_",
",",
"scores_",
"return",
"covariances_",
",",
"precisions_"
] | Wrapper to compute path for example X. | [
"Wrapper",
"to",
"compute",
"path",
"for",
"example",
"X",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/quic_graph_lasso.py#L383-L435 | train |
skggm/skggm | inverse_covariance/quic_graph_lasso.py | QuicGraphicalLasso.lam_at_index | def lam_at_index(self, lidx):
"""Compute the scaled lambda used at index lidx.
"""
if self.path_ is None:
return self.lam * self.lam_scale_
return self.lam * self.lam_scale_ * self.path_[lidx] | python | def lam_at_index(self, lidx):
"""Compute the scaled lambda used at index lidx.
"""
if self.path_ is None:
return self.lam * self.lam_scale_
return self.lam * self.lam_scale_ * self.path_[lidx] | [
"def",
"lam_at_index",
"(",
"self",
",",
"lidx",
")",
":",
"if",
"self",
".",
"path_",
"is",
"None",
":",
"return",
"self",
".",
"lam",
"*",
"self",
".",
"lam_scale_",
"return",
"self",
".",
"lam",
"*",
"self",
".",
"lam_scale_",
"*",
"self",
".",
"path_",
"[",
"lidx",
"]"
] | Compute the scaled lambda used at index lidx. | [
"Compute",
"the",
"scaled",
"lambda",
"used",
"at",
"index",
"lidx",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/quic_graph_lasso.py#L361-L367 | train |
skggm/skggm | inverse_covariance/rank_correlation.py | _compute_ranks | def _compute_ranks(X, winsorize=False, truncation=None, verbose=True):
"""
Transform each column into ranked data. Tied ranks are averaged.
Ranks can optionally be winsorized as described in Liu 2009 otherwise
this returns Tsukahara's scaled rank based Z-estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The data matrix where each column is a feature.
Row observations for each column will be replaced
by corresponding rank.
winsorize: bool
Choose whether ranks should be winsorized (trimmed) or not. If True,
then ranks will be winsorized using the truncation parameter.
truncation: (float)
The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)), where
n is the number of samples.
Returns
-------
Xrank
References
----------
Liu, Han, John Lafferty, and Larry Wasserman.
"The nonparanormal: Semiparametric estimation of high dimensional
undirected graphs."
Journal of Machine Learning Research 10.Oct (2009): 2295-2328.
"""
n_samples, n_features = X.shape
Xrank = np.zeros(shape=X.shape)
if winsorize:
if truncation is None:
truncation = 1 / (
4 * np.power(n_samples, 0.25) * np.sqrt(np.pi * np.log(n_samples))
)
elif truncation > 1:
truncation = min(1.0, truncation)
for col in np.arange(n_features):
Xrank[:, col] = rankdata(X[:, col], method="average")
Xrank[:, col] /= n_samples
if winsorize:
if n_samples > 100 * n_features:
Xrank[:, col] = n_samples * Xrank[:, col] / (n_samples + 1)
else:
lower_truncate = Xrank[:, col] <= truncation
upper_truncate = Xrank[:, col] > 1 - truncation
Xrank[lower_truncate, col] = truncation
Xrank[upper_truncate, col] = 1 - truncation
return Xrank | python | def _compute_ranks(X, winsorize=False, truncation=None, verbose=True):
"""
Transform each column into ranked data. Tied ranks are averaged.
Ranks can optionally be winsorized as described in Liu 2009 otherwise
this returns Tsukahara's scaled rank based Z-estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The data matrix where each column is a feature.
Row observations for each column will be replaced
by corresponding rank.
winsorize: bool
Choose whether ranks should be winsorized (trimmed) or not. If True,
then ranks will be winsorized using the truncation parameter.
truncation: (float)
The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)), where
n is the number of samples.
Returns
-------
Xrank
References
----------
Liu, Han, John Lafferty, and Larry Wasserman.
"The nonparanormal: Semiparametric estimation of high dimensional
undirected graphs."
Journal of Machine Learning Research 10.Oct (2009): 2295-2328.
"""
n_samples, n_features = X.shape
Xrank = np.zeros(shape=X.shape)
if winsorize:
if truncation is None:
truncation = 1 / (
4 * np.power(n_samples, 0.25) * np.sqrt(np.pi * np.log(n_samples))
)
elif truncation > 1:
truncation = min(1.0, truncation)
for col in np.arange(n_features):
Xrank[:, col] = rankdata(X[:, col], method="average")
Xrank[:, col] /= n_samples
if winsorize:
if n_samples > 100 * n_features:
Xrank[:, col] = n_samples * Xrank[:, col] / (n_samples + 1)
else:
lower_truncate = Xrank[:, col] <= truncation
upper_truncate = Xrank[:, col] > 1 - truncation
Xrank[lower_truncate, col] = truncation
Xrank[upper_truncate, col] = 1 - truncation
return Xrank | [
"def",
"_compute_ranks",
"(",
"X",
",",
"winsorize",
"=",
"False",
",",
"truncation",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"n_samples",
",",
"n_features",
"=",
"X",
".",
"shape",
"Xrank",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"X",
".",
"shape",
")",
"if",
"winsorize",
":",
"if",
"truncation",
"is",
"None",
":",
"truncation",
"=",
"1",
"/",
"(",
"4",
"*",
"np",
".",
"power",
"(",
"n_samples",
",",
"0.25",
")",
"*",
"np",
".",
"sqrt",
"(",
"np",
".",
"pi",
"*",
"np",
".",
"log",
"(",
"n_samples",
")",
")",
")",
"elif",
"truncation",
">",
"1",
":",
"truncation",
"=",
"np",
".",
"min",
"(",
"1.0",
",",
"truncation",
")",
"for",
"col",
"in",
"np",
".",
"arange",
"(",
"n_features",
")",
":",
"Xrank",
"[",
":",
",",
"col",
"]",
"=",
"rankdata",
"(",
"X",
"[",
":",
",",
"col",
"]",
",",
"method",
"=",
"\"average\"",
")",
"Xrank",
"[",
":",
",",
"col",
"]",
"/=",
"n_samples",
"if",
"winsorize",
":",
"if",
"n_samples",
">",
"100",
"*",
"n_features",
":",
"Xrank",
"[",
":",
",",
"col",
"]",
"=",
"n_samples",
"*",
"Xrank",
"[",
":",
",",
"col",
"]",
"/",
"(",
"n_samples",
"+",
"1",
")",
"else",
":",
"lower_truncate",
"=",
"Xrank",
"[",
":",
",",
"col",
"]",
"<=",
"truncation",
"upper_truncate",
"=",
"Xrank",
"[",
":",
",",
"col",
"]",
">",
"1",
"-",
"truncation",
"Xrank",
"[",
"lower_truncate",
",",
"col",
"]",
"=",
"truncation",
"Xrank",
"[",
"upper_truncate",
",",
"col",
"]",
"=",
"1",
"-",
"truncation",
"return",
"Xrank"
] | Transform each column into ranked data. Tied ranks are averaged.
Ranks can optionally be winsorized as described in Liu 2009 otherwise
this returns Tsukahara's scaled rank based Z-estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The data matrix where each column is a feature.
Row observations for each column will be replaced
by corresponding rank.
winsorize: bool
Choose whether ranks should be winsorized (trimmed) or not. If True,
then ranks will be winsorized using the truncation parameter.
truncation: (float)
The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)), where
n is the number of samples.
Returns
-------
Xrank
References
----------
Liu, Han, John Lafferty, and Larry Wasserman.
"The nonparanormal: Semiparametric estimation of high dimensional
undirected graphs."
Journal of Machine Learning Research 10.Oct (2009): 2295-2328. | [
"Transform",
"each",
"column",
"into",
"ranked",
"data",
".",
"Tied",
"ranks",
"are",
"averaged",
".",
"Ranks",
"can",
"optionally",
"be",
"winsorized",
"as",
"described",
"in",
"Liu",
"2009",
"otherwise",
"this",
"returns",
"Tsukahara",
"s",
"scaled",
"rank",
"based",
"Z",
"-",
"estimator",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/rank_correlation.py#L9-L66 | train |
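A small hedged illustration of the helper above on a single column (values chosen for the example): the extreme observation keeps its rank ordering, but winsorization pulls its scaled rank in from the boundary toward 1 - truncation:
import numpy as np

X = np.array([[3.0], [1.0], [2.0], [100.0]])
print(_compute_ranks(X))
# scaled ranks 0.75, 0.25, 0.5, 1.0 for the four rows
print(_compute_ranks(X, winsorize=True))
# the top rank 1.0 is truncated to 1 - truncation (about 0.92 for n = 4); the others are unchanged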
skggm/skggm | inverse_covariance/rank_correlation.py | spearman_correlation | def spearman_correlation(X, rowvar=False):
"""
Computes the spearman correlation estimate.
This is effectively a bias corrected pearson correlation
between rank transformed columns of X.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Data matrix using which we compute the empirical
correlation
Returns
-------
rank_correlation
References
----------
Xue, Lingzhou; Zou, Hui.
"Regularized rank-based estimation of high-dimensional
nonparanormal graphical models."
Ann. Statist. 40 (2012), no. 5, 2541--2571. doi:10.1214/12-AOS1041.
Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
"High-dimensional semiparametric Gaussian copula graphical models."
Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037
"""
Xrank = _compute_ranks(X)
rank_correlation = np.corrcoef(Xrank, rowvar=rowvar)
return 2 * np.sin(rank_correlation * np.pi / 6) | python | def spearman_correlation(X, rowvar=False):
"""
Computes the spearman correlation estimate.
This is effectively a bias corrected pearson correlation
between rank transformed columns of X.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Data matrix using which we compute the empirical
correlation
Returns
-------
rank_correlation
References
----------
Xue, Lingzhou; Zou, Hui.
"Regularized rank-based estimation of high-dimensional
nonparanormal graphical models."
Ann. Statist. 40 (2012), no. 5, 2541--2571. doi:10.1214/12-AOS1041.
Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
"High-dimensional semiparametric Gaussian copula graphical models."
Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037
"""
Xrank = _compute_ranks(X)
rank_correlation = np.corrcoef(Xrank, rowvar=rowvar)
return 2 * np.sin(rank_correlation * np.pi / 6) | [
"def",
"spearman_correlation",
"(",
"X",
",",
"rowvar",
"=",
"False",
")",
":",
"Xrank",
"=",
"_compute_ranks",
"(",
"X",
")",
"rank_correlation",
"=",
"np",
".",
"corrcoef",
"(",
"Xrank",
",",
"rowvar",
"=",
"rowvar",
")",
"return",
"2",
"*",
"np",
".",
"sin",
"(",
"rank_correlation",
"*",
"np",
".",
"pi",
"/",
"6",
")"
] | Computes the spearman correlation estimate.
This is effectively a bias corrected pearson correlation
between rank transformed columns of X.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Data matrix using which we compute the empirical
correlation
Returns
-------
rank_correlation
References
----------
Xue, Lingzhou; Zou, Hui.
"Regularized rank-based estimation of high-dimensional
nonparanormal graphical models."
Ann. Statist. 40 (2012), no. 5, 2541--2571. doi:10.1214/12-AOS1041.
Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
"High-dimensional semiparametric Gaussian copula graphical models."
Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037 | [
"Computes",
"the",
"spearman",
"correlation",
"estimate",
".",
"This",
"is",
"effectively",
"a",
"bias",
"corrected",
"pearson",
"correlation",
"between",
"rank",
"transformed",
"columns",
"of",
"X",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/rank_correlation.py#L69-L101 | train |
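A hedged sketch of the estimator above: for a strictly monotone but non-linear relationship the rank correlation is exactly 1, and 2 * sin(pi/6 * 1) maps it back to 1, whereas the plain Pearson correlation on the raw values stays noticeably below 1:
import numpy as np

x = np.linspace(1.0, 5.0, 200)
X = np.column_stack([x, np.exp(x)])  # second column is a monotone transform of the first
print(spearman_correlation(X))       # off-diagonal entries equal to 1
print(np.corrcoef(X, rowvar=False))  # Pearson off-diagonals are noticeably below 1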
skggm/skggm | inverse_covariance/rank_correlation.py | kendalltau_correlation | def kendalltau_correlation(X, rowvar=False, weighted=False):
"""
Computes kendall's tau correlation estimate.
The option to use scipy.stats.weightedtau is not recommended
as the implementation does not appear to handle ties correctly.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Data matrix using which we compute the empirical
correlation
Returns
-------
rank_correlation
References
----------
Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
"High-dimensional semiparametric Gaussian copula graphical models."
Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037
Barber, Rina Foygel; Kolar, Mladen.
"ROCKET: Robust Confidence Intervals via Kendall's Tau
for Transelliptical Graphical Models."
arXiv:1502.07641
"""
if rowvar:
X = X.T
_, n_features = X.shape
rank_correlation = np.eye(n_features)
for row in np.arange(n_features):
for col in np.arange(1 + row, n_features):
if weighted:
rank_correlation[row, col], _ = weightedtau(
X[:, row], X[:, col], rank=False
)
else:
rank_correlation[row, col], _ = kendalltau(X[:, row], X[:, col])
rank_correlation = np.triu(rank_correlation, 1) + rank_correlation.T
return np.sin(rank_correlation * np.pi / 2) | python | def kendalltau_correlation(X, rowvar=False, weighted=False):
"""
Computes kendall's tau correlation estimate.
The option to use scipy.stats.weightedtau is not recommended
as the implementation does not appear to handle ties correctly.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Data matrix using which we compute the empirical
correlation
Returns
-------
rank_correlation
References
----------
Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
"High-dimensional semiparametric Gaussian copula graphical models."
Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037
Barber, Rina Foygel; Kolar, Mladen.
"ROCKET: Robust Confidence Intervals via Kendall's Tau
for Transelliptical Graphical Models."
arXiv:1502.07641
"""
if rowvar:
X = X.T
_, n_features = X.shape
rank_correlation = np.eye(n_features)
for row in np.arange(n_features):
for col in np.arange(1 + row, n_features):
if weighted:
rank_correlation[row, col], _ = weightedtau(
X[:, row], X[:, col], rank=False
)
else:
rank_correlation[row, col], _ = kendalltau(X[:, row], X[:, col])
rank_correlation = np.triu(rank_correlation, 1) + rank_correlation.T
return np.sin(rank_correlation * np.pi / 2) | [
"def",
"kendalltau_correlation",
"(",
"X",
",",
"rowvar",
"=",
"False",
",",
"weighted",
"=",
"False",
")",
":",
"if",
"rowvar",
":",
"X",
"=",
"X",
".",
"T",
"_",
",",
"n_features",
"=",
"X",
".",
"shape",
"rank_correlation",
"=",
"np",
".",
"eye",
"(",
"n_features",
")",
"for",
"row",
"in",
"np",
".",
"arange",
"(",
"n_features",
")",
":",
"for",
"col",
"in",
"np",
".",
"arange",
"(",
"1",
"+",
"row",
",",
"n_features",
")",
":",
"if",
"weighted",
":",
"rank_correlation",
"[",
"row",
",",
"col",
"]",
",",
"_",
"=",
"weightedtau",
"(",
"X",
"[",
":",
",",
"row",
"]",
",",
"X",
"[",
":",
",",
"col",
"]",
",",
"rank",
"=",
"False",
")",
"else",
":",
"rank_correlation",
"[",
"row",
",",
"col",
"]",
",",
"_",
"=",
"kendalltau",
"(",
"X",
"[",
":",
",",
"row",
"]",
",",
"X",
"[",
":",
",",
"col",
"]",
")",
"rank_correlation",
"=",
"np",
".",
"triu",
"(",
"rank_correlation",
",",
"1",
")",
"+",
"rank_correlation",
".",
"T",
"return",
"np",
".",
"sin",
"(",
"rank_correlation",
"*",
"np",
".",
"pi",
"/",
"2",
")"
] | Computes kendall's tau correlation estimate.
The option to use scipy.stats.weightedtau is not recommended
as the implementation does not appear to handle ties correctly.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Data matrix using which we compute the empirical
correlation
Returns
-------
rank_correlation
References
----------
Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
"High-dimensional semiparametric Gaussian copula graphical models."
Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037
Barber, Rina Foygel; Kolar, Mladen.
"ROCKET: Robust Confidence Intervals via Kendall's Tau
for Transelliptical Graphical Models."
arXiv:1502.07641 | [
"Computes",
"kendall",
"s",
"tau",
"correlation",
"estimate",
".",
"The",
"option",
"to",
"use",
"scipy",
".",
"stats",
".",
"weightedtau",
"is",
"not",
"recommended",
"as",
"the",
"implementation",
"does",
"not",
"appear",
"to",
"handle",
"ties",
"correctly",
"."
] | a0ed406586c4364ea3297a658f415e13b5cbdaf8 | https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/rank_correlation.py#L104-L148 | train |
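A minimal usage sketch for kendalltau_correlation above. The import path mirrors the file location shown in the record; the data are random and purely illustrative.

    import numpy as np
    from inverse_covariance.rank_correlation import kendalltau_correlation

    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)                     # n_samples x n_features

    S = kendalltau_correlation(X)            # sin(pi/2 * tau) of pairwise Kendall's tau
    print(S.shape)                           # (4, 4)
    print(np.allclose(S, S.T))               # symmetric by construction
    print(np.allclose(np.diag(S), 1.0))      # unit diagonal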
fabiobatalha/crossrefapi | crossref/restful.py | Endpoint.version | def version(self):
"""
This attribute retrieve the API version.
>>> Works().version
'1.0.0'
"""
request_params = dict(self.request_params)
request_url = str(self.request_url)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return result['message-version'] | python | def version(self):
"""
This attribute retrieve the API version.
>>> Works().version
'1.0.0'
"""
request_params = dict(self.request_params)
request_url = str(self.request_url)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return result['message-version'] | [
"def",
"version",
"(",
"self",
")",
":",
"request_params",
"=",
"dict",
"(",
"self",
".",
"request_params",
")",
"request_url",
"=",
"str",
"(",
"self",
".",
"request_url",
")",
"result",
"=",
"self",
".",
"do_http_request",
"(",
"'get'",
",",
"request_url",
",",
"data",
"=",
"request_params",
",",
"custom_header",
"=",
"str",
"(",
"self",
".",
"etiquette",
")",
")",
".",
"json",
"(",
")",
"return",
"result",
"[",
"'message-version'",
"]"
] | This attribute retrieves the API version.
>>> Works().version
'1.0.0' | [
"This",
"attribute",
"retrieve",
"the",
"API",
"version",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L157-L174 | train |
fabiobatalha/crossrefapi | crossref/restful.py | Endpoint.count | def count(self):
"""
This method retrieve the total of records resulting from a given query.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').count()
3597
>>> Works().query('zika').filter(prefix='10.1590').count()
61
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
14
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
1
"""
request_params = dict(self.request_params)
request_url = str(self.request_url)
request_params['rows'] = 0
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return int(result['message']['total-results']) | python | def count(self):
"""
This method retrieve the total of records resulting from a given query.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').count()
3597
>>> Works().query('zika').filter(prefix='10.1590').count()
61
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
14
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
1
"""
request_params = dict(self.request_params)
request_url = str(self.request_url)
request_params['rows'] = 0
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
).json()
return int(result['message']['total-results']) | [
"def",
"count",
"(",
"self",
")",
":",
"request_params",
"=",
"dict",
"(",
"self",
".",
"request_params",
")",
"request_url",
"=",
"str",
"(",
"self",
".",
"request_url",
")",
"request_params",
"[",
"'rows'",
"]",
"=",
"0",
"result",
"=",
"self",
".",
"do_http_request",
"(",
"'get'",
",",
"request_url",
",",
"data",
"=",
"request_params",
",",
"custom_header",
"=",
"str",
"(",
"self",
".",
"etiquette",
")",
")",
".",
"json",
"(",
")",
"return",
"int",
"(",
"result",
"[",
"'message'",
"]",
"[",
"'total-results'",
"]",
")"
] | This method retrieves the total number of records resulting from a given query.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').count()
3597
>>> Works().query('zika').filter(prefix='10.1590').count()
61
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
14
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
1 | [
"This",
"method",
"retrieve",
"the",
"total",
"of",
"records",
"resulting",
"from",
"a",
"given",
"query",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L186-L215 | train |
fabiobatalha/crossrefapi | crossref/restful.py | Endpoint.url | def url(self):
"""
This attribute retrieve the url that will be used as a HTTP request to
the Crossref API.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').url
'https://api.crossref.org/works?query=zika'
>>> Works().query('zika').filter(prefix='10.1590').url
'https://api.crossref.org/works?query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').url
'https://api.crossref.org/works?sort=published&order=desc&query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').url
'https://api.crossref.org/works?sort=published&filter=prefix%3A10.1590%2Chas-abstract%3Atrue&query=zika&order=desc&query.author=Marli'
"""
request_params = self._escaped_pagging()
sorted_request_params = sorted([(k, v) for k, v in request_params.items()])
req = requests.Request(
'get', self.request_url, params=sorted_request_params).prepare()
return req.url | python | def url(self):
"""
This attribute retrieve the url that will be used as a HTTP request to
the Crossref API.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').url
'https://api.crossref.org/works?query=zika'
>>> Works().query('zika').filter(prefix='10.1590').url
'https://api.crossref.org/works?query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').url
'https://api.crossref.org/works?sort=published&order=desc&query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').url
'https://api.crossref.org/works?sort=published&filter=prefix%3A10.1590%2Chas-abstract%3Atrue&query=zika&order=desc&query.author=Marli'
"""
request_params = self._escaped_pagging()
sorted_request_params = sorted([(k, v) for k, v in request_params.items()])
req = requests.Request(
'get', self.request_url, params=sorted_request_params).prepare()
return req.url | [
"def",
"url",
"(",
"self",
")",
":",
"request_params",
"=",
"self",
".",
"_escaped_pagging",
"(",
")",
"sorted_request_params",
"=",
"sorted",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"request_params",
".",
"items",
"(",
")",
"]",
")",
"req",
"=",
"requests",
".",
"Request",
"(",
"'get'",
",",
"self",
".",
"request_url",
",",
"params",
"=",
"sorted_request_params",
")",
".",
"prepare",
"(",
")",
"return",
"req",
".",
"url"
] | This attribute retrieves the url that will be used as an HTTP request to
the Crossref API.
This attribute can be used compounded with query, filter,
sort, order and facet methods.
Examples:
>>> from crossref.restful import Works
>>> Works().query('zika').url
'https://api.crossref.org/works?query=zika'
>>> Works().query('zika').filter(prefix='10.1590').url
'https://api.crossref.org/works?query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').url
'https://api.crossref.org/works?sort=published&order=desc&query=zika&filter=prefix%3A10.1590'
>>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').url
'https://api.crossref.org/works?sort=published&filter=prefix%3A10.1590%2Chas-abstract%3Atrue&query=zika&order=desc&query.author=Marli' | [
"This",
"attribute",
"retrieve",
"the",
"url",
"that",
"will",
"be",
"used",
"as",
"a",
"HTTP",
"request",
"to",
"the",
"Crossref",
"API",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L218-L243 | train |
fabiobatalha/crossrefapi | crossref/restful.py | Works.doi | def doi(self, doi, only_message=True):
"""
This method retrieve the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result | python | def doi(self, doi, only_message=True):
"""
This method retrieve the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return
result = result.json()
return result['message'] if only_message is True else result | [
"def",
"doi",
"(",
"self",
",",
"doi",
",",
"only_message",
"=",
"True",
")",
":",
"request_url",
"=",
"build_url_endpoint",
"(",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"ENDPOINT",
",",
"doi",
"]",
")",
")",
"request_params",
"=",
"{",
"}",
"result",
"=",
"self",
".",
"do_http_request",
"(",
"'get'",
",",
"request_url",
",",
"data",
"=",
"request_params",
",",
"custom_header",
"=",
"str",
"(",
"self",
".",
"etiquette",
")",
")",
"if",
"result",
".",
"status_code",
"==",
"404",
":",
"return",
"result",
"=",
"result",
".",
"json",
"(",
")",
"return",
"result",
"[",
"'message'",
"]",
"if",
"only_message",
"is",
"True",
"else",
"result"
] | This method retrieves the DOI metadata related to a given DOI
number.
args: Crossref DOI id (String)
return: JSON
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi('10.1590/S0004-28032013005000001')
{'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001',
'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [],
'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z',
'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article',
'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z',
'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []},
'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z',
'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'],
'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'],
'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [
{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk',
'given': 'Ismael'}, {'affiliation': [
{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}],
'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [
{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}],
'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [
{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho',
'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'} | [
"This",
"method",
"retrieve",
"the",
"DOI",
"metadata",
"related",
"to",
"a",
"given",
"DOI",
"number",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L901-L959 | train |
fabiobatalha/crossrefapi | crossref/restful.py | Works.doi_exists | def doi_exists(self, doi):
"""
This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True | python | def doi_exists(self, doi):
"""
This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True | [
"def",
"doi_exists",
"(",
"self",
",",
"doi",
")",
":",
"request_url",
"=",
"build_url_endpoint",
"(",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"ENDPOINT",
",",
"doi",
"]",
")",
")",
"request_params",
"=",
"{",
"}",
"result",
"=",
"self",
".",
"do_http_request",
"(",
"'get'",
",",
"request_url",
",",
"data",
"=",
"request_params",
",",
"only_headers",
"=",
"True",
",",
"custom_header",
"=",
"str",
"(",
"self",
".",
"etiquette",
")",
")",
"if",
"result",
".",
"status_code",
"==",
"404",
":",
"return",
"False",
"return",
"True"
] | This method retrieves a boolean indicating whether a given Crossref
DOI number exists. It returns False if the API returns a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False | [
"This",
"method",
"retrieve",
"a",
"boolean",
"according",
"to",
"the",
"existence",
"of",
"a",
"crossref",
"DOI",
"number",
".",
"It",
"returns",
"False",
"if",
"the",
"API",
"results",
"a",
"404",
"status",
"code",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L995-L1032 | train |
fabiobatalha/crossrefapi | crossref/restful.py | Funders.works | def works(self, funder_id):
"""
This method retrieve a iterable of Works of the given funder.
args: Crossref allowed document Types (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(funder_id))
return Works(context=context) | python | def works(self, funder_id):
"""
This method retrieve a iterable of Works of the given funder.
args: Crossref allowed document Types (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(funder_id))
return Works(context=context) | [
"def",
"works",
"(",
"self",
",",
"funder_id",
")",
":",
"context",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"ENDPOINT",
",",
"str",
"(",
"funder_id",
")",
")",
"return",
"Works",
"(",
"context",
"=",
"context",
")"
] | This method retrieves an iterable of Works of the given funder.
args: Crossref allowed document Types (String)
return: Works() | [
"This",
"method",
"retrieve",
"a",
"iterable",
"of",
"Works",
"of",
"the",
"given",
"funder",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1199-L1208 | train |
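Unlike the Works methods above, Funders.works has no doctest in its docstring, so here is a small usage sketch. The funder ID is made up, and only methods documented elsewhere in these records (query, count) are used.

    from crossref.restful import Funders

    funders = Funders()
    # '100000001' is a hypothetical funder ID used purely for illustration.
    funder_works = funders.works('100000001')
    print(funder_works.query('zika').count())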
fabiobatalha/crossrefapi | crossref/restful.py | Members.works | def works(self, member_id):
"""
This method retrieve a iterable of Works of the given member.
args: Member ID (Integer)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(member_id))
return Works(context=context) | python | def works(self, member_id):
"""
This method retrieve a iterable of Works of the given member.
args: Member ID (Integer)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(member_id))
return Works(context=context) | [
"def",
"works",
"(",
"self",
",",
"member_id",
")",
":",
"context",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"ENDPOINT",
",",
"str",
"(",
"member_id",
")",
")",
"return",
"Works",
"(",
"context",
"=",
"context",
")"
] | This method retrieves an iterable of Works of the given member.
args: Member ID (Integer)
return: Works() | [
"This",
"method",
"retrieve",
"a",
"iterable",
"of",
"Works",
"of",
"the",
"given",
"member",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1418-L1427 | train |
fabiobatalha/crossrefapi | crossref/restful.py | Types.all | def all(self):
"""
This method retrieve an iterator with all the available types.
return: iterator of crossref document types
Example:
>>> from crossref.restful import Types
>>> types = Types()
>>> [i for i in types.all()]
[{'label': 'Book Section', 'id': 'book-section'},
{'label': 'Monograph', 'id': 'monograph'},
{'label': 'Report', 'id': 'report'},
{'label': 'Book Track', 'id': 'book-track'},
{'label': 'Journal Article', 'id': 'journal-article'},
{'label': 'Part', 'id': 'book-part'},
...
}]
"""
request_url = build_url_endpoint(self.ENDPOINT, self.context)
request_params = dict(self.request_params)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
for item in result['message']['items']:
yield item | python | def all(self):
"""
This method retrieve an iterator with all the available types.
return: iterator of crossref document types
Example:
>>> from crossref.restful import Types
>>> types = Types()
>>> [i for i in types.all()]
[{'label': 'Book Section', 'id': 'book-section'},
{'label': 'Monograph', 'id': 'monograph'},
{'label': 'Report', 'id': 'report'},
{'label': 'Book Track', 'id': 'book-track'},
{'label': 'Journal Article', 'id': 'journal-article'},
{'label': 'Part', 'id': 'book-part'},
...
}]
"""
request_url = build_url_endpoint(self.ENDPOINT, self.context)
request_params = dict(self.request_params)
result = self.do_http_request(
'get',
request_url,
data=request_params,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
raise StopIteration()
result = result.json()
for item in result['message']['items']:
yield item | [
"def",
"all",
"(",
"self",
")",
":",
"request_url",
"=",
"build_url_endpoint",
"(",
"self",
".",
"ENDPOINT",
",",
"self",
".",
"context",
")",
"request_params",
"=",
"dict",
"(",
"self",
".",
"request_params",
")",
"result",
"=",
"self",
".",
"do_http_request",
"(",
"'get'",
",",
"request_url",
",",
"data",
"=",
"request_params",
",",
"custom_header",
"=",
"str",
"(",
"self",
".",
"etiquette",
")",
")",
"if",
"result",
".",
"status_code",
"==",
"404",
":",
"raise",
"StopIteration",
"(",
")",
"result",
"=",
"result",
".",
"json",
"(",
")",
"for",
"item",
"in",
"result",
"[",
"'message'",
"]",
"[",
"'items'",
"]",
":",
"yield",
"item"
] | This method retrieves an iterator with all the available types.
return: iterator of crossref document types
Example:
>>> from crossref.restful import Types
>>> types = Types()
>>> [i for i in types.all()]
[{'label': 'Book Section', 'id': 'book-section'},
{'label': 'Monograph', 'id': 'monograph'},
{'label': 'Report', 'id': 'report'},
{'label': 'Book Track', 'id': 'book-track'},
{'label': 'Journal Article', 'id': 'journal-article'},
{'label': 'Part', 'id': 'book-part'},
...
}] | [
"This",
"method",
"retrieve",
"an",
"iterator",
"with",
"all",
"the",
"available",
"types",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1466-L1501 | train |
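One caveat about the generator above: it signals a missing endpoint by raising StopIteration() inside a generator, which PEP 479 (the default behaviour from Python 3.7 on) converts into a RuntimeError. A plain return ends the iteration cleanly instead; a sketch of the same guard:

    def all(self):
        request_url = build_url_endpoint(self.ENDPOINT, self.context)
        request_params = dict(self.request_params)
        result = self.do_http_request(
            'get', request_url, data=request_params,
            custom_header=str(self.etiquette))
        if result.status_code == 404:
            return  # ends the generator without raising StopIteration explicitly
        for item in result.json()['message']['items']:
            yield item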
fabiobatalha/crossrefapi | crossref/restful.py | Types.works | def works(self, type_id):
"""
This method retrieve a iterable of Works of the given type.
args: Crossref allowed document Types (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(type_id))
return Works(context=context) | python | def works(self, type_id):
"""
This method retrieve a iterable of Works of the given type.
args: Crossref allowed document Types (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(type_id))
return Works(context=context) | [
"def",
"works",
"(",
"self",
",",
"type_id",
")",
":",
"context",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"ENDPOINT",
",",
"str",
"(",
"type_id",
")",
")",
"return",
"Works",
"(",
"context",
"=",
"context",
")"
] | This method retrieves an iterable of Works of the given type.
args: Crossref allowed document Types (String)
return: Works() | [
"This",
"method",
"retrieve",
"a",
"iterable",
"of",
"Works",
"of",
"the",
"given",
"type",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1542-L1551 | train |
fabiobatalha/crossrefapi | crossref/restful.py | Prefixes.works | def works(self, prefix_id):
"""
This method retrieve a iterable of Works of the given prefix.
args: Crossref Prefix (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(prefix_id))
return Works(context=context) | python | def works(self, prefix_id):
"""
This method retrieve a iterable of Works of the given prefix.
args: Crossref Prefix (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(prefix_id))
return Works(context=context) | [
"def",
"works",
"(",
"self",
",",
"prefix_id",
")",
":",
"context",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"ENDPOINT",
",",
"str",
"(",
"prefix_id",
")",
")",
"return",
"Works",
"(",
"context",
"=",
"context",
")"
] | This method retrieves an iterable of Works of the given prefix.
args: Crossref Prefix (String)
return: Works() | [
"This",
"method",
"retrieve",
"a",
"iterable",
"of",
"Works",
"of",
"the",
"given",
"prefix",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1594-L1603 | train |
fabiobatalha/crossrefapi | crossref/restful.py | Journals.works | def works(self, issn):
"""
This method retrieve a iterable of Works of the given journal.
args: Journal ISSN (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(issn))
return Works(context=context) | python | def works(self, issn):
"""
This method retrieve a iterable of Works of the given journal.
args: Journal ISSN (String)
return: Works()
"""
context = '%s/%s' % (self.ENDPOINT, str(issn))
return Works(context=context) | [
"def",
"works",
"(",
"self",
",",
"issn",
")",
":",
"context",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"ENDPOINT",
",",
"str",
"(",
"issn",
")",
")",
"return",
"Works",
"(",
"context",
"=",
"context",
")"
] | This method retrieves an iterable of Works of the given journal.
args: Journal ISSN (String)
return: Works() | [
"This",
"method",
"retrieve",
"a",
"iterable",
"of",
"Works",
"of",
"the",
"given",
"journal",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1718-L1728 | train |
fabiobatalha/crossrefapi | crossref/restful.py | Depositor.register_doi | def register_doi(self, submission_id, request_xml):
"""
This method registry a new DOI number in Crossref or update some DOI
metadata.
submission_id: Will be used as the submission file name. The file name
could be used in future requests to retrieve the submission status.
request_xml: The XML with the document metadata. It must be under
compliance with the Crossref Submission Schema.
"""
endpoint = self.get_endpoint('deposit')
files = {
'mdFile': ('%s.xml' % submission_id, request_xml)
}
params = {
'operation': 'doMDUpload',
'login_id': self.api_user,
'login_passwd': self.api_key
}
result = self.do_http_request(
'post',
endpoint,
data=params,
files=files,
timeout=10,
custom_header=str(self.etiquette)
)
return result | python | def register_doi(self, submission_id, request_xml):
"""
This method registry a new DOI number in Crossref or update some DOI
metadata.
submission_id: Will be used as the submission file name. The file name
could be used in future requests to retrieve the submission status.
request_xml: The XML with the document metadata. It must be under
compliance with the Crossref Submission Schema.
"""
endpoint = self.get_endpoint('deposit')
files = {
'mdFile': ('%s.xml' % submission_id, request_xml)
}
params = {
'operation': 'doMDUpload',
'login_id': self.api_user,
'login_passwd': self.api_key
}
result = self.do_http_request(
'post',
endpoint,
data=params,
files=files,
timeout=10,
custom_header=str(self.etiquette)
)
return result | [
"def",
"register_doi",
"(",
"self",
",",
"submission_id",
",",
"request_xml",
")",
":",
"endpoint",
"=",
"self",
".",
"get_endpoint",
"(",
"'deposit'",
")",
"files",
"=",
"{",
"'mdFile'",
":",
"(",
"'%s.xml'",
"%",
"submission_id",
",",
"request_xml",
")",
"}",
"params",
"=",
"{",
"'operation'",
":",
"'doMDUpload'",
",",
"'login_id'",
":",
"self",
".",
"api_user",
",",
"'login_passwd'",
":",
"self",
".",
"api_key",
"}",
"result",
"=",
"self",
".",
"do_http_request",
"(",
"'post'",
",",
"endpoint",
",",
"data",
"=",
"params",
",",
"files",
"=",
"files",
",",
"timeout",
"=",
"10",
",",
"custom_header",
"=",
"str",
"(",
"self",
".",
"etiquette",
")",
")",
"return",
"result"
] | This method registers a new DOI number in Crossref or updates some DOI
metadata.
submission_id: Will be used as the submission file name. The file name
could be used in future requests to retrieve the submission status.
request_xml: The XML with the document metadata. It must be under
compliance with the Crossref Submission Schema. | [
"This",
"method",
"registry",
"a",
"new",
"DOI",
"number",
"in",
"Crossref",
"or",
"update",
"some",
"DOI",
"metadata",
"."
] | 53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7 | https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1746-L1779 | train |
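A usage sketch for Depositor.register_doi. The constructor arguments below are assumptions made for illustration (the record only shows that the instance carries api_user, api_key and an etiquette); the XML file name and submission ID are likewise hypothetical.

    from crossref.restful import Depositor

    depositor = Depositor(prefix='10.1590', api_user='my_user', api_key='my_key')

    with open('deposit_batch.xml', encoding='utf-8') as f:
        request_xml = f.read()

    # 'submission_001' becomes the uploaded file name; it can be reused later
    # to ask Crossref about the status of this submission.
    response = depositor.register_doi('submission_001', request_xml)
    print(response.status_code)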
buildinspace/peru | peru/plugin.py | _find_plugin_dir | def _find_plugin_dir(module_type):
'''Find the directory containing the plugin definition for the given type.
Do this by searching all the paths where plugins can live for a dir that
matches the type name.'''
for install_dir in _get_plugin_install_dirs():
candidate = os.path.join(install_dir, module_type)
if os.path.isdir(candidate):
return candidate
else:
raise PluginCandidateError(
'No plugin found for `{}` module in paths:\n{}'.format(
module_type, '\n'.join(_get_plugin_install_dirs()))) | python | def _find_plugin_dir(module_type):
'''Find the directory containing the plugin definition for the given type.
Do this by searching all the paths where plugins can live for a dir that
matches the type name.'''
for install_dir in _get_plugin_install_dirs():
candidate = os.path.join(install_dir, module_type)
if os.path.isdir(candidate):
return candidate
else:
raise PluginCandidateError(
'No plugin found for `{}` module in paths:\n{}'.format(
module_type, '\n'.join(_get_plugin_install_dirs()))) | [
"def",
"_find_plugin_dir",
"(",
"module_type",
")",
":",
"for",
"install_dir",
"in",
"_get_plugin_install_dirs",
"(",
")",
":",
"candidate",
"=",
"os",
".",
"path",
".",
"join",
"(",
"install_dir",
",",
"module_type",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"candidate",
")",
":",
"return",
"candidate",
"else",
":",
"raise",
"PluginCandidateError",
"(",
"'No plugin found for `{}` module in paths:\\n{}'",
".",
"format",
"(",
"module_type",
",",
"'\\n'",
".",
"join",
"(",
"_get_plugin_install_dirs",
"(",
")",
")",
")",
")"
] | Find the directory containing the plugin definition for the given type.
Do this by searching all the paths where plugins can live for a dir that
matches the type name. | [
"Find",
"the",
"directory",
"containing",
"the",
"plugin",
"definition",
"for",
"the",
"given",
"type",
".",
"Do",
"this",
"by",
"searching",
"all",
"the",
"paths",
"where",
"plugins",
"can",
"live",
"for",
"a",
"dir",
"that",
"matches",
"the",
"type",
"name",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/plugin.py#L264-L276 | train |
buildinspace/peru | peru/main.py | merged_args_dicts | def merged_args_dicts(global_args, subcommand_args):
'''We deal with docopt args from the toplevel peru parse and the subcommand
parse. We don't want False values for a flag in the subcommand to override
True values if that flag was given at the top level. This function
specifically handles that case.'''
merged = global_args.copy()
for key, val in subcommand_args.items():
if key not in merged:
merged[key] = val
elif type(merged[key]) is type(val) is bool:
merged[key] = merged[key] or val
else:
raise RuntimeError("Unmergable args.")
return merged | python | def merged_args_dicts(global_args, subcommand_args):
'''We deal with docopt args from the toplevel peru parse and the subcommand
parse. We don't want False values for a flag in the subcommand to override
True values if that flag was given at the top level. This function
specifically handles that case.'''
merged = global_args.copy()
for key, val in subcommand_args.items():
if key not in merged:
merged[key] = val
elif type(merged[key]) is type(val) is bool:
merged[key] = merged[key] or val
else:
raise RuntimeError("Unmergable args.")
return merged | [
"def",
"merged_args_dicts",
"(",
"global_args",
",",
"subcommand_args",
")",
":",
"merged",
"=",
"global_args",
".",
"copy",
"(",
")",
"for",
"key",
",",
"val",
"in",
"subcommand_args",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"merged",
":",
"merged",
"[",
"key",
"]",
"=",
"val",
"elif",
"type",
"(",
"merged",
"[",
"key",
"]",
")",
"is",
"type",
"(",
"val",
")",
"is",
"bool",
":",
"merged",
"[",
"key",
"]",
"=",
"merged",
"[",
"key",
"]",
"or",
"val",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Unmergable args.\"",
")",
"return",
"merged"
] | We deal with docopt args from the toplevel peru parse and the subcommand
parse. We don't want False values for a flag in the subcommand to override
True values if that flag was given at the top level. This function
specifically handles that case. | [
"We",
"deal",
"with",
"docopt",
"args",
"from",
"the",
"toplevel",
"peru",
"parse",
"and",
"the",
"subcommand",
"parse",
".",
"We",
"don",
"t",
"want",
"False",
"values",
"for",
"a",
"flag",
"in",
"the",
"subcommand",
"to",
"override",
"True",
"values",
"if",
"that",
"flag",
"was",
"given",
"at",
"the",
"top",
"level",
".",
"This",
"function",
"specifically",
"handles",
"that",
"case",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/main.py#L299-L312 | train |
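A self-contained illustration of the merge rule described in the docstring above, assuming merged_args_dicts is importable from peru.main; the flag names are hypothetical.

    from peru.main import merged_args_dicts

    global_args = {'--verbose': True, '--quiet': False}
    subcommand_args = {'--verbose': False, '--quiet': False, '--force': True}

    merged = merged_args_dicts(global_args, subcommand_args)
    # --verbose stays True: a False from the subcommand never overrides a True
    # given at the top level. Keys seen only in the subcommand are copied in.
    assert merged == {'--verbose': True, '--quiet': False, '--force': True}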
buildinspace/peru | peru/main.py | force_utf8_in_ascii_mode_hack | def force_utf8_in_ascii_mode_hack():
'''In systems without a UTF8 locale configured, Python will default to
ASCII mode for stdout and stderr. This causes our fancy display to fail
with encoding errors. In particular, you run into this if you try to run
peru inside of Docker. This is a hack to force emitting UTF8 in that case.
Hopefully it doesn't break anything important.'''
if sys.stdout.encoding == 'ANSI_X3.4-1968':
sys.stdout = open(
sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)
sys.stderr = open(
sys.stderr.fileno(), mode='w', encoding='utf8', buffering=1) | python | def force_utf8_in_ascii_mode_hack():
'''In systems without a UTF8 locale configured, Python will default to
ASCII mode for stdout and stderr. This causes our fancy display to fail
with encoding errors. In particular, you run into this if you try to run
peru inside of Docker. This is a hack to force emitting UTF8 in that case.
Hopefully it doesn't break anything important.'''
if sys.stdout.encoding == 'ANSI_X3.4-1968':
sys.stdout = open(
sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)
sys.stderr = open(
sys.stderr.fileno(), mode='w', encoding='utf8', buffering=1) | [
"def",
"force_utf8_in_ascii_mode_hack",
"(",
")",
":",
"if",
"sys",
".",
"stdout",
".",
"encoding",
"==",
"'ANSI_X3.4-1968'",
":",
"sys",
".",
"stdout",
"=",
"open",
"(",
"sys",
".",
"stdout",
".",
"fileno",
"(",
")",
",",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"'utf8'",
",",
"buffering",
"=",
"1",
")",
"sys",
".",
"stderr",
"=",
"open",
"(",
"sys",
".",
"stderr",
".",
"fileno",
"(",
")",
",",
"mode",
"=",
"'w'",
",",
"encoding",
"=",
"'utf8'",
",",
"buffering",
"=",
"1",
")"
] | In systems without a UTF8 locale configured, Python will default to
ASCII mode for stdout and stderr. This causes our fancy display to fail
with encoding errors. In particular, you run into this if you try to run
peru inside of Docker. This is a hack to force emitting UTF8 in that case.
Hopefully it doesn't break anything important. | [
"In",
"systems",
"without",
"a",
"UTF8",
"locale",
"configured",
"Python",
"will",
"default",
"to",
"ASCII",
"mode",
"for",
"stdout",
"and",
"stderr",
".",
"This",
"causes",
"our",
"fancy",
"display",
"to",
"fail",
"with",
"encoding",
"errors",
".",
"In",
"particular",
"you",
"run",
"into",
"this",
"if",
"you",
"try",
"to",
"run",
"peru",
"inside",
"of",
"Docker",
".",
"This",
"is",
"a",
"hack",
"to",
"force",
"emitting",
"UTF8",
"in",
"that",
"case",
".",
"Hopefully",
"it",
"doesn",
"t",
"break",
"anything",
"important",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/main.py#L334-L344 | train |
buildinspace/peru | peru/scope.py | Scope.parse_target | async def parse_target(self, runtime, target_str):
'''A target is a pipeline of a module into zero or more rules, and each
module and rule can itself be scoped with zero or more module names.'''
pipeline_parts = target_str.split(RULE_SEPARATOR)
module = await self.resolve_module(runtime, pipeline_parts[0],
target_str)
rules = []
for part in pipeline_parts[1:]:
rule = await self.resolve_rule(runtime, part)
rules.append(rule)
return module, tuple(rules) | python | async def parse_target(self, runtime, target_str):
'''A target is a pipeline of a module into zero or more rules, and each
module and rule can itself be scoped with zero or more module names.'''
pipeline_parts = target_str.split(RULE_SEPARATOR)
module = await self.resolve_module(runtime, pipeline_parts[0],
target_str)
rules = []
for part in pipeline_parts[1:]:
rule = await self.resolve_rule(runtime, part)
rules.append(rule)
return module, tuple(rules) | [
"async",
"def",
"parse_target",
"(",
"self",
",",
"runtime",
",",
"target_str",
")",
":",
"pipeline_parts",
"=",
"target_str",
".",
"split",
"(",
"RULE_SEPARATOR",
")",
"module",
"=",
"await",
"self",
".",
"resolve_module",
"(",
"runtime",
",",
"pipeline_parts",
"[",
"0",
"]",
",",
"target_str",
")",
"rules",
"=",
"[",
"]",
"for",
"part",
"in",
"pipeline_parts",
"[",
"1",
":",
"]",
":",
"rule",
"=",
"await",
"self",
".",
"resolve_rule",
"(",
"runtime",
",",
"part",
")",
"rules",
".",
"append",
"(",
"rule",
")",
"return",
"module",
",",
"tuple",
"(",
"rules",
")"
] | A target is a pipeline of a module into zero or more rules, and each
module and rule can itself be scoped with zero or more module names. | [
"A",
"target",
"is",
"a",
"pipeline",
"of",
"a",
"module",
"into",
"zero",
"or",
"more",
"rules",
"and",
"each",
"module",
"and",
"rule",
"can",
"itself",
"be",
"scoped",
"with",
"zero",
"or",
"more",
"module",
"names",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/scope.py#L17-L27 | train |
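A sketch of how a target string from the docstring above breaks down, assuming '|' is the rule separator and '.' the scope separator (neither constant is shown in this excerpt, so both are assumptions), with hypothetical module and rule names:

    async def show_target(scope, runtime):
        # "vendor.zlib|strip_docs|export" names the scoped module vendor.zlib,
        # piped through the rules strip_docs and then export.
        module, rules = await scope.parse_target(
            runtime, 'vendor.zlib|strip_docs|export')
        print(module, rules)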
buildinspace/peru | peru/edit_yaml.py | _maybe_quote | def _maybe_quote(val):
'''All of our values should be strings. Usually those can be passed in as
bare words, but if they're parseable as an int or float we need to quote
them.'''
assert isinstance(val, str), 'We should never set non-string values.'
needs_quoting = False
try:
int(val)
needs_quoting = True
except Exception:
pass
try:
float(val)
needs_quoting = True
except Exception:
pass
if needs_quoting:
return '"{}"'.format(val)
else:
return val | python | def _maybe_quote(val):
'''All of our values should be strings. Usually those can be passed in as
bare words, but if they're parseable as an int or float we need to quote
them.'''
assert isinstance(val, str), 'We should never set non-string values.'
needs_quoting = False
try:
int(val)
needs_quoting = True
except Exception:
pass
try:
float(val)
needs_quoting = True
except Exception:
pass
if needs_quoting:
return '"{}"'.format(val)
else:
return val | [
"def",
"_maybe_quote",
"(",
"val",
")",
":",
"assert",
"isinstance",
"(",
"val",
",",
"str",
")",
",",
"'We should never set non-string values.'",
"needs_quoting",
"=",
"False",
"try",
":",
"int",
"(",
"val",
")",
"needs_quoting",
"=",
"True",
"except",
"Exception",
":",
"pass",
"try",
":",
"float",
"(",
"val",
")",
"needs_quoting",
"=",
"True",
"except",
"Exception",
":",
"pass",
"if",
"needs_quoting",
":",
"return",
"'\"{}\"'",
".",
"format",
"(",
"val",
")",
"else",
":",
"return",
"val"
] | All of our values should be strings. Usually those can be passed in as
bare words, but if they're parseable as an int or float we need to quote
them. | [
"All",
"of",
"our",
"values",
"should",
"be",
"strings",
".",
"Usually",
"those",
"can",
"be",
"passed",
"in",
"as",
"bare",
"words",
"but",
"if",
"they",
"re",
"parseable",
"as",
"an",
"int",
"or",
"float",
"we",
"need",
"to",
"quote",
"them",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/edit_yaml.py#L26-L45 | train |
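A few concrete cases of the quoting rule above, assuming _maybe_quote is imported from peru.edit_yaml:

    from peru.edit_yaml import _maybe_quote

    assert _maybe_quote('master') == 'master'   # bare word, passed through
    assert _maybe_quote('42') == '"42"'         # parses as an int, so it gets quoted
    assert _maybe_quote('1.0') == '"1.0"'       # parses as a float, so it gets quoted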
buildinspace/peru | peru/async_helpers.py | gather_coalescing_exceptions | async def gather_coalescing_exceptions(coros, display, *, verbose):
'''The tricky thing about running multiple coroutines in parallel is what
we're supposed to do when one of them raises an exception. The approach
we're using here is to catch exceptions and keep waiting for other tasks to
finish. At the end, we reraise a GatheredExceptions error, if any
exceptions were caught.
Another minor detail: We also want to make sure to start coroutines in the
order given, so that they end up appearing to the user alphabetically in
the fancy display. Note that asyncio.gather() puts coroutines in a set
internally, so we schedule coroutines *before* we give them to gather().
'''
exceptions = []
reprs = []
async def catching_wrapper(coro):
try:
return (await coro)
except Exception as e:
exceptions.append(e)
if isinstance(e, PrintableError) and not verbose:
reprs.append(e.message)
else:
reprs.append(traceback.format_exc())
return None
# Suppress a deprecation warning in Python 3.5, while continuing to support
# 3.3 and early 3.4 releases.
if hasattr(asyncio, 'ensure_future'):
schedule = getattr(asyncio, 'ensure_future')
else:
schedule = getattr(asyncio, 'async')
futures = [schedule(catching_wrapper(coro)) for coro in coros]
results = await asyncio.gather(*futures)
if exceptions:
raise GatheredExceptions(exceptions, reprs)
else:
return results | python | async def gather_coalescing_exceptions(coros, display, *, verbose):
'''The tricky thing about running multiple coroutines in parallel is what
we're supposed to do when one of them raises an exception. The approach
we're using here is to catch exceptions and keep waiting for other tasks to
finish. At the end, we reraise a GatheredExceptions error, if any
exceptions were caught.
Another minor detail: We also want to make sure to start coroutines in the
order given, so that they end up appearing to the user alphabetically in
the fancy display. Note that asyncio.gather() puts coroutines in a set
internally, so we schedule coroutines *before* we give them to gather().
'''
exceptions = []
reprs = []
async def catching_wrapper(coro):
try:
return (await coro)
except Exception as e:
exceptions.append(e)
if isinstance(e, PrintableError) and not verbose:
reprs.append(e.message)
else:
reprs.append(traceback.format_exc())
return None
# Suppress a deprecation warning in Python 3.5, while continuing to support
# 3.3 and early 3.4 releases.
if hasattr(asyncio, 'ensure_future'):
schedule = getattr(asyncio, 'ensure_future')
else:
schedule = getattr(asyncio, 'async')
futures = [schedule(catching_wrapper(coro)) for coro in coros]
results = await asyncio.gather(*futures)
if exceptions:
raise GatheredExceptions(exceptions, reprs)
else:
return results | [
"async",
"def",
"gather_coalescing_exceptions",
"(",
"coros",
",",
"display",
",",
"*",
",",
"verbose",
")",
":",
"exceptions",
"=",
"[",
"]",
"reprs",
"=",
"[",
"]",
"async",
"def",
"catching_wrapper",
"(",
"coro",
")",
":",
"try",
":",
"return",
"(",
"await",
"coro",
")",
"except",
"Exception",
"as",
"e",
":",
"exceptions",
".",
"append",
"(",
"e",
")",
"if",
"isinstance",
"(",
"e",
",",
"PrintableError",
")",
"and",
"not",
"verbose",
":",
"reprs",
".",
"append",
"(",
"e",
".",
"message",
")",
"else",
":",
"reprs",
".",
"append",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"None",
"# Suppress a deprecation warning in Python 3.5, while continuing to support",
"# 3.3 and early 3.4 releases.",
"if",
"hasattr",
"(",
"asyncio",
",",
"'ensure_future'",
")",
":",
"schedule",
"=",
"getattr",
"(",
"asyncio",
",",
"'ensure_future'",
")",
"else",
":",
"schedule",
"=",
"getattr",
"(",
"asyncio",
",",
"'async'",
")",
"futures",
"=",
"[",
"schedule",
"(",
"catching_wrapper",
"(",
"coro",
")",
")",
"for",
"coro",
"in",
"coros",
"]",
"results",
"=",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"futures",
")",
"if",
"exceptions",
":",
"raise",
"GatheredExceptions",
"(",
"exceptions",
",",
"reprs",
")",
"else",
":",
"return",
"results"
] | The tricky thing about running multiple coroutines in parallel is what
we're supposed to do when one of them raises an exception. The approach
we're using here is to catch exceptions and keep waiting for other tasks to
finish. At the end, we reraise a GatheredExceptions error, if any
exceptions were caught.
Another minor detail: We also want to make sure to start coroutines in the
order given, so that they end up appearing to the user alphabetically in
the fancy display. Note that asyncio.gather() puts coroutines in a set
internally, so we schedule coroutines *before* we give them to gather(). | [
"The",
"tricky",
"thing",
"about",
"running",
"multiple",
"coroutines",
"in",
"parallel",
"is",
"what",
"we",
"re",
"supposed",
"to",
"do",
"when",
"one",
"of",
"them",
"raises",
"an",
"exception",
".",
"The",
"approach",
"we",
"re",
"using",
"here",
"is",
"to",
"catch",
"exceptions",
"and",
"keep",
"waiting",
"for",
"other",
"tasks",
"to",
"finish",
".",
"At",
"the",
"end",
"we",
"reraise",
"a",
"GatheredExceptions",
"error",
"if",
"any",
"exceptions",
"were",
"caught",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_helpers.py#L53-L94 | train |
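A usage sketch for the gathering behaviour described above, assuming both gather_coalescing_exceptions and GatheredExceptions are imported from peru.async_helpers. The display argument is unused in the body shown, so None stands in for a real display object here.

    import asyncio
    from peru.async_helpers import GatheredExceptions, gather_coalescing_exceptions

    async def ok():
        return 'fine'

    async def boom():
        raise RuntimeError('nope')

    async def main():
        try:
            await gather_coalescing_exceptions([ok(), boom()], None, verbose=True)
        except GatheredExceptions as e:
            # The failing task does not cancel the others; all exceptions are
            # collected and re-raised together at the end.
            print(len(e.exceptions), 'task(s) failed')

    asyncio.run(main())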
buildinspace/peru | peru/async_helpers.py | create_subprocess_with_handle | async def create_subprocess_with_handle(command,
display_handle,
*,
shell=False,
cwd,
**kwargs):
'''Writes subprocess output to a display handle as it comes in, and also
returns a copy of it as a string. Throws if the subprocess returns an
error. Note that cwd is a required keyword-only argument, on theory that
peru should never start child processes "wherever I happen to be running
right now."'''
# We're going to get chunks of bytes from the subprocess, and it's possible
# that one of those chunks ends in the middle of a unicode character. An
# incremental decoder keeps those dangling bytes around until the next
# chunk arrives, so that split characters get decoded properly. Use
# stdout's encoding, but provide a default for the case where stdout has
# been redirected to a StringIO. (This happens in tests.)
encoding = sys.stdout.encoding or 'utf8'
decoder_factory = codecs.getincrementaldecoder(encoding)
decoder = decoder_factory(errors='replace')
output_copy = io.StringIO()
# Display handles are context managers. Entering and exiting the display
# handle lets the display know when the job starts and stops.
with display_handle:
stdin = asyncio.subprocess.DEVNULL
stdout = asyncio.subprocess.PIPE
stderr = asyncio.subprocess.STDOUT
if shell:
proc = await asyncio.create_subprocess_shell(
command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
**kwargs)
else:
proc = await asyncio.create_subprocess_exec(
*command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
**kwargs)
# Read all the output from the subprocess as its comes in.
while True:
outputbytes = await proc.stdout.read(4096)
if not outputbytes:
break
outputstr = decoder.decode(outputbytes)
outputstr_unified = _unify_newlines(outputstr)
display_handle.write(outputstr_unified)
output_copy.write(outputstr_unified)
returncode = await proc.wait()
if returncode != 0:
raise subprocess.CalledProcessError(returncode, command,
output_copy.getvalue())
if hasattr(decoder, 'buffer'):
# The utf8 decoder has this attribute, but some others don't.
assert not decoder.buffer, 'decoder nonempty: ' + repr(decoder.buffer)
return output_copy.getvalue() | python | async def create_subprocess_with_handle(command,
display_handle,
*,
shell=False,
cwd,
**kwargs):
'''Writes subprocess output to a display handle as it comes in, and also
returns a copy of it as a string. Throws if the subprocess returns an
error. Note that cwd is a required keyword-only argument, on theory that
peru should never start child processes "wherever I happen to be running
right now."'''
# We're going to get chunks of bytes from the subprocess, and it's possible
# that one of those chunks ends in the middle of a unicode character. An
# incremental decoder keeps those dangling bytes around until the next
# chunk arrives, so that split characters get decoded properly. Use
# stdout's encoding, but provide a default for the case where stdout has
# been redirected to a StringIO. (This happens in tests.)
encoding = sys.stdout.encoding or 'utf8'
decoder_factory = codecs.getincrementaldecoder(encoding)
decoder = decoder_factory(errors='replace')
output_copy = io.StringIO()
# Display handles are context managers. Entering and exiting the display
# handle lets the display know when the job starts and stops.
with display_handle:
stdin = asyncio.subprocess.DEVNULL
stdout = asyncio.subprocess.PIPE
stderr = asyncio.subprocess.STDOUT
if shell:
proc = await asyncio.create_subprocess_shell(
command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
**kwargs)
else:
proc = await asyncio.create_subprocess_exec(
*command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
**kwargs)
# Read all the output from the subprocess as its comes in.
while True:
outputbytes = await proc.stdout.read(4096)
if not outputbytes:
break
outputstr = decoder.decode(outputbytes)
outputstr_unified = _unify_newlines(outputstr)
display_handle.write(outputstr_unified)
output_copy.write(outputstr_unified)
returncode = await proc.wait()
if returncode != 0:
raise subprocess.CalledProcessError(returncode, command,
output_copy.getvalue())
if hasattr(decoder, 'buffer'):
# The utf8 decoder has this attribute, but some others don't.
assert not decoder.buffer, 'decoder nonempty: ' + repr(decoder.buffer)
return output_copy.getvalue() | [
"async",
"def",
"create_subprocess_with_handle",
"(",
"command",
",",
"display_handle",
",",
"*",
",",
"shell",
"=",
"False",
",",
"cwd",
",",
"*",
"*",
"kwargs",
")",
":",
"# We're going to get chunks of bytes from the subprocess, and it's possible",
"# that one of those chunks ends in the middle of a unicode character. An",
"# incremental decoder keeps those dangling bytes around until the next",
"# chunk arrives, so that split characters get decoded properly. Use",
"# stdout's encoding, but provide a default for the case where stdout has",
"# been redirected to a StringIO. (This happens in tests.)",
"encoding",
"=",
"sys",
".",
"stdout",
".",
"encoding",
"or",
"'utf8'",
"decoder_factory",
"=",
"codecs",
".",
"getincrementaldecoder",
"(",
"encoding",
")",
"decoder",
"=",
"decoder_factory",
"(",
"errors",
"=",
"'replace'",
")",
"output_copy",
"=",
"io",
".",
"StringIO",
"(",
")",
"# Display handles are context managers. Entering and exiting the display",
"# handle lets the display know when the job starts and stops.",
"with",
"display_handle",
":",
"stdin",
"=",
"asyncio",
".",
"subprocess",
".",
"DEVNULL",
"stdout",
"=",
"asyncio",
".",
"subprocess",
".",
"PIPE",
"stderr",
"=",
"asyncio",
".",
"subprocess",
".",
"STDOUT",
"if",
"shell",
":",
"proc",
"=",
"await",
"asyncio",
".",
"create_subprocess_shell",
"(",
"command",
",",
"stdin",
"=",
"stdin",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"cwd",
"=",
"cwd",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"proc",
"=",
"await",
"asyncio",
".",
"create_subprocess_exec",
"(",
"*",
"command",
",",
"stdin",
"=",
"stdin",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"cwd",
"=",
"cwd",
",",
"*",
"*",
"kwargs",
")",
"# Read all the output from the subprocess as its comes in.",
"while",
"True",
":",
"outputbytes",
"=",
"await",
"proc",
".",
"stdout",
".",
"read",
"(",
"4096",
")",
"if",
"not",
"outputbytes",
":",
"break",
"outputstr",
"=",
"decoder",
".",
"decode",
"(",
"outputbytes",
")",
"outputstr_unified",
"=",
"_unify_newlines",
"(",
"outputstr",
")",
"display_handle",
".",
"write",
"(",
"outputstr_unified",
")",
"output_copy",
".",
"write",
"(",
"outputstr_unified",
")",
"returncode",
"=",
"await",
"proc",
".",
"wait",
"(",
")",
"if",
"returncode",
"!=",
"0",
":",
"raise",
"subprocess",
".",
"CalledProcessError",
"(",
"returncode",
",",
"command",
",",
"output_copy",
".",
"getvalue",
"(",
")",
")",
"if",
"hasattr",
"(",
"decoder",
",",
"'buffer'",
")",
":",
"# The utf8 decoder has this attribute, but some others don't.",
"assert",
"not",
"decoder",
".",
"buffer",
",",
"'decoder nonempty: '",
"+",
"repr",
"(",
"decoder",
".",
"buffer",
")",
"return",
"output_copy",
".",
"getvalue",
"(",
")"
] | Writes subprocess output to a display handle as it comes in, and also
returns a copy of it as a string. Throws if the subprocess returns an
error. Note that cwd is a required keyword-only argument, on the theory that
peru should never start child processes "wherever I happen to be running
right now." | [
"Writes",
"subprocess",
"output",
"to",
"a",
"display",
"handle",
"as",
"it",
"comes",
"in",
"and",
"also",
"returns",
"a",
"copy",
"of",
"it",
"as",
"a",
"string",
".",
"Throws",
"if",
"the",
"subprocess",
"returns",
"an",
"error",
".",
"Note",
"that",
"cwd",
"is",
"a",
"required",
"keyword",
"-",
"only",
"argument",
"on",
"theory",
"that",
"peru",
"should",
"never",
"start",
"child",
"processes",
"wherever",
"I",
"happen",
"to",
"be",
"running",
"right",
"now",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_helpers.py#L97-L164 | train |
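
The entry above requires display_handle only to be a context manager with a write() method. A minimal usage sketch, assuming peru is importable and a Unix-style echo command is available; DemoHandle is a made-up stand-in for peru's real display handles:

import asyncio

from peru.async_helpers import create_subprocess_with_handle


class DemoHandle:
    # Hypothetical display handle: a context manager that accepts write().
    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        return False  # never suppress exceptions

    def write(self, text):
        print(text, end='')


async def main():
    # cwd is keyword-only and required, as the docstring above explains.
    output = await create_subprocess_with_handle(
        ['echo', 'hello'], DemoHandle(), cwd='.')
    assert 'hello' in output


asyncio.run(main())
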
buildinspace/peru | peru/async_helpers.py | raises_gathered | def raises_gathered(error_type):
'''For use in tests. Many tests expect a single error to be thrown, and
want it to be of a specific type. This is a helper method for when that
type is inside a gathered exception.'''
container = RaisesGatheredContainer()
try:
yield container
except GatheredExceptions as e:
# Make sure there is exactly one exception.
if len(e.exceptions) != 1:
raise
inner = e.exceptions[0]
# Make sure the exception is the right type.
if not isinstance(inner, error_type):
raise
# Success.
container.exception = inner | python | def raises_gathered(error_type):
'''For use in tests. Many tests expect a single error to be thrown, and
want it to be of a specific type. This is a helper method for when that
type is inside a gathered exception.'''
container = RaisesGatheredContainer()
try:
yield container
except GatheredExceptions as e:
# Make sure there is exactly one exception.
if len(e.exceptions) != 1:
raise
inner = e.exceptions[0]
# Make sure the exception is the right type.
if not isinstance(inner, error_type):
raise
# Success.
container.exception = inner | [
"def",
"raises_gathered",
"(",
"error_type",
")",
":",
"container",
"=",
"RaisesGatheredContainer",
"(",
")",
"try",
":",
"yield",
"container",
"except",
"GatheredExceptions",
"as",
"e",
":",
"# Make sure there is exactly one exception.",
"if",
"len",
"(",
"e",
".",
"exceptions",
")",
"!=",
"1",
":",
"raise",
"inner",
"=",
"e",
".",
"exceptions",
"[",
"0",
"]",
"# Make sure the exception is the right type.",
"if",
"not",
"isinstance",
"(",
"inner",
",",
"error_type",
")",
":",
"raise",
"# Success.",
"container",
".",
"exception",
"=",
"inner"
] | For use in tests. Many tests expect a single error to be thrown, and
want it to be of a specific type. This is a helper method for when that
type is inside a gathered exception. | [
"For",
"use",
"in",
"tests",
".",
"Many",
"tests",
"expect",
"a",
"single",
"error",
"to",
"be",
"thrown",
"and",
"want",
"it",
"to",
"be",
"of",
"a",
"specific",
"type",
".",
"This",
"is",
"a",
"helper",
"method",
"for",
"when",
"that",
"type",
"is",
"inside",
"a",
"gathered",
"exception",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_helpers.py#L201-L217 | train |
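
A self-contained illustration of the pattern in the entry above, for readers who want to see the call shape. Everything here is a local stand-in; the real GatheredExceptions, RaisesGatheredContainer, and the @contextmanager wiring are not shown in this slice:

from contextlib import contextmanager


class FakeGatheredExceptions(Exception):
    # Stand-in for a gathered error that wraps a list of inner exceptions.
    def __init__(self, exceptions):
        super().__init__(exceptions)
        self.exceptions = exceptions


class Container:
    exception = None


@contextmanager
def fake_raises_gathered(error_type):
    # Same idea as above: re-raise unless exactly one inner error matches.
    container = Container()
    try:
        yield container
    except FakeGatheredExceptions as e:
        if len(e.exceptions) != 1 or not isinstance(e.exceptions[0], error_type):
            raise
        container.exception = e.exceptions[0]


# Typical test-style usage: the matched inner exception is exposed for checks.
with fake_raises_gathered(ValueError) as cm:
    raise FakeGatheredExceptions([ValueError('bad input')])
assert isinstance(cm.exception, ValueError)
assert str(cm.exception) == 'bad input'
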
buildinspace/peru | peru/resources/plugins/curl/curl_plugin.py | get_request_filename | def get_request_filename(request):
'''Figure out the filename for an HTTP download.'''
# Check to see if a filename is specified in the HTTP headers.
if 'Content-Disposition' in request.info():
disposition = request.info()['Content-Disposition']
pieces = re.split(r'\s*;\s*', disposition)
for piece in pieces:
if piece.startswith('filename='):
filename = piece[len('filename='):]
# Strip exactly one " from each end.
if filename.startswith('"'):
filename = filename[1:]
if filename.endswith('"'):
filename = filename[:-1]
# Interpret backslashed quotes.
filename = filename.replace('\\"', '"')
return filename
# If no filename was specified, pick a reasonable default.
return os.path.basename(urlsplit(request.url).path) or 'index.html' | python | def get_request_filename(request):
'''Figure out the filename for an HTTP download.'''
# Check to see if a filename is specified in the HTTP headers.
if 'Content-Disposition' in request.info():
disposition = request.info()['Content-Disposition']
pieces = re.split(r'\s*;\s*', disposition)
for piece in pieces:
if piece.startswith('filename='):
filename = piece[len('filename='):]
# Strip exactly one " from each end.
if filename.startswith('"'):
filename = filename[1:]
if filename.endswith('"'):
filename = filename[:-1]
# Interpret backslashed quotes.
filename = filename.replace('\\"', '"')
return filename
# If no filename was specified, pick a reasonable default.
return os.path.basename(urlsplit(request.url).path) or 'index.html' | [
"def",
"get_request_filename",
"(",
"request",
")",
":",
"# Check to see if a filename is specified in the HTTP headers.",
"if",
"'Content-Disposition'",
"in",
"request",
".",
"info",
"(",
")",
":",
"disposition",
"=",
"request",
".",
"info",
"(",
")",
"[",
"'Content-Disposition'",
"]",
"pieces",
"=",
"re",
".",
"split",
"(",
"r'\\s*;\\s*'",
",",
"disposition",
")",
"for",
"piece",
"in",
"pieces",
":",
"if",
"piece",
".",
"startswith",
"(",
"'filename='",
")",
":",
"filename",
"=",
"piece",
"[",
"len",
"(",
"'filename='",
")",
":",
"]",
"# Strip exactly one \" from each end.",
"if",
"filename",
".",
"startswith",
"(",
"'\"'",
")",
":",
"filename",
"=",
"filename",
"[",
"1",
":",
"]",
"if",
"filename",
".",
"endswith",
"(",
"'\"'",
")",
":",
"filename",
"=",
"filename",
"[",
":",
"-",
"1",
"]",
"# Interpret backslashed quotes.",
"filename",
"=",
"filename",
".",
"replace",
"(",
"'\\\\\"'",
",",
"'\"'",
")",
"return",
"filename",
"# If no filename was specified, pick a reasonable default.",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"urlsplit",
"(",
"request",
".",
"url",
")",
".",
"path",
")",
"or",
"'index.html'"
] | Figure out the filename for an HTTP download. | [
"Figure",
"out",
"the",
"filename",
"for",
"an",
"HTTP",
"download",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/resources/plugins/curl/curl_plugin.py#L16-L34 | train |
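
An offline sketch of the filename logic in the entry above. FakeResponse mimics the only two pieces of a urllib response object the function touches (.info() and .url); the import path is a guess based on the file's location in the repo, so pasting the function in locally works just as well:

from peru.resources.plugins.curl.curl_plugin import get_request_filename


class FakeResponse:
    # Hypothetical stand-in for the urllib response object.
    def __init__(self, url, headers):
        self.url = url
        self._headers = headers

    def info(self):
        return self._headers


# Filename taken from the Content-Disposition header, surrounding quotes
# stripped.
resp = FakeResponse(
    'https://example.com/download?id=42',
    {'Content-Disposition': 'attachment; filename="report v1.tar.gz"'})
assert get_request_filename(resp) == 'report v1.tar.gz'

# Without the header, the last path component is used, or index.html as a
# final fallback.
resp = FakeResponse('https://example.com/files/data.bin', {})
assert get_request_filename(resp) == 'data.bin'
assert get_request_filename(FakeResponse('https://example.com/', {})) == 'index.html'
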
buildinspace/peru | peru/parser.py | _extract_optional_list_field | def _extract_optional_list_field(blob, name):
'''Handle optional fields that can be either a string or a list of
strings.'''
value = _optional_list(typesafe_pop(blob, name, []))
if value is None:
raise ParserError(
'"{}" field must be a string or a list.'.format(name))
return value | python | def _extract_optional_list_field(blob, name):
'''Handle optional fields that can be either a string or a list of
strings.'''
value = _optional_list(typesafe_pop(blob, name, []))
if value is None:
raise ParserError(
'"{}" field must be a string or a list.'.format(name))
return value | [
"def",
"_extract_optional_list_field",
"(",
"blob",
",",
"name",
")",
":",
"value",
"=",
"_optional_list",
"(",
"typesafe_pop",
"(",
"blob",
",",
"name",
",",
"[",
"]",
")",
")",
"if",
"value",
"is",
"None",
":",
"raise",
"ParserError",
"(",
"'\"{}\" field must be a string or a list.'",
".",
"format",
"(",
"name",
")",
")",
"return",
"value"
] | Handle optional fields that can be either a string or a list of
strings. | [
"Handle",
"optional",
"fields",
"that",
"can",
"be",
"either",
"a",
"string",
"or",
"a",
"list",
"of",
"strings",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/parser.py#L135-L142 | train |
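
_optional_list and typesafe_pop are not shown in this slice. A plausible reading of the normalization step, consistent with the error message above, is the following sketch; it is an assumption about the helper's behavior, not the actual peru source:

def _optional_list(value):
    # Hypothetical normalizer: a lone string becomes a one-element list, a
    # list of strings passes through, and anything else returns None so the
    # caller can raise ParserError.
    if isinstance(value, str):
        return [value]
    if isinstance(value, list) and all(isinstance(x, str) for x in value):
        return value
    return None


assert _optional_list('rules.yaml') == ['rules.yaml']
assert _optional_list(['a.txt', 'b.txt']) == ['a.txt', 'b.txt']
assert _optional_list({'not': 'a list'}) is None
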
buildinspace/peru | peru/async_exit_stack.py | AsyncExitStack.pop_all | def pop_all(self):
"""Preserve the context stack by transferring it to a new instance."""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack | python | def pop_all(self):
"""Preserve the context stack by transferring it to a new instance."""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack | [
"def",
"pop_all",
"(",
"self",
")",
":",
"new_stack",
"=",
"type",
"(",
"self",
")",
"(",
")",
"new_stack",
".",
"_exit_callbacks",
"=",
"self",
".",
"_exit_callbacks",
"self",
".",
"_exit_callbacks",
"=",
"deque",
"(",
")",
"return",
"new_stack"
] | Preserve the context stack by transferring it to a new instance. | [
"Preserve",
"the",
"context",
"stack",
"by",
"transferring",
"it",
"to",
"a",
"new",
"instance",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_exit_stack.py#L55-L60 | train |
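
pop_all above mirrors contextlib.ExitStack.pop_all: its usual purpose is to keep resources alive past the with block once setup has succeeded. A sketch of that pattern using the standard library's synchronous ExitStack, which has the same transfer semantics:

import tempfile
from contextlib import ExitStack


def open_scratch_files(count):
    with ExitStack() as stack:
        files = [stack.enter_context(tempfile.TemporaryFile())
                 for _ in range(count)]
        # If any open above had failed, the with block would close the rest.
        # On success, hand ownership of the cleanup callbacks to the caller.
        return files, stack.pop_all()


files, cleanup = open_scratch_files(3)
try:
    files[0].write(b'scratch data')
finally:
    cleanup.close()  # closes every file registered on the original stack
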
buildinspace/peru | peru/async_exit_stack.py | AsyncExitStack.callback | def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
_exit_wrapper = self._create_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection.
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper)
return callback | python | def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
_exit_wrapper = self._create_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection.
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper)
return callback | [
"def",
"callback",
"(",
"self",
",",
"callback",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"_exit_wrapper",
"=",
"self",
".",
"_create_cb_wrapper",
"(",
"callback",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"# We changed the signature, so using @wraps is not appropriate, but",
"# setting __wrapped__ may still help with introspection.",
"_exit_wrapper",
".",
"__wrapped__",
"=",
"callback",
"self",
".",
"_push_exit_callback",
"(",
"_exit_wrapper",
")",
"return",
"callback"
] | Registers an arbitrary callback and arguments.
Cannot suppress exceptions. | [
"Registers",
"an",
"arbitrary",
"callback",
"and",
"arguments",
".",
"Cannot",
"suppress",
"exceptions",
"."
] | 76e4012c6c34e85fb53a4c6d85f4ac3633d93f77 | https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_exit_stack.py#L94-L104 | train |
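
The callback method above registers plain callables rather than context managers; they run in last-in, first-out order when the stack unwinds and, as the docstring notes, cannot suppress exceptions. The call shape, shown here with the standard library's ExitStack, which behaves the same way for synchronous callbacks:

from contextlib import ExitStack

log = []

with ExitStack() as stack:
    stack.callback(log.append, 'registered first, runs last')
    stack.callback(log.append, 'registered second, runs first')

assert log == ['registered second, runs first',
               'registered first, runs last']
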