Dataset Viewer
Each row has the following columns (name and type recovered from the flattened schema header):

| Column | Type |
| --- | --- |
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
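For working with the rows programmatically, a minimal sketch using the Hugging Face `datasets` library (the dataset ID `org/dataset-name` is a placeholder, not this card's real ID):

```python
from datasets import load_dataset

# Placeholder ID -- substitute the dataset this card actually belongs to.
ds = load_dataset("org/dataset-name", split="train", streaming=True)

# Peek at one record and a few schema-defined fields.
row = next(iter(ds))
print(row["max_stars_repo_name"], row["size"], row["lang"])
print(row["content"][:200])  # first 200 characters of the source file
```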
Sample 1: quick_search/admin.py (Python, 241 bytes)

| hexsha | size | ext | lang |
| --- | --- | --- | --- |
| d99a20277c32bb1e28312f42ab6d732f38323169 | 241 | py | Python |

|  | max_stars | max_issues | max_forks |
| --- | --- | --- | --- |
| repo_path | quick_search/admin.py | quick_search/admin.py | quick_search/admin.py |
| repo_name | naman1901/django-quick-search | naman1901/django-quick-search | HereWithoutPermission/django-quick-search |
| repo_head_hexsha | 7b93554ed9fa4721e52372f9fd1a395d94cc04a7 | 7b93554ed9fa4721e52372f9fd1a395d94cc04a7 | 7b93554ed9fa4721e52372f9fd1a395d94cc04a7 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | 2 | null |
| event_min_datetime | null | 2020-02-11T23:28:22.000Z | null |
| event_max_datetime | null | 2020-06-05T19:27:40.000Z | null |

content:

```python
from django.contrib import admin

from .models import SearchResult


# Register your models here.
class SearchResultAdmin(admin.ModelAdmin):
    fields = ["query", "heading", "url", "text"]


admin.site.register(SearchResult, SearchResultAdmin)
```

Quality signals (remaining schema columns, avg_line_length through hits):
30.125 | 52 | 0.771784 | 27 | 241 | 6.888889 | 0.703704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116183 | 241 | 8 | 52 | 30.125 | 0.873239 | 0.107884 | 0 | 0 | 0 | 0 | 0.088785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3
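The admin module in sample 1 presupposes a `SearchResult` model in the same app. A minimal sketch of a model that would satisfy it; only the four field names are attested by the sample, the field types are assumptions:

```python
# quick_search/models.py -- hypothetical reconstruction, not part of the dataset row.
from django.db import models


class SearchResult(models.Model):
    query = models.CharField(max_length=255)
    heading = models.CharField(max_length=255)
    url = models.URLField()
    text = models.TextField()
```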
Sample 2: app/views/web/homestack.py (Python, 375 bytes)

| hexsha | size | ext | lang |
| --- | --- | --- | --- |
| d9a88e74a4ac032ae6e8218d9ec1ed42e6092d32 | 375 | py | Python |

|  | max_stars | max_issues | max_forks |
| --- | --- | --- | --- |
| repo_path | app/views/web/homestack.py | app/views/web/homestack.py | app/views/web/homestack.py |
| repo_name | geudrik/hautomation | geudrik/hautomation | geudrik/hautomation |
| repo_head_hexsha | 0baae29e85cd68658a0f8578de2e36e42945053f | 0baae29e85cd68658a0f8578de2e36e42945053f | 0baae29e85cd68658a0f8578de2e36e42945053f |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
#! /usr/bin/env python2.7
# -*- coding: latin-1 -*-

from flask import Blueprint
from flask import current_app
from flask import render_template

from flask_login import login_required

homestack = Blueprint("homestack", __name__, url_prefix="/homestack")


@homestack.route("/", methods=["GET"])
@login_required
def home():
    return render_template("homestack/home.html")
```

Quality signals (remaining schema columns, avg_line_length through hits):
22.058824 | 69 | 0.749333 | 49 | 375 | 5.510204 | 0.591837 | 0.133333 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009091 | 0.12 | 375 | 16 | 70 | 23.4375 | 0.809091 | 0.128 | 0 | 0 | 0 | 0 | 0.129231 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.444444 | 0.111111 | 0.666667 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 3
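A blueprint like the one in sample 2 only serves requests once it is registered on a Flask application. A minimal sketch of that wiring; the host application below is assumed, not part of the sample:

```python
# Hypothetical host application for the homestack blueprint above.
from flask import Flask
from flask_login import LoginManager

app = Flask(__name__)
login_manager = LoginManager(app)  # @login_required needs a configured LoginManager

app.register_blueprint(homestack)  # routes now live under /homestack/
```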
Sample 3: abc/abc165/abc165e.py (Python, 211 bytes)

| hexsha | size | ext | lang |
| --- | --- | --- | --- |
| d9b55a7ee025f94a0ef3f125fa9c30f974dd7d6e | 211 | py | Python |

|  | max_stars | max_issues | max_forks |
| --- | --- | --- | --- |
| repo_path | abc/abc165/abc165e.py | abc/abc165/abc165e.py | abc/abc165/abc165e.py |
| repo_name | c-yan/atcoder | c-yan/atcoder | c-yan/atcoder |
| repo_head_hexsha | 940e49d576e6a2d734288fadaf368e486480a948 | 940e49d576e6a2d734288fadaf368e486480a948 | 940e49d576e6a2d734288fadaf368e486480a948 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 1 | null | null |
| event_min_datetime | 2019-08-21T00:49:34.000Z | null | null |
| event_max_datetime | 2019-08-21T00:49:34.000Z | null | null |

content:

```python
N, M = map(int, input().split())

for i in range(1, M + 1):
    if i % 2 == 1:
        j = (i - 1) // 2
        print(1 + j, M + 1 - j)
    else:
        j = (i - 2) // 2
        print(M + 2 + j, 2 * M + 1 - j)
```

Quality signals (remaining schema columns, avg_line_length through hits):
21.1 | 39 | 0.336493 | 40 | 211 | 1.775 | 0.4 | 0.112676 | 0.084507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 0.445498 | 211 | 9 | 40 | 23.444444 | 0.495727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3
Sample 4: graphdb/transformer.py (Python, 2,110 bytes)

| hexsha | size | ext | lang |
| --- | --- | --- | --- |
| d9b8d42e905cba910e6a30f7d6f38e82d05ab46c | 2,110 | py | Python |

|  | max_stars | max_issues | max_forks |
| --- | --- | --- | --- |
| repo_path | graphdb/transformer.py | graphdb/transformer.py | graphdb/transformer.py |
| repo_name | muggat0n/graphdb | muggat0n/graphdb | muggat0n/graphdb |
| repo_head_hexsha | 56dfd5ef8a3321abc6a919faee47494bbe059080 | 56dfd5ef8a3321abc6a919faee47494bbe059080 | 56dfd5ef8a3321abc6a919faee47494bbe059080 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 2 | null | null |
| event_min_datetime | 2020-08-28T13:42:38.000Z | null | null |
| event_max_datetime | 2020-09-05T03:13:45.000Z | null | null |

content:

```python
"""
A query transformer is a function that accepts a program and returns a program, plus a priority level.
Higher priority transformers are placed closer to the front of the list. We're ensuring it is a function,
because we're going to evaluate it later.
We'll assume there won't be an enormous number of transformer additions,
and walk the list linearly to add a new one.
We'll leave a note in case this assumption turns out to be false:
a binary search is much more time-optimal for long lists,
but adds a little complexity and doesn't really speed up short lists.
"""


class Transformer:
    def __init__(self):
        self.T = []

    def transform(self, program):
        return program


"""
Dagoba.T = []  # transformers (more than meets the eye)
"""

"""
Dagoba.addTransformer = function(fun, priority) {
    if(typeof fun != 'function')
        return Dagoba.error('Invalid transformer function')

    for(var i = 0; i < Dagoba.T.length; i++)  # OPT: binary search
        if(priority > Dagoba.T[i].priority) break

    Dagoba.T.splice(i, 0, {priority: priority, fun: fun})
}
"""

"""
Dagoba.transform = function(program) {
    return Dagoba.T.reduce(function(acc, transformer) {
        return transformer.fun(acc)
    }, program)
}
"""

"""
Dagoba.addAlias = function(newname, oldname, defaults) {
    defaults = defaults || []  # default arguments for the alias
    Dagoba.addPipetype(newname, function() {})  # because there's no method catchall in js
    Dagoba.addTransformer(function(program) {
        return program.map(function(step) {
            if(step[0] != newname) return step
            return [oldname, Dagoba.extend(step[1], defaults)]
        })
    }, 100)  # these need to run early, so they get a high priority
}
"""

"""
Dagoba.extend = function(list, defaults) {
    return Object.keys(defaults).reduce(function(acc, key) {
        if(typeof list[key] != 'undefined') return acc
        acc[key] = defaults[key]
        return acc
    }, list)
}
"""
```

Quality signals (remaining schema columns, avg_line_length through hits):
30.57971 | 120 | 0.627962 | 273 | 2,110 | 4.842491 | 0.47619 | 0.026475 | 0.016641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005799 | 0.264455 | 2,110 | 68 | 121 | 31.029412 | 0.845361 | 0.267299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0 | 0 | 0.2 | 0.8 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3
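The docstring in sample 4 describes priority-ordered registration that its `Transformer` stub does not implement. A sketch of what that could look like in Python, transliterated from the quoted Dagoba.addTransformer / Dagoba.transform JavaScript; the method names are my own, not the repository's:

```python
# Hypothetical completion of the Transformer stub above.
class Transformer:
    def __init__(self):
        self.T = []  # (priority, fun) pairs, highest priority first

    def add_transformer(self, fun, priority):
        if not callable(fun):
            raise ValueError("Invalid transformer function")
        # Walk linearly to keep the list sorted by descending priority.
        i = 0
        while i < len(self.T) and self.T[i][0] >= priority:  # OPT: binary search
            i += 1
        self.T.insert(i, (priority, fun))

    def transform(self, program):
        # Fold the program through every transformer, front to back.
        for _priority, fun in self.T:
            program = fun(program)
        return program
```

Linear insertion matches the docstring's stated assumption that registrations are few; swapping in a binary search would only matter for long transformer lists.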
Sample 5: Courses/1 month/2 week/day 6/Formula.py (Python, 91 bytes)

| hexsha | size | ext | lang |
| --- | --- | --- | --- |
| d9b9563b7aae9c46b0fbd98073d96eeedfaec4aa | 91 | py | Python |

|  | max_stars | max_issues | max_forks |
| --- | --- | --- | --- |
| repo_path | Courses/1 month/2 week/day 6/Formula.py | Courses/1 month/2 week/day 6/Formula.py | Courses/1 month/2 week/day 6/Formula.py |
| repo_name | emir-naiz/first_git_lesson | emir-naiz/first_git_lesson | emir-naiz/first_git_lesson |
| repo_head_hexsha | 1fecf712290f6da3ef03deff518870d91638eb69 | 1fecf712290f6da3ef03deff518870d91638eb69 | 1fecf712290f6da3ef03deff518870d91638eb69 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
summary = 0
i = 0
while i < 5:
    summary = summary + i
    print(summary)
    i = i + 1
```

Quality signals (remaining schema columns, avg_line_length through hits):
11.375 | 25 | 0.516484 | 15 | 91 | 3.133333 | 0.466667 | 0.340426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070175 | 0.373626 | 91 | 7 | 26 | 13 | 0.754386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3
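The loop in sample 5 prints the running sums 0, 1, 3, 6, 10. An equivalent idiomatic form for comparison; `itertools.accumulate` is my substitution, not in the sample:

```python
from itertools import accumulate

# Running sums of 0..4, same output as the while loop above.
for summary in accumulate(range(5)):
    print(summary)
```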
Sample 6: tests/image_saver/image_saver_7.py (Python, 243 bytes)

| hexsha | size | ext | lang |
| --- | --- | --- | --- |
| d9b9af3bd25b0d2f9357446b0ff43e3ab614b141 | 243 | py | Python |

|  | max_stars | max_issues | max_forks |
| --- | --- | --- | --- |
| repo_path | tests/image_saver/image_saver_7.py | tests/image_saver/image_saver_7.py | tests/image_saver/image_saver_7.py |
| repo_name | Vicken-Ghoubiguian/Imtreat | Vicken-Ghoubiguian/Imtreat | Vicken-Ghoubiguian/Imtreat |
| repo_head_hexsha | 1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0 | 1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0 | 1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
import imtreat

img = imtreat.imageManagerClass.openImageFunction("../images/soleil.png", 0)

img = imtreat.definedModesClass.detailEnhanceFunction(img)

imtreat.imageManagerClass.saveImageFunction("/Téléchargements/", "image_1", ".png", img)
```

Quality signals (remaining schema columns, avg_line_length through hits):
30.375 | 88 | 0.794239 | 23 | 243 | 8.347826 | 0.652174 | 0.15625 | 0.28125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008811 | 0.065844 | 243 | 7 | 89 | 34.714286 | 0.837004 | 0 | 0 | 0 | 0 | 0 | 0.197531 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3
Sample 7: src/biotite/copyable.py (Python, 1,972 bytes)

| hexsha | size | ext | lang |
| --- | --- | --- | --- |
| d9c389b63a2c9720abef56190237f31a2306da19 | 1,972 | py | Python |

|  | max_stars | max_issues | max_forks |
| --- | --- | --- | --- |
| repo_path | src/biotite/copyable.py | src/biotite/copyable.py | src/biotite/copyable.py |
| repo_name | danijoo/biotite | danielmuthama/biotite | danielmuthama/biotite |
| repo_head_hexsha | 22072e64676e4e917236eac8493eed4c6a22cc33 | cb238a8d8d7dc82b3bcea274d7d91d5c876badcd | cb238a8d8d7dc82b3bcea274d7d91d5c876badcd |
| repo_licenses | ["BSD-3-Clause"] | ["BSD-3-Clause"] | ["BSD-3-Clause"] |
| count | 208 | 121 | 49 |
| event_min_datetime | 2018-04-20T15:59:42.000Z | 2017-11-15T14:52:07.000Z | 2018-07-19T09:06:24.000Z |
| event_max_datetime | 2022-03-22T07:47:12.000Z | 2022-03-30T16:31:41.000Z | 2022-03-23T17:21:34.000Z |

content:

```python
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.

__name__ = "biotite"
__author__ = "Patrick Kunzmann"
__all__ = ["Copyable"]

import abc


class Copyable(metaclass=abc.ABCMeta):
    """
    Base class for all objects, that should be copyable.

    The public method `copy()` first creates a fresh instance of the
    class of the instance, that is copied via the `__copy_create__()`
    method. All variables, that could not be set via the constructor,
    are then copied via `__copy_fill__()`, starting with the method in
    the uppermost base class and ending with the class of the instance
    to be copied.

    This approach solves the problem of encapsulated variables in
    superclasses.
    """

    def copy(self):
        """
        Create a deep copy of this object.

        Returns
        -------
        copy
            A copy of this object.
        """
        clone = self.__copy_create__()
        self.__copy_fill__(clone)
        return clone

    def __copy_create__(self):
        """
        Instantiate a new object of this class.

        Only the constructor should be called in this method.
        All further attributes, that need to be copied are handled
        in `__copy_fill__()`

        Do not call the `super()` method here.

        This method must be overridden, if the constructor takes
        parameters.

        Returns
        -------
        copy
            A freshly instantiated copy of *self*.
        """
        return type(self)()

    def __copy_fill__(self, clone):
        """
        Copy all necessary attributes to the new object.

        Always call the `super()` method as first statement.

        Parameters
        ----------
        clone
            The freshly instantiated copy of *self*.
        """
        pass
```

Quality signals (remaining schema columns, avg_line_length through hits):
27.774648 | 70 | 0.59432 | 235 | 1,972 | 4.787234 | 0.421277 | 0.017778 | 0.017778 | 0.023111 | 0.088889 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00076 | 0.333164 | 1,972 | 71 | 71 | 27.774648 | 0.854753 | 0.626775 | 0 | 0 | 0 | 0 | 0.077114 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.230769 | false | 0.076923 | 0.076923 | 0 | 0.538462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 3
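`Copyable` in sample 7 dictates a two-step protocol: `__copy_create__()` re-runs the constructor and `__copy_fill__()` copies the remaining state. A minimal sketch of a conforming subclass; the `Vector` class is invented for illustration:

```python
# Hypothetical subclass following the protocol described in the docstrings above.
class Vector(Copyable):
    def __init__(self, dim):
        self._dim = dim    # settable via the constructor
        self.values = []   # state the constructor cannot set

    def __copy_create__(self):
        # The constructor takes a parameter, so this must be overridden.
        return Vector(self._dim)

    def __copy_fill__(self, clone):
        super().__copy_fill__(clone)  # always call super() first
        clone.values = list(self.values)
```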
Sample 8: sqlova/model/nl2sql/wikisql_models.py (Python, 74,778 bytes)

| hexsha | size | ext | lang |
| --- | --- | --- | --- |
| d9f1f15178cb9e26d9b4f91695b333a07eaa59d6 | 74,778 | py | Python |

|  | max_stars | max_issues | max_forks |
| --- | --- | --- | --- |
| repo_path | sqlova/model/nl2sql/wikisql_models.py | sqlova/model/nl2sql/wikisql_models.py | sqlova/model/nl2sql/wikisql_models.py |
| repo_name | guotong1988/Rule-SQL | guotong1988/Rule-SQL | guotong1988/Rule-SQL |
| repo_head_hexsha | e826c0d659c8b35a72b64aa2b50d4d943fdd70f1 | e826c0d659c8b35a72b64aa2b50d4d943fdd70f1 | e826c0d659c8b35a72b64aa2b50d4d943fdd70f1 |
| repo_licenses | ["Apache-2.0"] | ["Apache-2.0"] | ["Apache-2.0"] |
| count | 15 | 1 | 3 |
| event_min_datetime | 2019-07-25T12:13:31.000Z | 2020-01-07T05:49:15.000Z | 2019-10-01T09:14:35.000Z |
| event_max_datetime | 2020-10-17T13:42:58.000Z | 2020-04-22T01:22:00.000Z | 2020-07-18T08:39:48.000Z |

content:

```python
# Copyright 2019-present NAVER Corp.
# Apache License v2.0

# Wonseok Hwang

import os, json
from copy import deepcopy
from matplotlib.pylab import *

import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *

class Seq2SQL_v1(nn.Module):
    def __init__(self, input_size, hidden_size, num_layer, dropout,
                 number_cond_ops, number_agg_ops, old=False):
        super(Seq2SQL_v1, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.dropout = dropout
        self.max_where_number = 4
        self.number_cond_ops = number_cond_ops
        self.number_agg_ops = number_agg_ops

        self.select_column_predict = SelectColumnPredict(input_size, hidden_size, num_layer, dropout)
        self.select_agg_predict = SelectAggPredict(input_size, hidden_size, num_layer, dropout, number_agg_ops, old=old)
        self.where_number_predict = WhereNumberPredict(input_size, hidden_size, num_layer, dropout)
        self.wcp = WhereColumnPredict(input_size, hidden_size, num_layer, dropout)
        self.wop = WhereOpPredict(input_size, hidden_size, num_layer, dropout, number_cond_ops)
        self.wvp = WhereValuePredict_startend(input_size, hidden_size, num_layer, dropout, number_cond_ops, old=old)  # start-end-search-discriminative model

    # emb_question, [16,26,1536]
    # len_question, [16]
    # emb_header, [102,12,1536]
    # len_header_token, [102]
    # number_header, [16]
    def forward(self, emb_question, len_question, emb_header, len_header_token, number_header,
                g_sc=None, g_sa=None, g_wn=None, g_wc=None, g_wo=None, g_wvi=None,
                show_p_sc=False, show_p_sa=False,
                show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):

        # sc
        s_sc, s_sc_softmax = self.select_column_predict(emb_question, len_question, emb_header, len_header_token, number_header, show_p_sc=show_p_sc)

        if g_sc:
            pr_sc = g_sc
        else:
            pr_sc = pred_sc(s_sc)

        # sa
        s_sa, s_sa_softmax = self.select_agg_predict(emb_question, len_question, emb_header, len_header_token, number_header, pr_sc, show_p_sa=show_p_sa)

        if g_sa:
            # it's not necessary though.
            pr_sa = g_sa
        else:
            pr_sa = pred_sa(s_sa)

        # wn
        s_wn, s_wn_softmax = self.where_number_predict(emb_question, len_question, emb_header, len_header_token, number_header, show_p_wn=show_p_wn)

        if g_wn:
            pr_wn = g_wn
        else:
            pr_wn = pred_wn(s_wn)

        # wc
        s_wc, s_wc_softmax = self.wcp(emb_question, len_question, emb_header, len_header_token, number_header, show_p_wc=show_p_wc, penalty=True)

        if g_wc:
            pr_wc = g_wc
        else:
            pr_wc = pred_wherecolumn(pr_wn, s_wc)

        # wo
        s_wo, s_wo_softmax = self.wop(emb_question, len_question, emb_header, len_header_token, number_header, wn=pr_wn, wc=pr_wc, show_p_wo=show_p_wo)

        if g_wo:
            pr_wo = g_wo
        else:
            pr_wo = pred_wo(pr_wn, s_wo)

        # wv
        s_wv, s_wv_softmax = self.wvp(emb_question, len_question, emb_header, len_header_token, number_header, wn=pr_wn, wc=pr_wc, wo=pr_wo, show_p_wv=show_p_wv)

        return s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, s_sc_softmax, s_sa_softmax, s_wn_softmax, s_wc_softmax, s_wo_softmax, s_wv_softmax
    def beam_forward(self, emb_question, len_question, emb_header, len_header_token, l_header, engine, tb,
                     nlu_t, nlu_wp_t, wp_to_wh_index, nlu,
                     beam_size=4,
                     show_p_sc=False, show_p_sa=False,
                     show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
        """
        Execution-guided beam decoding.
        """
        # sc
        s_sc, _ = self.select_column_predict(emb_question, len_question, emb_header, len_header_token, l_header, show_p_sc=show_p_sc)
        prob_sc = F.softmax(s_sc, dim=-1)
        bS, mcL = s_sc.shape

        # minimum_header_length = min(l_header)
        # beam_size = minimum_header_length if beam_size > minimum_header_length else beam_size

        # sa
        # Construct all possible sc_sa_score
        prob_sc_sa = torch.zeros([bS, beam_size, self.number_agg_ops]).to(device)
        prob_sca = torch.zeros_like(prob_sc_sa).to(device)

        # get the top-k indices.  pr_sc_beam = [B, beam_size]
        pr_sc_beam = pred_sc_beam(s_sc, beam_size)

        # calculate and predict s_sa.
        for i_beam in range(beam_size):
            pr_sc = list(array(pr_sc_beam)[:, i_beam])
            s_sa, _ = self.select_agg_predict(emb_question, len_question, emb_header, len_header_token, l_header, pr_sc, show_p_sa=show_p_sa)
            prob_sa = F.softmax(s_sa, dim=-1)
            prob_sc_sa[:, i_beam, :] = prob_sa

            prob_sc_selected = prob_sc[range(bS), pr_sc]  # [B]
            prob_sca[:, i_beam, :] = (prob_sa.t() * prob_sc_selected).t()
            # [mcL, B] * [B] -> [mcL, B] (element-wise multiplication)
            # [mcL, B] -> [B, mcL]

        # Calculate the dimension of tensor
        # tot_dim = len(prob_sca.shape)

        # First flatten to 1-d
        idxs = topk_multi_dim(torch.tensor(prob_sca), n_topk=beam_size, batch_exist=True)
        # Now as sc_idx is already sorted, re-map them properly.
        idxs = remap_sc_idx(idxs, pr_sc_beam)  # [sc_beam_idx, sa_idx] -> [sc_idx, sa_idx]
        idxs_arr = array(idxs)
        # [B, beam_size, remaining dim]
        # idxs[b][0] gives first probable [sc_idx, sa_idx] pairs.
        # idxs[b][1] gives of second.

        # Calculate prob_sca, a joint probability
        beam_idx_sca = [0] * bS
        beam_meet_the_final = [False] * bS
        while True:
            pr_sc = idxs_arr[range(bS), beam_idx_sca, 0]
            pr_sa = idxs_arr[range(bS), beam_idx_sca, 1]

            # map index properly
            check = check_sc_sa_pairs(tb, pr_sc, pr_sa)

            if sum(check) == bS:
                break
            else:
                for b, check1 in enumerate(check):
                    if not check1:  # wrong pair
                        beam_idx_sca[b] += 1
                        if beam_idx_sca[b] >= beam_size:
                            beam_meet_the_final[b] = True
                            beam_idx_sca[b] -= 1
                    else:
                        beam_meet_the_final[b] = True

            if sum(beam_meet_the_final) == bS:
                break

        # Now pr_sc, pr_sa are properly predicted.
        pr_sc_best = list(pr_sc)
        pr_sa_best = list(pr_sa)

        # Now, Where-clause beam search.
        s_wn, _ = self.where_number_predict(emb_question, len_question, emb_header, len_header_token, l_header, show_p_wn=show_p_wn)
        prob_wn = F.softmax(s_wn, dim=-1).detach().to('cpu').numpy()

        # Found "executable" most likely 4(=max_num_of_conditions) where-clauses.
        # wc
        s_wc, _ = self.wcp(emb_question, len_question, emb_header, len_header_token, l_header, show_p_wc=show_p_wc, penalty=True)
        prob_wc = F.sigmoid(s_wc).detach().to('cpu').numpy()
        # pr_wc_sorted_by_prob = pred_wc_sorted_by_prob(s_wc)

        # get max_wn # of most probable columns & their prob.
        pr_wn_max = [self.max_where_number] * bS
        pr_wc_max = pred_wherecolumn(pr_wn_max, s_wc)  # if some column do not have executable where-clause, omit that column
        prob_wc_max = zeros([bS, self.max_where_number])
        for b, pr_wc_max1 in enumerate(pr_wc_max):
            prob_wc_max[b, :] = prob_wc[b, pr_wc_max1]

        # get most probable max_wn where-clauses
        # wo
        s_wo_max, _ = self.wop(emb_question, len_question, emb_header, len_header_token, l_header, wn=pr_wn_max, wc=pr_wc_max, show_p_wo=show_p_wo)
        prob_wo_max = F.softmax(s_wo_max, dim=-1).detach().to('cpu').numpy()
        # [B, max_wn, n_cond_op]

        pr_wvi_beam_op_list = []
        prob_wvi_beam_op_list = []
        for i_op in range(self.number_cond_ops - 1):
            pr_wo_temp = [[i_op] * self.max_where_number] * bS
            # wv
            s_wv, _ = self.wvp(emb_question, len_question, emb_header, len_header_token, l_header, wn=pr_wn_max, wc=pr_wc_max, wo=pr_wo_temp, show_p_wv=show_p_wv)
            prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy()

            # prob_wv
            pr_wvi_beam, prob_wvi_beam = pred_wvi_se_beam(self.max_where_number, s_wv, beam_size)

            pr_wvi_beam_op_list.append(pr_wvi_beam)
            prob_wvi_beam_op_list.append(prob_wvi_beam)
            # pr_wvi_beam = [B, max_wn, k_logit**2 [st, ed] pairs]

        # pred_wv_beam

        # Calculate joint probability of where-clause
        # prob_w = [batch, wc, wo, wv] = [B, max_wn, n_cond_op, n_pairs]
        n_wv_beam_pairs = prob_wvi_beam.shape[2]
        prob_w = zeros([bS, self.max_where_number, self.number_cond_ops - 1, n_wv_beam_pairs])
        for b in range(bS):
            for i_wn in range(self.max_where_number):
                for i_op in range(self.number_cond_ops - 1):  # do not use final one
                    for i_wv_beam in range(n_wv_beam_pairs):
                        # i_wc = pr_wc_max[b][i_wn]  # already done
                        p_wc = prob_wc_max[b, i_wn]
                        p_wo = prob_wo_max[b, i_wn, i_op]
                        p_wv = prob_wvi_beam_op_list[i_op][b, i_wn, i_wv_beam]

                        prob_w[b, i_wn, i_op, i_wv_beam] = p_wc * p_wo * p_wv

        # Perform execution guided decoding
        conds_max = []
        prob_conds_max = []
        # while len(conds_max) < self.max_wn:
        idxs = topk_multi_dim(torch.tensor(prob_w), n_topk=beam_size, batch_exist=True)
        # idxs = [B, i_wc_beam, i_op, i_wv_pairs]

        # Construct conds1
        for b, idxs1 in enumerate(idxs):
            conds_max1 = []
            prob_conds_max1 = []
            for i_wn, idxs11 in enumerate(idxs1):
                i_wc = pr_wc_max[b][idxs11[0]]
                i_op = idxs11[1]
                wvi = pr_wvi_beam_op_list[i_op][b][idxs11[0]][idxs11[2]]

                # get wv_str
                temp_pr_wv_str, _ = convert_pred_wvi_to_string([[wvi]], [nlu_t[b]], [nlu_wp_t[b]], [wp_to_wh_index[b]], [nlu[b]])
                merged_wv11 = merge_wv_t1_eng(temp_pr_wv_str[0][0], nlu[b])
                conds11 = [i_wc, i_op, merged_wv11]

                prob_conds11 = prob_w[b, idxs11[0], idxs11[1], idxs11[2]]

                # test execution
                # print(nlu[b])
                # print(tb[b]['id'], tb[b]['types'], pr_sc[b], pr_sa[b], [conds11])
                pr_ans = engine.execute(tb[b]['id'], pr_sc[b], pr_sa[b], [conds11])
                if bool(pr_ans):
                    # pr_ans is not empty!
                    conds_max1.append(conds11)
                    prob_conds_max1.append(prob_conds11)
            conds_max.append(conds_max1)
            prob_conds_max.append(prob_conds_max1)

            # May need to do more exhaustive search?
            # i.e. up to.. getting all executable cases.

        # Calculate total probability to decide the number of where-clauses
        pr_sql_i = []
        prob_wn_w = []
        pr_wn_based_on_prob = []

        for b, prob_wn1 in enumerate(prob_wn):
            max_executable_wn1 = len(conds_max[b])
            prob_wn_w1 = []
            prob_wn_w1.append(prob_wn1[0])  # wn=0 case.
            for i_wn in range(max_executable_wn1):
                prob_wn_w11 = prob_wn1[i_wn + 1] * prob_conds_max[b][i_wn]
                prob_wn_w1.append(prob_wn_w11)
            pr_wn_based_on_prob.append(argmax(prob_wn_w1))
            prob_wn_w.append(prob_wn_w1)

            pr_sql_i1 = {'agg': pr_sa_best[b], 'sel': pr_sc_best[b], 'conds': conds_max[b][:pr_wn_based_on_prob[b]]}
            pr_sql_i.append(pr_sql_i1)
        # s_wv = [B, max_wn, max_nlu_tokens, 2]
        return prob_sca, prob_w, prob_wn_w, pr_sc_best, pr_sa_best, pr_wn_based_on_prob, pr_sql_i

class SelectColumnPredict(nn.Module):
    def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3):
        super(SelectColumnPredict, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.dropout = dropout

        self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)
        self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)

        self.W_att = nn.Linear(hidden_size, hidden_size)
        self.W_c = nn.Linear(hidden_size, hidden_size)
        self.W_header = nn.Linear(hidden_size, hidden_size)
        self.sc_out = nn.Sequential(nn.Tanh(), nn.Linear(2 * hidden_size, 1))

        self.softmax_dim1 = nn.Softmax(dim=1)
        self.softmax_dim2 = nn.Softmax(dim=2)
        self.softmax_dim_1 = nn.Softmax(dim=-1)

    # emb_question, [16,26,1536]
    # len_question, [16]
    # emb_header, [102,12,1536]
    # len_header_token, [102]
    # number_header, [16]
    def forward(self, emb_question, len_question, emb_header, len_header_token, number_header, show_p_sc=False):
        # Encode
        encoded_question = encode(self.enc_n, emb_question, len_question,
                                  return_hidden=False,
                                  hc0=None,
                                  last_only=False)  # [b, n, dim]
        encoded_header = encode_header(self.enc_h, emb_header, len_header_token, number_header)  # [b, header, dim]

        bS = len(number_header)
        mL_n = max(len_question)

        # [bS, max_len_header, 100] * [bS, 100, mL_n] -> [bS, max_len_header, mL_n]
        att_h = torch.bmm(encoded_header, self.W_att(encoded_question).transpose(1, 2))

        # Penalty on blank parts
        for b, l_n1 in enumerate(len_question):
            if l_n1 < mL_n:
                att_h[b, :, l_n1:] = -10000000000

        p_n = self.softmax_dim2(att_h)
        if show_p_sc:
            # p = [b, header, n]
            if p_n.shape[0] != 1:
                raise Exception("Batch size should be 1.")
            fig = figure(2001, figsize=(12, 3.5))
            # subplot(6,2,7)
            subplot2grid((7, 2), (3, 0), rowspan=2)
            cla()
            _color = 'rgbkcm'
            _symbol = '.......'
            for i_h in range(number_header[0]):
                color_idx = i_h % len(_color)
                plot(p_n[0][i_h][:].data.numpy() - i_h, '--' + _symbol[color_idx] + _color[color_idx], ms=7)
            title('sc: p_n for each h')
            grid(True)
            fig.tight_layout()
            fig.canvas.draw()
            show()

        # p_n [ bS, max_len_header, mL_n] -> [ bS, max_len_header, mL_n, 1]
        # wenc_n [ bS, mL_n, 100] -> [ bS, 1, mL_n, 100]
        # -> [bS, max_len_header, mL_n, 100] -> [bS, max_len_header, 100]
        c_n = torch.mul(p_n.unsqueeze(3), encoded_question.unsqueeze(1)).sum(dim=2)

        vec = torch.cat([self.W_c(c_n), self.W_header(encoded_header)], dim=2)
        score_select_column = self.sc_out(vec).squeeze(2)  # [bS, max_len_header, 1] -> [bS, max_len_header]
        score_select_column_softmax = self.softmax_dim_1(score_select_column)

        # Penalty
        max_len_header = max(number_header)
        for b, l_header1 in enumerate(number_header):
            if l_header1 < max_len_header:
                score_select_column[b, l_header1:] = -10000000000
        for b, l_header1 in enumerate(number_header):
            if l_header1 < max_len_header:
                score_select_column_softmax[b, l_header1:] = 0

        return score_select_column, score_select_column_softmax

class SelectAggPredict(nn.Module):
    def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_agg_ops=-1, old=False):
        super(SelectAggPredict, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.dropout = dropout

        self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)
        self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)

        self.W_att = nn.Linear(hidden_size, hidden_size)
        self.sa_out = nn.Sequential(nn.Linear(hidden_size, hidden_size),
                                    nn.Tanh(),
                                    nn.Linear(hidden_size, n_agg_ops))  # Fixed number of aggregation operator.

        self.softmax_dim1 = nn.Softmax(dim=1)
        self.softmax_dim2 = nn.Softmax(dim=2)
        self.softmax_dim_1 = nn.Softmax(dim=-1)

        if old:
            # for backward compatibility
            self.W_c = nn.Linear(hidden_size, hidden_size)
            self.W_header = nn.Linear(hidden_size, hidden_size)

    def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, pr_sc, show_p_sa=False):
        # Encode
        encoded_question = encode(self.enc_n, emb_question, len_question,
                                  return_hidden=False,
                                  hc0=None,
                                  last_only=False)  # [b, n, dim]
        encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header)  # [b, header, dim]

        bS = len(l_header)
        mL_n = max(len_question)

        wenc_header_ob = encoded_header[list(range(bS)), pr_sc]  # list, so one sample for each batch.

        # [bS, question_len, 100] * [bS, 100, 1] -> [bS, question_len]
        att = torch.bmm(self.W_att(encoded_question), wenc_header_ob.unsqueeze(2)).squeeze(2)

        # Penalty on blank parts
        for b, l_n1 in enumerate(len_question):
            if l_n1 < mL_n:
                att[b, l_n1:] = -10000000000
        # [bS, question_len]
        p = self.softmax_dim1(att)

        if show_p_sa:
            if p.shape[0] != 1:
                raise Exception("Batch size should be 1.")
            fig = figure(2001)
            subplot(7, 2, 3)
            cla()
            plot(p[0].data.numpy(), '--rs', ms=7)
            title('sa: nlu_weight')
            grid(True)
            fig.tight_layout()
            fig.canvas.draw()
            show()

        # [bS, question_len, 100] * ( [bS, question_len, 1] -> [bS, question_len, 100])
        # -> [bS, question_len, 100] -> [bS, 100]
        c_n = torch.mul(encoded_question, p.unsqueeze(2).expand_as(encoded_question)).sum(dim=1)
        s_sa = self.sa_out(c_n)
        s_sa_softmax = self.softmax_dim_1(s_sa)
        return s_sa, s_sa_softmax

class WhereNumberPredict(nn.Module):
    def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3):
        super(WhereNumberPredict, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.dropout = dropout

        self.mL_w = 4  # max where condition number

        self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)
        self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)

        self.W_att_h = nn.Linear(hidden_size, 1)
        self.W_hidden = nn.Linear(hidden_size, num_layer * hidden_size)
        self.W_cell = nn.Linear(hidden_size, num_layer * hidden_size)
        self.W_att_n = nn.Linear(hidden_size, 1)
        self.wn_out = nn.Sequential(nn.Linear(hidden_size, hidden_size),
                                    nn.Tanh(),
                                    nn.Linear(hidden_size, self.mL_w + 1))  # max number (4 + 1)

        self.softmax_dim1 = nn.Softmax(dim=1)
        self.softmax_dim2 = nn.Softmax(dim=2)
        self.softmax_dim_1 = nn.Softmax(dim=-1)

    def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, show_p_wn=False):
        # Encode
        encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header)  # [b, max_len_header, dim]

        bS = len(l_header)
        max_len_question = max(len_question)
        max_len_header = max(l_header)
        # mL_h = max(len_header_token)

        # (self-attention?) column Embedding?
        # [B, max_len_header, 100] -> [B, max_len_header, 1] -> [B, max_len_header]
        att_h = self.W_att_h(encoded_header).squeeze(2)

        # Penalty
        for b, l_header1 in enumerate(l_header):
            if l_header1 < max_len_header:
                att_h[b, l_header1:] = -10000000000
        p_h = self.softmax_dim1(att_h)

        if show_p_wn:
            if p_h.shape[0] != 1:
                raise Exception("Batch size should be 1.")
            fig = figure(2001)
            subplot(7, 2, 5)
            cla()
            plot(p_h[0].data.numpy(), '--rs', ms=7)
            title('wn: header_weight')
            grid(True)
            fig.canvas.draw()
            show()
            # input('Type Enter to continue.')

        # [B, max_len_header, 100] * [ B, max_len_header, 1] -> [B, max_len_header, 100] -> [B, 100]
        c_header = torch.mul(encoded_header, p_h.unsqueeze(2)).sum(1)

        # [B, 100] --> [B, 2*100] Enlarge because there are two layers.
        hidden = self.W_hidden(c_header)  # [B, 4, 200/2]
        hidden = hidden.view(bS, self.num_layer * 2, int(
            self.hidden_size / 2))  # [4, B, 100/2] # number_of_layer_layer * (bi-direction) # lstm input convention.
        hidden = hidden.transpose(0, 1).contiguous()

        cell = self.W_cell(c_header)  # [B, 4, 100/2]
        cell = cell.view(bS, self.num_layer * 2, int(self.hidden_size / 2))  # [4, B, 100/2]
        cell = cell.transpose(0, 1).contiguous()

        wenc_n = encode(self.enc_n, emb_question, len_question,
                        return_hidden=False,
                        hc0=(hidden, cell),
                        last_only=False)  # [b, n, dim]

        att_n = self.W_att_n(wenc_n).squeeze(2)  # [B, max_len, 100] -> [B, max_len, 1] -> [B, max_len]

        # Penalty
        for b, l_n1 in enumerate(len_question):
            if l_n1 < max_len_question:
                att_n[b, l_n1:] = -10000000000
        p_n = self.softmax_dim1(att_n)

        if show_p_wn:
            if p_n.shape[0] != 1:
                raise Exception("Batch size should be 1.")
            fig = figure(2001)
            subplot(7, 2, 6)
            cla()
            plot(p_n[0].data.numpy(), '--rs', ms=7)
            title('wn: nlu_weight')
            grid(True)
            fig.canvas.draw()
            show()
            # input('Type Enter to continue.')

        # [B, mL_n, 100] *([B, mL_n] -> [B, mL_n, 1] -> [B, mL_n, 100] ) -> [B, 100]
        c_n = torch.mul(wenc_n, p_n.unsqueeze(2).expand_as(wenc_n)).sum(dim=1)
        s_wn = self.wn_out(c_n)
        s_wn_softmax = self.softmax_dim_1(s_wn)
        return s_wn, s_wn_softmax

# where column predict
class WhereColumnPredict(nn.Module):
    def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3):
        super(WhereColumnPredict, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.dropout = dropout

        self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)
        self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)

        self.W_att = nn.Linear(hidden_size, hidden_size)
        self.W_c = nn.Linear(hidden_size, hidden_size)
        self.W_header = nn.Linear(hidden_size, hidden_size)
        self.W_out = nn.Sequential(
            nn.Tanh(), nn.Linear(2 * hidden_size, 1)
        )

        self.softmax_dim1 = nn.Softmax(dim=1)
        self.softmax_dim2 = nn.Softmax(dim=2)
        self.softmax_dim_1 = nn.Softmax(dim=-1)

    def forward(self, emb_question, len_question, emb_header, len_header_token,
                l_header, show_p_wc, penalty=True):
        # Encode
        encoded_question = encode(self.enc_n, emb_question, len_question,
                                  return_hidden=False,
                                  hc0=None,
                                  last_only=False)  # [b, n, dim]
        encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header)  # [b, header, dim]

        # attention
        # wenc = [bS, mL, hidden_size]
        # att = [bS, max_len_header, mL_n]
        # att[b, i_h, j_n] = p(j_n| i_h)
        att = torch.bmm(encoded_header, self.W_att(encoded_question).transpose(1, 2))

        # penalty to blank part.
        mL_n = max(len_question)
        for b_n, l_n1 in enumerate(len_question):
            if l_n1 < mL_n:
                att[b_n, :, l_n1:] = -10000000000

        # make p(j_n | i_h)
        p = self.softmax_dim2(att)

        if show_p_wc:
            # p = [b, header, n]
            if p.shape[0] != 1:
                raise Exception("Batch size should be 1.")
            fig = figure(2001)
            # subplot(6,2,7)
            subplot2grid((7, 2), (3, 1), rowspan=2)
            cla()
            _color = 'rgbkcm'
            _symbol = '.......'
            for i_h in range(l_header[0]):
                color_idx = i_h % len(_color)
                plot(p[0][i_h][:].data.numpy() - i_h, '--' + _symbol[color_idx] + _color[color_idx], ms=7)
            title('wc: p_n for each h')
            grid(True)
            fig.tight_layout()
            fig.canvas.draw()
            show()

        # max nlu context vectors
        # [bS, max_len_header, mL_n]*[bS, max_len_header, mL_n]
        encoded_question = encoded_question.unsqueeze(1)  # [ b, n, dim] -> [b, 1, n, dim]
        p = p.unsqueeze(3)  # [b, header, n] -> [b, header, n, 1]
        c_n = torch.mul(encoded_question, p).sum(2)  # -> [b, header, dim], c_n for each header.

        y = torch.cat([self.W_c(c_n), self.W_header(encoded_header)], dim=2)  # [b, header, 2*dim]
        score = self.W_out(y).squeeze(2)  # [b, header]
        score[torch.isnan(score)] = 0
        score_softmax = self.softmax_dim_1(score)

        if penalty:
            for b, l_header1 in enumerate(l_header):
                score[b, l_header1:] = -1e+10
            for b, l_header1 in enumerate(l_header):
                score_softmax[b, l_header1:] = 0

        return score, score_softmax

# where op predict
class WhereOpPredict(nn.Module):
    def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_cond_ops=3):
        super(WhereOpPredict, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.dropout = dropout

        self.mL_w = 4  # max where condition number

        self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)
        self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)

        self.W_att = nn.Linear(hidden_size, hidden_size)
        self.W_c = nn.Linear(hidden_size, hidden_size)
        self.W_header = nn.Linear(hidden_size, hidden_size)
        self.wo_out = nn.Sequential(
            nn.Linear(2 * hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, n_cond_ops)
        )

        self.softmax_dim1 = nn.Softmax(dim=1)
        self.softmax_dim2 = nn.Softmax(dim=2)
        self.softmax_dim_1 = nn.Softmax(dim=-1)

    def forward(self, emb_question, len_question, emb_header, len_header_token,
                l_header, wn, wc, wenc_n=None, show_p_wo=False):
        # Encode
        if not wenc_n:
            wenc_n = encode(self.enc_n, emb_question, len_question,
                            return_hidden=False,
                            hc0=None,
                            last_only=False)  # [b, n, dim]
        encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header)  # [b, header, dim]

        bS = len(l_header)
        # wn

        wenc_header_ob = []  # observed header
        for b in range(bS):
            # [[...], [...]]
            # Pad list to maximum number of selections
            real = [encoded_header[b, col] for col in wc[b]]
            pad = (self.mL_w - wn[b]) * [encoded_header[b, 0]]  # this padding could be wrong. Test with zero padding later.
            wenc_header_ob1 = torch.stack(real + pad)  # It is not used in the loss function.
            wenc_header_ob.append(wenc_header_ob1)

        # list to [B, 4, dim] tensor.
        wenc_header_ob = torch.stack(wenc_header_ob)  # list to tensor.
        wenc_header_ob = wenc_header_ob.to(device)

        # [B, 1, mL_n, dim] * [B, 4, dim, 1]
        # -> [B, 4, mL_n, 1] -> [B, 4, mL_n]
        # multiplication between NLq-tokens and selected column
        att = torch.matmul(self.W_att(wenc_n).unsqueeze(1),
                           wenc_header_ob.unsqueeze(3)
                           ).squeeze(3)

        # Penalty for blank part.
        mL_n = max(len_question)
        for b, l_n1 in enumerate(len_question):
            if l_n1 < mL_n:
                att[b, :, l_n1:] = -10000000000

        p = self.softmax_dim2(att)  # p( n| selected_col )

        if show_p_wo:
            # p = [b, header, n]
            if p.shape[0] != 1:
                raise Exception("Batch size should be 1.")
            fig = figure(2001)
            # subplot(6,2,7)
            subplot2grid((7, 2), (5, 0), rowspan=2)
            cla()
            _color = 'rgbkcm'
            _symbol = '.......'
            for i_wn in range(self.mL_w):
                color_idx = i_wn % len(_color)
                plot(p[0][i_wn][:].data.numpy() - i_wn, '--' + _symbol[color_idx] + _color[color_idx], ms=7)
            title('wo: p_n for selected h')
            grid(True)
            fig.tight_layout()
            fig.canvas.draw()
            show()

        # [B, 1, mL_n, dim] * [B, 4, mL_n, 1]
        # --> [B, 4, mL_n, dim]
        # --> [B, 4, dim]
        c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2)

        # [bS, 5-1, dim] -> [bS, 5-1, 3]
        vec = torch.cat([self.W_c(c_n), self.W_header(wenc_header_ob)], dim=2)
        s_wo = self.wo_out(vec)
        s_wo_softmax = self.softmax_dim_1(s_wo)
        return s_wo, s_wo_softmax

class WhereValuePredict_startend(nn.Module):
    """
    Discriminative model
    Get start and end.
    Here, classifier for [ [pitcher], [team1], [team2], [year], ...]
    Input: Encoded nlu & selected column.
    Algorithm: Encoded nlu & selected column. -> classifier -> mask scores -> ...
    """
    def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_cond_ops=4, old=False):
        super(WhereValuePredict_startend, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.dropout = dropout
        self.n_cond_ops = n_cond_ops

        self.mL_w = 4  # max where condition number

        self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)
        self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)

        self.W_att = nn.Linear(hidden_size, hidden_size)
        self.W_c = nn.Linear(hidden_size, hidden_size)
        self.W_header = nn.Linear(hidden_size, hidden_size)
        self.W_op = nn.Linear(n_cond_ops, hidden_size)

        # self.W_n = nn.Linear(hidden_size, hidden_size)
        if old:
            self.wv_out = nn.Sequential(
                nn.Linear(4 * hidden_size, 2)
            )
        else:
            self.wv_out = nn.Sequential(
                nn.Linear(4 * hidden_size, hidden_size),
                nn.Tanh(),
                nn.Linear(hidden_size, 2)
            )
        # self.wv_out = nn.Sequential(
        #     nn.Linear(3 * hidden_size, hidden_size),
        #     nn.Tanh(),
        #     nn.Linear(hidden_size, self.gdkL)
        # )

        self.softmax_dim1 = nn.Softmax(dim=1)
        self.softmax_dim2 = nn.Softmax(dim=2)
        self.softmax_dim_1 = nn.Softmax(dim=-1)

    def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, wn, wc, wo, wenc_n=None, show_p_wv=False):
        # Encode
        if not wenc_n:
            wenc_n, hout, cout = encode(self.enc_n, emb_question, len_question,
                                        return_hidden=True,
                                        hc0=None,
                                        last_only=False)  # [b, n, dim]
        encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header)  # [b, header, dim]

        bS = len(l_header)

        wenc_header_ob = []  # observed header
        for b in range(bS):
            # [[...], [...]]
            # Pad list to maximum number of selections
            real = [encoded_header[b, col] for col in wc[b]]
            pad = (self.mL_w - wn[b]) * [encoded_header[b, 0]]  # this padding could be wrong. Test with zero padding later.
            wenc_header_ob1 = torch.stack(real + pad)  # It is not used in the loss function.
            wenc_header_ob.append(wenc_header_ob1)

        # list to [B, 4, dim] tensor.
        wenc_header_ob = torch.stack(wenc_header_ob)  # list to tensor.
        wenc_header_ob = wenc_header_ob.to(device)

        # Column attention
        # [B, 1, mL_n, dim] * [B, 4, dim, 1]
        # -> [B, 4, mL_n, 1] -> [B, 4, mL_n]
        # multiplication between NLq-tokens and selected column
        att = torch.matmul(self.W_att(wenc_n).unsqueeze(1),
                           wenc_header_ob.unsqueeze(3)
                           ).squeeze(3)

        # Penalty for blank part.
        mL_n = max(len_question)
        for b, l_n1 in enumerate(len_question):
            if l_n1 < mL_n:
                att[b, :, l_n1:] = -10000000000

        p = self.softmax_dim2(att)  # p( n| selected_col )

        if show_p_wv:
            # p = [b, header, n]
            if p.shape[0] != 1:
                raise Exception("Batch size should be 1.")
            fig = figure(2001)
            # subplot(6,2,7)
            subplot2grid((7, 2), (5, 1), rowspan=2)
            cla()
            _color = 'rgbkcm'
            _symbol = '.......'
            for i_wn in range(self.mL_w):
                color_idx = i_wn % len(_color)
                plot(p[0][i_wn][:].data.numpy() - i_wn, '--' + _symbol[color_idx] + _color[color_idx], ms=7)
            title('wv: p_n for selected h')
            grid(True)
            fig.tight_layout()
            fig.canvas.draw()
            show()

        # [B, 1, mL_n, dim] * [B, 4, mL_n, 1]
        # --> [B, 4, mL_n, dim]
        # --> [B, 4, dim]
        c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2)

        # Select observed headers only.
        # Also generate one_hot vector encoding info of the operator
        # [B, 4, dim]
        wenc_op = []
        for b in range(bS):
            # [[...], [...]]
            # Pad list to maximum number of selections
            wenc_op1 = torch.zeros(self.mL_w, self.n_cond_ops)
            wo1 = wo[b]
            idx_scatter = []
            l_wo1 = len(wo1)
            for i_wo11 in range(self.mL_w):
                if i_wo11 < l_wo1:
                    wo11 = wo1[i_wo11]
                    idx_scatter.append([int(wo11)])
                else:
                    idx_scatter.append([0])  # not used anyway

            wenc_op1 = wenc_op1.scatter(1, torch.tensor(idx_scatter), 1)
            wenc_op.append(wenc_op1)

        # list to [B, 4, dim] tensor.
        wenc_op = torch.stack(wenc_op)  # list to tensor.
        wenc_op = wenc_op.to(device)

        # Now after concat, calculate logits for each token
        # [bS, 5-1, 3*hidden_size] = [bS, 4, 300]
        vec = torch.cat([self.W_c(c_n), self.W_header(wenc_header_ob), self.W_op(wenc_op)], dim=2)

        # Make extended vector based on encoded nl token containing column and operator information.
        # wenc_n = [bS, mL, 100]
        # vec2 = [bS, 4, mL, 400]
        vec1e = vec.unsqueeze(2).expand(-1, -1, mL_n, -1)  # [bS, 4, 1, 300] -> [bS, 4, mL, 300]
        wenc_ne = wenc_n.unsqueeze(1).expand(-1, 4, -1, -1)  # [bS, 1, mL, 100] -> [bS, 4, mL, 100]
        vec2 = torch.cat([vec1e, wenc_ne], dim=3)

        # now make logits
        s_wv = self.wv_out(vec2)  # [bS, 4, mL, 400] -> [bS, 4, mL, 2]
        s_wv_softmax = self.softmax_dim_1(s_wv)

        # penalty for spurious tokens
        for b, l_n1 in enumerate(len_question):
            if l_n1 < mL_n:
                s_wv[b, :, l_n1:, :] = -10000000000
        for b, l_n1 in enumerate(len_question):
            if l_n1 < mL_n:
                s_wv_softmax[b, :, l_n1:, :] = 0

        return s_wv, s_wv_softmax

def Loss_selectwhere_startend_v2(score_select_column, s_sa, s_wn, s_wc, s_wo,
                                 s_wv, ground_truth_select_column, g_sa, g_wn, g_wc, g_wo, g_wvi):
    """
    :param s_wv: score  [ B, n_conds, T, score]
    :param g_wn: [ B ]
    :param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
    :return:
    """
    loss = 0
    # loss += Loss_sc(score_select_column, ground_truth_select_column)
    # loss += Loss_sa(s_sa, g_sa)
    # loss += Loss_wn(s_wn, g_wn)
    # loss += Loss_wc(s_wc, g_wc)
    # loss += Loss_wo(s_wo, g_wn, g_wo)
    # loss += Loss_wv_se(s_wv, g_wn, g_wvi)
    return loss


def Loss_sw_se(score_select_column, s_sa, s_wn, s_wc, s_wo,
               s_wv, ground_truth_select_column, g_sa, g_wn, g_wc, g_wo, g_wvi):
    """
    :param s_wv: score  [ B, n_conds, T, score]
    :param g_wn: [ B ]
    :param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
    :return:
    """
    loss = 0
    loss += Loss_sc(score_select_column, ground_truth_select_column)
    loss += Loss_sa(s_sa, g_sa)
    loss += Loss_wn(s_wn, g_wn)
    loss += Loss_wc(s_wc, g_wc)
    loss += Loss_wo(s_wo, g_wn, g_wo)
    loss += Loss_wv_se(s_wv, g_wn, g_wvi)
    return loss


def Loss_sc(s_sc, g_sc):
    loss = F.cross_entropy(s_sc, torch.tensor(g_sc).to(device))
    return loss


def Loss_sa(s_sa, g_sa):
    loss = F.cross_entropy(s_sa, torch.tensor(g_sa).to(device))
    return loss


def Loss_wn(s_wn, g_wn):
    loss = F.cross_entropy(s_wn, torch.tensor(g_wn).to(device))
    return loss


def Loss_wc(s_wc, g_wc):
    # Construct index matrix
    bS, max_h_len = s_wc.shape
    im = torch.zeros([bS, max_h_len]).to(device)
    for b, g_wc1 in enumerate(g_wc):
        for g_wc11 in g_wc1:
            im[b, g_wc11] = 1.0
    # Construct prob.
    p = F.sigmoid(s_wc)
    loss = F.binary_cross_entropy(p, im)
    return loss


def Loss_wo(s_wo, g_wn, g_wo):
    # Construct index matrix
    loss = 0
    for b, g_wn1 in enumerate(g_wn):
        if g_wn1 == 0:
            continue
        g_wo1 = g_wo[b]
        s_wo1 = s_wo[b]
        loss += F.cross_entropy(s_wo1[:g_wn1], torch.tensor(g_wo1).to(device))
    return loss


def Loss_wv_se(s_wv, g_wn, g_wvi):
    """
    s_wv: [bS, 4, mL, 2], 4 stands for maximum # of condition, 2 stands for start & end logits.
    g_wvi: [ [1, 3, 2], [4, 3] ] (when B=2, wn(b=1) = 3, wn(b=2) = 2).
    """
    loss = 0
    # g_wvi = torch.tensor(g_wvi).to(device)
    for b, g_wvi1 in enumerate(g_wvi):
        # for i_wn, g_wvi11 in enumerate(g_wvi1):
        g_wn1 = len(g_wvi1)  # modified
        # g_wn1 = g_wn[b]  # modified
        if g_wn1 == 0:
            continue
        g_wvi1 = torch.tensor(g_wvi1)[:g_wn1].to(device)  # modified
        g_st1 = g_wvi1[:, 0]
        g_ed1 = g_wvi1[:, 1]
        # loss from the start position
        loss += F.cross_entropy(s_wv[b, :g_wn1, :, 0], g_st1)
        # print("st_login: ", s_wv[b,:g_wn1,:,0], g_st1, loss)

        # loss from the end position
        loss += F.cross_entropy(s_wv[b, :g_wn1, :, 1], g_ed1)
        # print("ed_login: ", s_wv[b,:g_wn1,:,1], g_ed1, loss)
    return loss

# ========= Decoder-Layer ===========
class FT_s2s_1(nn.Module):
    """ Decoder-Layer """
    def __init__(self, input_size, hidden_size, num_layer, dropout, max_seq_length, n_cond_ops, n_agg_ops, old=False):
        super(FT_s2s_1, self).__init__()

        self.input_size = input_size  # input_size
        self.hidden_size = hidden_size  # hidden_size
        self.ls = num_layer
        self.dropout = dropout
        self.n_cond_ops = n_cond_ops
        self.n_agg_ops = n_agg_ops
        self.n_where_num = 4

        self.decoder_s2s = Decoder_s2s(input_size, hidden_size, num_layer, dropout, max_seq_length)

    def forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs=None):
        score = self.decoder_s2s(wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs)
        return score

    def EG_forward(self, wenc_s2s, l_input, cls_vec,
                   pnt_start_tok, pnt_end_tok,
                   i_sql_vocab, i_nlu, i_hds,  # for EG
                   tokens, nlu, nlu_t, hds, tt_to_t_idx,  # for EG
                   tb, engine,
                   beam_size=4, beam_only=True):
        """ EG-guided beam-search """
        score = self.decoder_s2s.EG_forward(wenc_s2s, l_input, cls_vec,
                                            pnt_start_tok, pnt_end_tok,
                                            i_sql_vocab, i_nlu, i_hds,  # for EG
                                            tokens, nlu, nlu_t, hds, tt_to_t_idx,  # for EG
                                            tb, engine,
                                            beam_size, beam_only)
        return score

class Decoder_s2s(nn.Module):
    def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, max_seq_length=222, n_cond_ops=3):
        super(Decoder_s2s, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.dropout = dropout
        self.mL = max_seq_length
        self.Tmax = 200

        self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)
        self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
                             num_layers=num_layer, batch_first=True,
                             dropout=dropout, bidirectional=True)
        self.decode_pn = nn.LSTM(input_size=max_seq_length, hidden_size=hidden_size,
                                 num_layers=num_layer, batch_first=True,
                                 dropout=dropout)

        self.W_s2s = nn.Linear(input_size, hidden_size)
        self.W_pnt = nn.Linear(hidden_size, hidden_size)
        self.wv_out = nn.Sequential(nn.Tanh(), nn.Linear(hidden_size, 1))

    def forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs=None):
        # Encode
        bS, mL_input, input_size = wenc_s2s.shape

        # Now, pointer network.
        ipnt = wenc_s2s.new_zeros(bS, 1, mL_input).to(device)  # [B, 1, 200]
        ipnt[:, 0, pnt_start_tok] = 1  # 27 is of start token under current tokenization scheme

        # initial (current) pointer
        cpnt = ipnt

        # reshape wenc_s2s to incorporate T later
        wenc_s2s = wenc_s2s.unsqueeze(1)

        # h_0 and c_0 from cls_vec
        # They are not bidirectional.
        h_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
        c_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
        for i_layer in range(self.num_layer):
            h_st = (2 * i_layer) * self.hidden_size
            h_ed = h_st + self.hidden_size

            c_st = (2 * i_layer + 1) * self.hidden_size
            c_ed = c_st + self.hidden_size

            h_0[i_layer] = cls_vec[:, h_st:h_ed]  # [ # of layers, batch, dim]
            c_0[i_layer] = cls_vec[:, c_st:c_ed]  # [ # of layers, batch, dim]

        if g_pnt_idxs:
            pnt_n = torch.zeros(bS, self.Tmax, mL_input).to(device)  # one hot
            # assign index
            for b, g_pnt_idxs1 in enumerate(g_pnt_idxs):
                for t, g_pnt_idx in enumerate(g_pnt_idxs1):
                    pnt_n[b, t, g_pnt_idx] = 1

            # Encode
            dec_pn, _ = self.decode_pn(pnt_n, (h_0, c_0))
            dec_pn = dec_pn.contiguous()

            # [bS, T, input_size]
            dec_pn = dec_pn.unsqueeze(2)

            # Calculate score
            s_wv = self.wv_out(
                self.W_s2s(wenc_s2s)
                + self.W_pnt(dec_pn)
            ).squeeze(3)  # [B, T, mL_input, dim] -> [B, T, mL_input, 1] -> [B, T, mL_input]
            # s_wv = [B, 4, T, mL_n] = [batch, conds, token idx, score]

            # penalty
            for b, l_input1 in enumerate(l_input):
                if l_input1 < mL_input:
                    s_wv[b, :, l_input1:] = -10000000000
        else:
            t = 0
            s_wv_list = []
            cpnt_h = (h_0, c_0)
            while t < self.Tmax:
                dec_pn, cpnt_h = self.decode_pn(cpnt, cpnt_h)  # lstm

                # [B, 1, 100] -> [B, 1, 1, 100]
                dec_pn = dec_pn.unsqueeze(2)
                # [bS, T, input_size]

                # get score
                s_wv1 = self.wv_out(
                    self.W_s2s(wenc_s2s)  # [B, 1, mL_input, dim]
                    + self.W_pnt(dec_pn)  # [B, T=1, 1, dim]   Now, T=1
                ).squeeze(3)
                # s_wv = [B, 4, 1, mL_n, 1] = [batch, conds, token idx, score]
                # -> [B, 4, mL_n]

                # Masking --
                for b, l_input1 in enumerate(l_input):
                    if l_input1 < mL_input:
                        s_wv1[b, :, l_input1:] = -10000000000

                # Collect score--
                s_wv_list.append(s_wv1)

                # [B, 1, mL_input] -> [B, mL_n] -> [bS*(5-1)]
                # (max_val, max_indices)
                _val, pnt_n = s_wv1.view(bS, -1).max(dim=1)

                # formatting pnt_n as a one-hot input.
                cpnt = torch.zeros(bS, mL_input).to(device)
                # cpnt = cpnt.scatter_(dim=1, index=pnt_n.unsqueeze(1), src=1).to(device)
                cpnt = cpnt.scatter_(1, pnt_n.unsqueeze(1), 1)

                cpnt = cpnt.unsqueeze(1)  # --> [B * 4, 1, 200]
                t += 1

            s_wv = torch.stack(s_wv_list, 1)  # [B,
            s_wv = s_wv.squeeze(2)
            # # Following lines seems to be unnecessary.
            # # Penalty to blank parts
            # for b, l_input1 in enumerate(l_input):
            #     if l_input1 < mL_input:
            #         s_wv[b, :, l_input1:] = -10000000000

        return s_wv
def EG_forward(self, wenc_s2s, l_input, cls_vec,
pnt_start_tok, pnt_end_tok,
i_sql_vocab, i_nlu, i_hds, # for EG
tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG
tb, engine,
beam_size, beam_only=True):
# Encode
bS, mL_input, input_size = wenc_s2s.shape
# reshape wenc_s2s to incorperate T later
wenc_s2s = wenc_s2s.unsqueeze(1)
# h_0 and c_0 from cls_vec
# They are not bidirectional.
h_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
c_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
for i_layer in range(self.num_layer):
h_st = (2*i_layer)*self.hidden_size
h_ed = h_st + self.hidden_size
c_st = (2*i_layer+1)*self.hidden_size
c_ed = c_st + self.hidden_size
h_0[i_layer] = cls_vec[:, h_st:h_ed] # [ # of layers, batch, dim]
c_0[i_layer] = cls_vec[:, c_st:c_ed] # [ # of layers, batch, dim]
# initial (current) pointer
pnt_list_beam = []
cpnt_beam = []
cpnt_h_beam = []
for i_beam in range(beam_size):
pnt_list_beam1 = []
for b in range(bS):
pnt_list_beam1.append( [ [pnt_start_tok], 0] )
pnt_list_beam.append(pnt_list_beam1)
# initisl cpnt
# Now, initialize pointer network.
ipnt = wenc_s2s.new_zeros(bS, 1, mL_input).to(device) # [B, 1, 200]
# Distort ipnt by i_bam on purpose to avoid initial duplication of beam-search
ipnt[:, 0, pnt_start_tok] = 1 # 27 is of start token under current tokenization scheme
cpnt_beam.append(ipnt)
cpnt_h_beam.append( (h_0, c_0) )
t = 0
while t < self.Tmax:
# s_wv1_beam = []
candidates = [ [] for b in range(bS) ] # [bS]
# Generate beam
for i_beam, cpnt in enumerate(cpnt_beam):
cpnt_h = cpnt_h_beam[i_beam]
pnt_list_beam1 = pnt_list_beam[i_beam]
dec_pn, cpnt_h = self.decode_pn(cpnt, cpnt_h) # lstm
cpnt_h_beam[i_beam] = cpnt_h
# [B, 1, 100] -> [B, 1, 1, 100]
dec_pn = dec_pn.unsqueeze(2)
# [bS, T, input_size]
# get score
s_wv1 = self.wv_out(
self.W_s2s(wenc_s2s) # [B, 1, mL_input, dim]
+ self.W_pnt(dec_pn) # [B, T=1, 1, dim] Now, T=1
).squeeze(3)
# s_wv = [B, 4, 1, mL_n, 1] = [batch, conds, token idx, score]
# -> [B, 4, mL_n]
# Masking --
for b, l_input1 in enumerate(l_input):
if l_input1 < mL_input:
s_wv1[b, :, l_input1:] = -10000000000
# Get the candidates only among the input space.
prob, idxs = F.softmax(s_wv1.view(bS, -1), dim=1).topk(dim=1, k=max(l_input))
log_prob = torch.log(prob) # [bS, beam_size]
for b, log_prob1 in enumerate(log_prob):
pnt_list11, score = pnt_list_beam1[b]
for i_can, log_prob11 in enumerate(log_prob1):
# no update if last token was the end-token
previous_pnt = pnt_list11[-1]
if previous_pnt== pnt_end_tok:
new_seq = pnt_list11
new_score = score
else:
new_seq = pnt_list11 + [idxs[b][i_can].item()]
new_score = score + log_prob11.item()
_candidate = [new_seq, new_score]
candidates[b].append(_candidate)
# Execution-guided beam filtering
for b, candidates1 in enumerate(candidates):
new_pnt_list_batch1 = sorted(candidates1, key=lambda list1: list1[-1], reverse=True)
count = 0
selected_candidates1 = []
for new_pnt_list_batch11 in new_pnt_list_batch1:
if new_pnt_list_batch11 not in selected_candidates1:
if beam_only:
selected_candidates1.append(new_pnt_list_batch11)
pnt_list_beam[count][b] = new_pnt_list_batch11
count += 1
else:
# This part may need to be modified.
executable = False
testable = False
pr_i_vg_list, pr_i_vg_sub_list = gen_i_vg_from_pnt_idxs([new_pnt_list_batch11[0]], [i_sql_vocab[b]], [i_nlu[b]],
[i_hds[b]])
pr_sql_q_s2s, pr_sql_i = gen_sql_q_from_i_vg([tokens[b]], [nlu[b]], [nlu_t[b]], [hds[b]], [tt_to_t_idx[b]],
pnt_start_tok, pnt_end_tok,
[new_pnt_list_batch11[0]], pr_i_vg_list, pr_i_vg_sub_list)
# check testability from the select-clause
try:
# check whether the basic elements are present in pr_sql_i
# If so, it is testable.
idx_agg = pr_sql_i[0]["agg"]
idx_sel = pr_sql_i[0]["sel"]
testable = True
except Exception:
testable = False
# check the presence of conds
if testable:
try:
conds = pr_sql_i[0]["conds"]
except Exception:
conds = []
try:
pr_ans1 = engine.execute(tb[b]['id'], idx_sel, idx_agg, conds)
executable = bool(pr_ans1)
except Exception:
executable = False
# Keep the candidate if it executed successfully, or if it could not be tested at all.
add_candidate = executable if testable else True
if add_candidate:
selected_candidates1.append(new_pnt_list_batch11)
pnt_list_beam[count][b] = new_pnt_list_batch11
count += 1
if count == beam_size:
break
if count < beam_size:
# Not executable at all; pad the remaining beams with junk sequences.
for i_junk in range(count, beam_size):
pnt_list_beam[i_junk][b] = [[pnt_end_tok], -9999999]
# generate cpnt
# formatting pnt_n as a one-hot input.
for i_beam in range(beam_size):
cpnt = torch.zeros(bS, mL_input).to(device)
# cpnt = cpnt.scatter_(dim=1, index=pnt_n.unsqueeze(1), src=1).to(device)
idx_batch = [seq_score[0][-1] for seq_score in pnt_list_beam[i_beam]]
pnt_n = torch.tensor(idx_batch).to(device)
cpnt = cpnt.scatter_(1, pnt_n.unsqueeze(1), 1)
cpnt = cpnt.unsqueeze(1) # --> [B, t=1, mL_input]
cpnt_beam[i_beam] = cpnt
t += 1
# Generate best pr_pnt_list, p_tot
pr_pnt_idxs = []
p_list = []
for b in range(bS):
pnt_list_beam_best = pnt_list_beam[0]
pr_pnt_idxs.append(pnt_list_beam_best[b][0])
p_list.append( pnt_list_beam_best[b][1])
return pr_pnt_idxs, p_list, pnt_list_beam
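# Editor's sketch (illustrative, standalone; token ids and probabilities are made up).
# Each beam entry in EG_forward is a pair [token_id_list, accumulated_log_prob]: a
# finished hypothesis (last token == end token) is carried over unchanged, otherwise
# it is extended by one token and its log-probability accumulates.
#
# import math
# end_tok = 28                                  # assumed end-token id
# beam = [[[27, 3], -0.7], [[27, 28], -1.1]]    # [sequence, score] pairs
# candidates = []
# for seq, score in beam:
#     if seq[-1] == end_tok:
#         candidates.append([seq, score])       # frozen hypothesis
#     else:
#         for tok, p in [(5, 0.6), (28, 0.3)]:  # assumed next-token probabilities
#             candidates.append([seq + [tok], score + math.log(p)])
# candidates.sort(key=lambda c: c[-1], reverse=True)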
# ============= Shallow-Layer ===============
class FT_Scalar_1(nn.Module):
""" Shallow-Layer """
def __init__(self, input_size, hidden_size, num_layer, dropout, n_cond_ops, n_agg_ops, old=False):
super(FT_Scalar_1, self).__init__()
self.input_size = input_size # input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.n_cond_ops = n_cond_ops
self.n_agg_ops = n_agg_ops
self.n_where_num = 4
def scp(self, wemb_h, l_header):
bS, max_header_len, _ = wemb_h.shape
# s_sc
s_sc = torch.zeros(bS, max_header_len).to(device)
s_sc[:, :] = wemb_h[:, :, 0] # s_sc = [B, max_header length, 1]
# s_sc[:,:] = F.tanh(wemb_h[:,:,0]) # s_sc = [B, max_header length, 1]
# s_sc = s_sc.squeeze(2)
# masking
# print(f"s_sc {s_sc}")
for b, l_header1 in enumerate(l_header):
s_sc[b, l_header1:] = -9999999999.0
return s_sc
def sap(self, wemb_h, pr_sc, idx_st, idx_ed):
bS, max_header_len, _ = wemb_h.shape
# select of aggregation operator
s_sa = torch.zeros([bS, self.n_agg_ops]).to(device)
for b, pr_sc1 in enumerate(pr_sc):
s_sa[b, :] = wemb_h[b, pr_sc1, idx_st:idx_ed]
return s_sa
def wnp(self, cls_vec):
bS = cls_vec.shape[0]
# [B,hidden_size] -> [B, n_where_num+1]
s_wn = torch.zeros(bS, (self.n_where_num + 1)).to(device)
s_wn[:, :] = cls_vec[:, 0:(self.n_where_num + 1)]
return s_wn
def wcp(self, wemb_h, l_header, idx_st, idx_ed):
bS, max_header_len, _ = wemb_h.shape
s_wc = torch.zeros(bS, max_header_len, 1).to(device)
s_wc[:, :, :] = wemb_h[:, :, idx_st:idx_ed]
s_wc = s_wc.squeeze(2) # [B, max_header_length]
# masking
for b, l_header1 in enumerate(l_header):
s_wc[b, l_header1:] = -99999999999.0
return s_wc
def wop(self, wemb_h, pr_wc, idx_st, idx_ed):
bS, max_header_len, _ = wemb_h.shape
s_wo = torch.zeros([bS, self.n_where_num, self.n_cond_ops]).to(device)
for b, pr_wc1 in enumerate(pr_wc):
if len(pr_wc1) > 0:
s_wo[b, 0:len(pr_wc1), :] = wemb_h[b, pr_wc1, idx_st:idx_ed]
else:
pass
return s_wo
def wvp(self, emb_question, len_question, pr_wc):
bS, _, _ = emb_question.shape
s_wv = torch.zeros([bS, self.n_where_num, max(len_question), 2]).to(device)
for b, pr_wc1 in enumerate(pr_wc):
if len(pr_wc1) > 0:
# start logit
s_wv[b, 0:len(pr_wc1), :, 0] = emb_question[b, :, pr_wc1].transpose(0, 1)
# end logit
s_wv[b, 0:len(pr_wc1), :, 1] = emb_question[b, :, [pr_wc11 + 100 for pr_wc11 in pr_wc1]].transpose(0, 1)
else:
pass
# masking
# penalty for spurious tokens
for b, l_n1 in enumerate(len_question):
if l_n1 < max(len_question):
s_wv[b, :, l_n1:, :] = -1e+11
return s_wv
def forward(self, emb_question, len_question, wemb_h, l_header, cls_vec,
g_sc=None, g_sa=None, g_wn=None, g_wc=None, g_wo=None, g_wvi=None,
show_p_sc=False, show_p_sa=False,
show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
# emb_question = [B, max_nlu_token_length, hidden_size] # here, # of target_layer is fixed to 1.
# wemb_h = [B, max_header #, hidden_size]
s_sc = self.scp(wemb_h, l_header)
if g_sc:
pr_sc = g_sc
else:
pr_sc = pred_sc(s_sc)
# s_sa
idx_st = 1
idx_ed = 1 + self.n_agg_ops
s_sa = self.sap(wemb_h, pr_sc, idx_st, idx_ed)
if g_sa:
pr_sa = g_sa
else:
pr_sa = pred_sa(s_sa)
# where_number
s_wn = self.wnp(cls_vec)
if g_wn:
pr_wn = g_wn
else:
pr_wn = pred_wn(s_wn)
# wc
idx_st = idx_ed+1
idx_ed = idx_st+1
s_wc = self.wcp(wemb_h, l_header, idx_st, idx_ed)
if g_wc:
pr_wc = g_wc
else:
pr_wc = pred_wherecolumn(pr_wn, s_wc)
# wo
idx_st = idx_ed+1
idx_ed = idx_st + self.n_cond_ops
s_wo = self.wop(wemb_h, pr_wc, idx_st, idx_ed)
if g_wo:
pr_wo = g_wo
else:
pr_wo = pred_wo(pr_wn, s_wo)
# wv
# s_wv = [bS, 4, mL, 2]
s_wv = self.wvp(emb_question, len_question, pr_wc)
# print(s_wv)
# s_wv = F.tanh(s_wv)
return s_sc, s_sa, s_wn, s_wc, s_wo, s_wv
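# Editor's sketch (illustrative, standalone). The Shallow-Layer reads every logit
# directly out of fixed slices of the fine-tuned encoder output instead of through
# trained heads: header dim 0 is the select-column score, dims [1, 1 + n_agg_ops)
# are the aggregation scores, and forward() walks further slices for the
# where-clause in the same way. The tensors below are random stand-ins.
#
# import torch
# n_agg_ops = 6
# wemb_h = torch.randn(2, 5, 32)             # [B, max_header_len, hidden]
# s_sc = wemb_h[:, :, 0]                     # [B, max_header_len]
# s_sa_col0 = wemb_h[:, 0, 1:1 + n_agg_ops]  # [B, n_agg_ops] for header 0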
def forward_EG(self, emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb,
nlu_t, nlu_tt, tt_to_t_idx, nlu,
beam_size=4):
"""
Execution-guided beam decoding.
Essentially identical to that of the NL2SQL layer.
"""
# Select-clause
prob_sca, pr_sc_best, pr_sa_best, \
p_sc_best, p_sa_best, p_select \
= self.EG_decoding_select(wemb_h, l_header, tb, beam_size=beam_size)
# Where-clause
prob_w, prob_wn_w, pr_wn_based_on_prob, pr_sql_i, pr_wvi_best, \
p_where, p_wn_best, p_wc_best, p_wo_best, p_wvi_best \
= self.EG_decoding_where(emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb,
nlu_t, nlu_tt, tt_to_t_idx, nlu,
pr_sc_best, pr_sa_best,
beam_size=beam_size)
p_tot = cal_prob_tot(p_select, p_where)
return pr_sc_best, pr_sa_best, pr_wn_based_on_prob, pr_wvi_best, \
pr_sql_i, p_tot, p_select, p_where, p_sc_best, p_sa_best, \
p_wn_best, p_wc_best, p_wo_best, p_wvi_best
def EG_decoding_select(self, wemb_h, l_header, tb,
beam_size=4, show_p_sc=False, show_p_sa=False):
# sc
s_sc = self.scp(wemb_h, l_header)
prob_sc = F.softmax(s_sc, dim=-1)
bS, mcL = s_sc.shape
# minimum_header_length = min(l_header)
# beam_size = minimum_header_length if beam_size > minimum_header_length else beam_size
# sa
# Construct all possible sc_sa_score
prob_sc_sa = torch.zeros([bS, beam_size, self.n_agg_ops]).to(device)
score_sc_sa = torch.zeros([bS, beam_size, self.n_agg_ops]).to(device)
prob_sca = torch.zeros_like(prob_sc_sa).to(device)
# get the top-k indices. pr_sc_beam = [B, beam_size]
pr_sc_beam = pred_sc_beam(s_sc, beam_size)
# calculate and predict s_sa.
idx_st = 1
idx_ed = 1 + self.n_agg_ops
for i_beam in range(beam_size):
pr_sc = list(array(pr_sc_beam)[:, i_beam])
s_sa = self.sap(wemb_h, pr_sc, idx_st, idx_ed)
prob_sa = F.softmax(s_sa, dim=-1)
prob_sc_sa[:, i_beam, :] = prob_sa
score_sc_sa[:, i_beam, :] = s_sa
prob_sc_selected = prob_sc[range(bS), pr_sc] # [B]
prob_sca[:, i_beam, :] = (prob_sa.t() * prob_sc_selected).t()
# [mcL, B] * [B] -> [mcL, B] (element-wise multiplication)
# [mcL, B] -> [B, mcL]
# Calculate the dimension of tensor
# tot_dim = len(prob_sca.shape)
idxs = topk_multi_dim(torch.tensor(prob_sca), n_topk=beam_size, batch_exist=True)
# Now that sc_idx is already sorted, re-map the indices properly.
idxs = remap_sc_idx(idxs, pr_sc_beam) # [sc_beam_idx, sa_idx] -> [sc_idx, sa_idx]
idxs_arr = array(idxs)
# [B, beam_size, remaining dim]
# idxs[b][0] gives the most probable [sc_idx, sa_idx] pair.
# idxs[b][1] gives the second.
# Calculate prob_sca, a joint probability
beam_idx_sca = [0] * bS
beam_meet_the_final = [False] * bS
while True:
pr_sc = idxs_arr[range(bS), beam_idx_sca, 0]
pr_sa = idxs_arr[range(bS), beam_idx_sca, 1]
# map index properly
check = check_sc_sa_pairs(tb, pr_sc, pr_sa)
if sum(check) == bS:
break
else:
for b, check1 in enumerate(check):
if not check1: # wrong pair
beam_idx_sca[b] += 1
if beam_idx_sca[b] >= beam_size:
beam_meet_the_final[b] = True
beam_idx_sca[b] -= 1
else:
beam_meet_the_final[b] = True
if sum(beam_meet_the_final) == bS:
break
# Now pr_sc, pr_sa are properly predicted.
pr_sc_best = list(pr_sc)
pr_sa_best = list(pr_sa)
# output for later analysis.
p_sc_best = cal_prob_sc(s_sc, pr_sc_best)
p_sa_best = cal_prob_sa(score_sc_sa[range(bS), beam_idx_sca, :].squeeze(1), pr_sa_best)
p_select = cal_prob_select(p_sc_best, p_sa_best)
# p_select = prob_sca[range(bS),beam_idx_sca,pr_sa_best].detach().to('cpu').numpy()
return prob_sca, pr_sc_best, pr_sa_best, p_sc_best, p_sa_best, p_select
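# Editor's sketch (illustrative; toy probabilities). EG_decoding_select ranks
# (sc, sa) pairs by the joint probability p(sc, sa) = p(sc) * p(sa | sc) and then
# walks down that ranking until check_sc_sa_pairs accepts the pair (i.e., the
# aggregation operator is compatible with the column type).
#
# import torch
# prob_sc = torch.tensor([0.7, 0.3])        # two candidate select columns
# prob_sa = torch.tensor([[0.6, 0.4],       # p(sa | sc=0)
#                         [0.9, 0.1]])      # p(sa | sc=1)
# prob_sca = prob_sa * prob_sc.unsqueeze(1) # joint: [[0.42, 0.28], [0.27, 0.03]]
# best = int(prob_sca.flatten().argmax())   # 0 -> (sc=0, sa=0)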
def EG_decoding_where(self, emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb,
nlu_t, nlu_wp_t, tt_to_t_idx, nlu,
pr_sc_best, pr_sa_best,
beam_size=4, show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
bS, max_header_len, _ = wemb_h.shape
# Now, Where-clause beam search.
idx_st = 1
idx_ed = 1 + self.n_agg_ops
s_wn = self.wnp(cls_vec)
prob_wn = F.softmax(s_wn, dim=-1).detach().to('cpu').numpy()
# Found "executable" most likely 4(=max_num_of_conditions) where-clauses.
# wc
idx_st = idx_ed + 1
idx_ed = idx_st + 1
s_wc = self.wcp(wemb_h, l_header, idx_st, idx_ed)
prob_wc = torch.sigmoid(s_wc).detach().to('cpu').numpy()
# pr_wc_sorted_by_prob = pred_wc_sorted_by_prob(s_wc)
# get max_wn # of most probable columns & their prob.
pr_wn_max = [self.n_where_num] * bS
pr_wc_max = pred_wherecolumn(pr_wn_max, s_wc) # if a column does not have an executable where-clause, omit that column
prob_wc_max = zeros([bS, self.n_where_num])
for b, pr_wc_max1 in enumerate(pr_wc_max):
prob_wc_max[b, :] = prob_wc[b, pr_wc_max1]
# get the most probable n_where_num where-clauses
# wo
idx_st = idx_ed + 1
idx_ed = idx_st + self.n_cond_ops
s_wo_max = self.wop(wemb_h, pr_wc_max, idx_st, idx_ed)
prob_wo_max = F.softmax(s_wo_max, dim=-1).detach().to('cpu').numpy()
# [B, n_where_num, n_cond_op]
pr_wvi_beam_op_list = []
prob_wvi_beam_op_list = []
prob_wvi_beam_st_op_list = []
prob_wvi_beam_ed_op_list = []
# To re-use code, the calculation is repeated unnecessarily.
for i_op in range(self.n_cond_ops - 1):
pr_wo_temp = [[i_op] * self.n_where_num] * bS
# wv
s_wv = self.wvp(emb_question, len_question, pr_wc_max)
prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy()
# prob_wv
pr_wvi_beam, prob_wvi_beam, prob_wvi_beam_st, prob_wvi_beam_ed = pred_wvi_se_beam(self.n_where_num, s_wv, beam_size)
pr_wvi_beam_op_list.append(pr_wvi_beam)
prob_wvi_beam_op_list.append(prob_wvi_beam)
prob_wvi_beam_st_op_list.append(prob_wvi_beam_st)
prob_wvi_beam_ed_op_list.append(prob_wvi_beam_ed)
# pr_wvi_beam = [B, n_where_num, k_logit**2 [st, ed] pairs]
# pred_wv_beam
# Calculate joint probability of where-clause
# prob_w = [batch, wc, wo, wv] = [B, n_where_num, n_cond_op, n_pairs]
n_wv_beam_pairs = prob_wvi_beam.shape[2]
prob_w = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wc_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wo_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wvi_st_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wvi_ed_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
for b in range(bS):
for i_wn in range(self.n_where_num):
for i_op in range(self.n_cond_ops - 1): # do not use final one
p_wc = prob_wc_max[b, i_wn]
for i_wv_beam in range(n_wv_beam_pairs):
# i_wc = pr_wc_max[b][i_wn] # already done
p_wo = prob_wo_max[b, i_wn, i_op]
p_wv = prob_wvi_beam_op_list[i_op][b, i_wn, i_wv_beam]
prob_w[b, i_wn, i_op, i_wv_beam] = p_wc * p_wo * p_wv
prob_wc_dupl[b, i_wn, i_op, i_wv_beam] = p_wc
prob_wo_dupl[b, i_wn, i_op, i_wv_beam] = p_wo
p_wv_st = prob_wvi_beam_st_op_list[i_op][b, i_wn, i_wv_beam]
p_wv_ed = prob_wvi_beam_ed_op_list[i_op][b, i_wn, i_wv_beam]
prob_wvi_st_dupl[b, i_wn, i_op, i_wv_beam] = p_wv_st
prob_wvi_ed_dupl[b, i_wn, i_op, i_wv_beam] = p_wv_ed
# Perform execution guided decoding
conds_max = []
prob_conds_max = []
# while len(conds_max) < self.n_where_num:
idxs = topk_multi_dim(torch.tensor(prob_w), n_topk=beam_size, batch_exist=True)
# idxs = [B, i_wc_beam, i_op, i_wv_pairs]
# Construct conds1. Collect only executable ones, in descending order of probability.
pr_wvi_max = []
p_wc_max = []
p_wo_max = []
p_wvi_max = []
for b, idxs1 in enumerate(idxs):
conds_max1 = []
prob_conds_max1 = []
pr_wvi1_max = []
p_wc1_max = []
p_wo1_max = []
p_wvi1_max = []
for i_wn, idxs11 in enumerate(idxs1):
i_wc = pr_wc_max[b][idxs11[0]]
i_op = idxs11[1]
wvi = pr_wvi_beam_op_list[i_op][b][idxs11[0]][idxs11[2]]
# idx11[0]
# get wv_str
temp_pr_wv_str, _ = convert_pred_wvi_to_string([[wvi]], [nlu_t[b]], [nlu_wp_t[b]], [tt_to_t_idx[b]],
[nlu[b]])
merged_wv11 = merge_wv_t1_eng(temp_pr_wv_str[0][0], nlu[b])
conds11 = [i_wc, i_op, merged_wv11]
prob_conds11 = prob_w[b, idxs11[0], idxs11[1], idxs11[2]]
p_wc11_max = prob_wc_dupl[b, idxs11[0], idxs11[1], idxs11[2]]
p_wo11_max = prob_wo_dupl[b, idxs11[0], idxs11[1], idxs11[2]]
p_wvi11_max = [ prob_wvi_st_dupl[b, idxs11[0], idxs11[1], idxs11[2]],
prob_wvi_ed_dupl[b, idxs11[0], idxs11[1], idxs11[2]] ]
# test execution
# print(nlu[b])
# print(tb[b]['id'], tb[b]['types'], pr_sc[b], pr_sa[b], [conds11])
pr_ans = engine.execute(tb[b]['id'], pr_sc_best[b], pr_sa_best[b], [conds11])
if bool(pr_ans):
# pr_ans is not empty!
conds_max1.append(conds11)
prob_conds_max1.append(prob_conds11)
pr_wvi1_max.append(wvi)
p_wc1_max.append(p_wc11_max)
p_wo1_max.append(p_wo11_max)
p_wvi1_max.append(p_wvi11_max)
conds_max.append(conds_max1)
prob_conds_max.append(prob_conds_max1)
pr_wvi_max.append(pr_wvi1_max)
p_wc_max.append(p_wc1_max)
p_wo_max.append(p_wo1_max)
p_wvi_max.append(p_wvi1_max)
# A more exhaustive search (i.e., enumerating all executable cases) may be needed.
# Calculate total probability to decide the number of where-clauses
pr_sql_i = []
prob_wn_w = [] # total where-clause probability
pr_wn_based_on_prob = []
pr_wvi_best = []
p_wc = []
p_wo = []
p_wvi = []
for b, prob_wn1 in enumerate(prob_wn):
max_executable_wn1 = len(conds_max[b])
prob_wn_w1 = []
prob_wn_w1.append(prob_wn1[0]) # wn=0 case.
for i_wn in range(max_executable_wn1):
prob_wn_w11 = prob_wn1[i_wn + 1] * prob_conds_max[b][i_wn]
prob_wn_w1.append(prob_wn_w11)
pr_wn_based_on_prob.append(argmax(prob_wn_w1))
prob_wn_w.append(prob_wn_w1)
pr_sql_i1 = {'agg': pr_sa_best[b], 'sel': pr_sc_best[b], 'conds': conds_max[b][:pr_wn_based_on_prob[b]]}
pr_wvi_best1 = pr_wvi_max[b][:pr_wn_based_on_prob[b]]
pr_sql_i.append(pr_sql_i1)
pr_wvi_best.append(pr_wvi_best1)
p_wc.append( p_wc_max[b][:pr_wn_based_on_prob[b]] )
p_wo.append( p_wo_max[b][:pr_wn_based_on_prob[b]] )
p_wvi.append( p_wvi_max[b][:pr_wn_based_on_prob[b]] )
# s_wv = [B, n_where_num, max_nlu_tokens, 2]
p_wn = cal_prob_wn(s_wn, pr_wn_based_on_prob)
p_where = cal_prob_where(p_wn, p_wc, p_wo, p_wvi)
return prob_w, prob_wn_w, pr_wn_based_on_prob, pr_sql_i, pr_wvi_best, \
p_where, p_wn, p_wc, p_wo, p_wvi
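# Editor's sketch (illustrative; toy numbers). The number of where-conditions is
# chosen exactly as prob_wn_w1 is built above: compare p(wn=0) against
# p(wn=k) * (probability of the k-th best executable condition), then take the argmax.
#
# import numpy as np
# prob_wn = np.array([0.2, 0.5, 0.3])   # p(wn = 0), p(wn = 1), p(wn = 2)
# prob_conds_max = [0.8, 0.4]           # per-condition joint probs, descending
# prob_wn_w = [prob_wn[0]]
# for i, pc in enumerate(prob_conds_max):
#     prob_wn_w.append(prob_wn[i + 1] * pc)
# pr_wn = int(np.argmax(prob_wn_w))     # -> 1 here, since 0.5 * 0.8 = 0.4 wins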
def Loss_s2s(score, g_pnt_idxs):
"""
score = [B, T, max_seq_length]
"""
# WHERE string part
loss = 0
for b, g_pnt_idxs1 in enumerate(g_pnt_idxs):
ed = len(g_pnt_idxs1) - 1
score_part = score[b, :ed]
loss += F.cross_entropy(score_part, torch.tensor(g_pnt_idxs1[1:]).to(device)) # +1 shift.
return loss
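# Editor's sketch (assumption: toy shapes and token ids; the module-level `device`
# and `F` are assumed to exist as in the rest of this file). Loss_s2s teacher-forces
# the pointer decoder: for a gold sequence [start, t1, ..., end], the first len-1
# score rows are matched against the sequence shifted left by one.
if __name__ == "__main__":
    import torch
    score = torch.randn(2, 4, 200)                # [B, T, max_seq_length]
    g_pnt_idxs = [[27, 3, 9, 28], [27, 11, 28]]   # assumed start id 27, end id 28
    print(Loss_s2s(score, g_pnt_idxs))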
| 39.419083 | 161 | 0.555163 | 10,973 | 74,778 | 3.441903 | 0.05313 | 0.039187 | 0.02187 | 0.018005 | 0.766522 | 0.732075 | 0.704671 | 0.683595 | 0.661115 | 0.641284 | 0 | 0.030277 | 0.333948 | 74,778 | 1,896 | 162 | 39.439873 | 0.728025 | 0.168378 | 0 | 0.597066 | 0 | 0 | 0.006951 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035375 | false | 0.002588 | 0.006903 | 0 | 0.077653 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 3 |
d9f32d2b9e677d6893c7269bf23bcedaa4e7f68a
| 363 |
py
|
Python
|
chia/components/sample_transformers/__init__.py
|
cabrust/chia
|
3eaf815b261dc8a85d64fd698e0079515ec0dde9
|
[
"BSD-3-Clause"
] | null | null | null |
chia/components/sample_transformers/__init__.py
|
cabrust/chia
|
3eaf815b261dc8a85d64fd698e0079515ec0dde9
|
[
"BSD-3-Clause"
] | 2 |
2021-10-06T13:19:09.000Z
|
2021-10-20T17:32:36.000Z
|
chia/components/sample_transformers/__init__.py
|
cabrust/chia
|
3eaf815b261dc8a85d64fd698e0079515ec0dde9
|
[
"BSD-3-Clause"
] | null | null | null |
from chia import components
from chia.components.sample_transformers import identity
from chia.components.sample_transformers.sample_transformer import SampleTransformer
class SampleTransformerFactory(components.Factory):
name_to_class_mapping = {"identity": identity.IdentitySampleTransformer}
__all__ = ["SampleTransformer", "SampleTransformerFactory"]
| 33 | 84 | 0.85124 | 34 | 363 | 8.794118 | 0.5 | 0.080268 | 0.120401 | 0.160535 | 0.240803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.082645 | 363 | 10 | 85 | 36.3 | 0.897898 | 0 | 0 | 0 | 0 | 0 | 0.134986 | 0.066116 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 |
0
| 3 |
d9f3cb72d610ec30e4ecf05d60ba2025dc849112
| 416 |
py
|
Python
|
3/3.6/add_guest.py
|
singi2016cn/python-scaffold
|
274e508d1919da67e599aa73be139800c043bce4
|
[
"MIT"
] | null | null | null |
3/3.6/add_guest.py
|
singi2016cn/python-scaffold
|
274e508d1919da67e599aa73be139800c043bce4
|
[
"MIT"
] | null | null | null |
3/3.6/add_guest.py
|
singi2016cn/python-scaffold
|
274e508d1919da67e599aa73be139800c043bce4
|
[
"MIT"
] | null | null | null |
# Add guests
names = []
names.append('singi')
names.append('lily')
names.append('sam')
print('I found a big dining table, so I can invite more friends.')
names.insert(0, 'xiaoling')
names.insert(2, 'fangsi')
names.append('zhangqing')
greets = ', would you like to have dinner with me?'
print(names[0]+greets)
print(names[1]+greets)
print(names[2]+greets)
print(names[3]+greets)
print(names[4]+greets)
print(names[5]+greets)
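# Editor's note: an equivalent of the six print() calls above; iterating over the
# list avoids hard-coded indexes and keeps working if the guest list changes.
for name in names:
    print(name + greets)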
| 20.8 | 61 | 0.711538 | 67 | 416 | 4.41791 | 0.537313 | 0.202703 | 0.27027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02139 | 0.100962 | 416 | 20 | 62 | 20.8 | 0.770053 | 0.009615 | 0 | 0 | 0 | 0 | 0.309002 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.466667 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 3 |
8a03248b6fead646cb68e7a6a935435de664969c
| 14,492 |
py
|
Python
|
anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/base_context.py
|
space-scl/emacs.d
|
6285c38714023b72a023fe24cbcb5e4fcdcdb949
|
[
"Apache-2.0"
] | 4 |
2019-07-26T11:32:22.000Z
|
2019-09-11T05:34:59.000Z
|
anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/base_context.py
|
space-scl/emacs.d
|
6285c38714023b72a023fe24cbcb5e4fcdcdb949
|
[
"Apache-2.0"
] | 10 |
2020-05-11T20:29:28.000Z
|
2022-01-13T01:41:27.000Z
|
anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/base_context.py
|
space-scl/emacs.d
|
6285c38714023b72a023fe24cbcb5e4fcdcdb949
|
[
"Apache-2.0"
] | 2 |
2019-08-28T14:57:54.000Z
|
2019-11-26T16:18:30.000Z
|
"""
Contexts are the "values" that Python would return. However Contexts are at the
same time also the "contexts" that a user is currently sitting in.
A ContextSet is typically used to specify the return of a function or any other
static analysis operation. In jedi there are always multiple returns and not
just one.
"""
from functools import reduce
from operator import add
from parso.python.tree import ExprStmt, SyncCompFor
from jedi import debug
from jedi._compatibility import zip_longest, unicode
from jedi.parser_utils import clean_scope_docstring
from jedi.common import BaseContextSet, BaseContext
from jedi.evaluate.helpers import SimpleGetItemNotFound
from jedi.evaluate.utils import safe_property
from jedi.evaluate.cache import evaluator_as_method_param_cache
from jedi.cache import memoize_method
_sentinel = object()
class HelperContextMixin(object):
def get_root_context(self):
context = self
while True:
if context.parent_context is None:
return context
context = context.parent_context
@classmethod
@evaluator_as_method_param_cache()
def create_cached(cls, *args, **kwargs):
return cls(*args, **kwargs)
def execute(self, arguments):
return self.evaluator.execute(self, arguments=arguments)
def execute_evaluated(self, *value_list):
from jedi.evaluate.arguments import ValuesArguments
arguments = ValuesArguments([ContextSet([value]) for value in value_list])
return self.evaluator.execute(self, arguments)
def execute_annotation(self):
return self.execute_evaluated()
def gather_annotation_classes(self):
return ContextSet([self])
def merge_types_of_iterate(self, contextualized_node=None, is_async=False):
return ContextSet.from_sets(
lazy_context.infer()
for lazy_context in self.iterate(contextualized_node, is_async)
)
def py__getattribute__(self, name_or_str, name_context=None, position=None,
search_global=False, is_goto=False,
analysis_errors=True):
"""
:param position: Position of the last statement -> tuple of line, column
"""
if name_context is None:
name_context = self
from jedi.evaluate import finder
f = finder.NameFinder(self.evaluator, self, name_context, name_or_str,
position, analysis_errors=analysis_errors)
filters = f.get_filters(search_global)
if is_goto:
return f.filter_name(filters)
return f.find(filters, attribute_lookup=not search_global)
def py__await__(self):
await_context_set = self.py__getattribute__(u"__await__")
if not await_context_set:
debug.warning('Tried to run __await__ on context %s', self)
return await_context_set.execute_evaluated()
def eval_node(self, node):
return self.evaluator.eval_element(self, node)
def create_context(self, node, node_is_context=False, node_is_object=False):
return self.evaluator.create_context(self, node, node_is_context, node_is_object)
def iterate(self, contextualized_node=None, is_async=False):
debug.dbg('iterate %s', self)
if is_async:
from jedi.evaluate.lazy_context import LazyKnownContexts
# TODO if no __aiter__ contexts are there, error should be:
# TypeError: 'async for' requires an object with __aiter__ method, got int
return iter([
LazyKnownContexts(
self.py__getattribute__('__aiter__').execute_evaluated()
.py__getattribute__('__anext__').execute_evaluated()
.py__getattribute__('__await__').execute_evaluated()
.py__stop_iteration_returns()
) # noqa
])
return self.py__iter__(contextualized_node)
def is_sub_class_of(self, class_context):
for cls in self.py__mro__():
if cls.is_same_class(class_context):
return True
return False
def is_same_class(self, class2):
# Class matching should prefer comparisons that are not this function.
if type(class2).is_same_class != HelperContextMixin.is_same_class:
return class2.is_same_class(self)
return self == class2
class Context(HelperContextMixin, BaseContext):
"""
Should be defined, otherwise the API returns empty types.
"""
predefined_names = {}
"""
To be defined by subclasses.
"""
tree_node = None
@property
def api_type(self):
# By default, just the lowercased name of the class. Can and should be
# overwritten.
return self.__class__.__name__.lower()
def py__getitem__(self, index_context_set, contextualized_node):
from jedi.evaluate import analysis
# TODO this context is probably not right.
analysis.add(
contextualized_node.context,
'type-error-not-subscriptable',
contextualized_node.node,
message="TypeError: '%s' object is not subscriptable" % self
)
return NO_CONTEXTS
def py__iter__(self, contextualized_node=None):
if contextualized_node is not None:
from jedi.evaluate import analysis
analysis.add(
contextualized_node.context,
'type-error-not-iterable',
contextualized_node.node,
message="TypeError: '%s' object is not iterable" % self)
return iter([])
def get_signatures(self):
return []
def is_class(self):
return False
def is_instance(self):
return False
def is_function(self):
return False
def is_module(self):
return False
def is_namespace(self):
return False
def is_compiled(self):
return False
def is_bound_method(self):
return False
def py__bool__(self):
"""
Since Wrapper is a super class for classes, functions and modules,
the return value will always be true.
"""
return True
def py__doc__(self):
try:
self.tree_node.get_doc_node
except AttributeError:
return ''
else:
return clean_scope_docstring(self.tree_node)
return None
def get_safe_value(self, default=_sentinel):
if default is _sentinel:
raise ValueError("There exists no safe value for context %s" % self)
return default
def py__call__(self, arguments):
debug.warning("no execution possible %s", self)
return NO_CONTEXTS
def py__stop_iteration_returns(self):
debug.warning("Not possible to return the stop iterations of %s", self)
return NO_CONTEXTS
def get_qualified_names(self):
# Returns Optional[Tuple[str, ...]]
return None
def is_stub(self):
# The root context knows if it's a stub or not.
return self.parent_context.is_stub()
def iterate_contexts(contexts, contextualized_node=None, is_async=False):
"""
Calls `iterate` on all contexts, but ignores the ordering and just returns
all contexts that the iterate functions yield.
"""
return ContextSet.from_sets(
lazy_context.infer()
for lazy_context in contexts.iterate(contextualized_node, is_async=is_async)
)
class _ContextWrapperBase(HelperContextMixin):
predefined_names = {}
@safe_property
def name(self):
from jedi.evaluate.names import ContextName
wrapped_name = self._wrapped_context.name
if wrapped_name.tree_name is not None:
return ContextName(self, wrapped_name.tree_name)
else:
from jedi.evaluate.compiled import CompiledContextName
return CompiledContextName(self, wrapped_name.string_name)
@classmethod
@evaluator_as_method_param_cache()
def create_cached(cls, evaluator, *args, **kwargs):
return cls(*args, **kwargs)
def __getattr__(self, name):
assert name != '_wrapped_context', 'Problem with _get_wrapped_context'
return getattr(self._wrapped_context, name)
class LazyContextWrapper(_ContextWrapperBase):
@safe_property
@memoize_method
def _wrapped_context(self):
with debug.increase_indent_cm('Resolve lazy context wrapper'):
return self._get_wrapped_context()
def __repr__(self):
return '<%s>' % (self.__class__.__name__)
def _get_wrapped_context(self):
raise NotImplementedError
class ContextWrapper(_ContextWrapperBase):
def __init__(self, wrapped_context):
self._wrapped_context = wrapped_context
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._wrapped_context)
class TreeContext(Context):
def __init__(self, evaluator, parent_context, tree_node):
super(TreeContext, self).__init__(evaluator, parent_context)
self.predefined_names = {}
self.tree_node = tree_node
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
class ContextualizedNode(object):
def __init__(self, context, node):
self.context = context
self.node = node
def get_root_context(self):
return self.context.get_root_context()
def infer(self):
return self.context.eval_node(self.node)
def __repr__(self):
return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context)
class ContextualizedName(ContextualizedNode):
# TODO merge with TreeNameDefinition?!
@property
def name(self):
return self.node
def assignment_indexes(self):
"""
Returns an array of tuple(int, node) of the indexes that are used in
tuple assignments.
For example if the name is ``y`` in the following code::
x, (y, z) = 2, ''
would result in ``[(1, xyz_node), (0, yz_node)]``.
When searching for b in the case ``a, *b, c = [...]`` it will return::
[(slice(1, -1), abc_node)]
"""
indexes = []
is_star_expr = False
node = self.node.parent
compare = self.node
while node is not None:
if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'):
for i, child in enumerate(node.children):
if child == compare:
index = int(i / 2)
if is_star_expr:
from_end = int((len(node.children) - i) / 2)
index = slice(index, -from_end)
indexes.insert(0, (index, node))
break
else:
raise LookupError("Couldn't find the assignment.")
is_star_expr = False
elif node.type == 'star_expr':
is_star_expr = True
elif isinstance(node, (ExprStmt, SyncCompFor)):
break
compare = node
node = node.parent
return indexes
def _getitem(context, index_contexts, contextualized_node):
from jedi.evaluate.context.iterable import Slice
# The actual getitem call.
simple_getitem = getattr(context, 'py__simple_getitem__', None)
result = NO_CONTEXTS
unused_contexts = set()
for index_context in index_contexts:
if simple_getitem is not None:
index = index_context
if isinstance(index_context, Slice):
index = index.obj
try:
method = index.get_safe_value
except AttributeError:
pass
else:
index = method(default=None)
if type(index) in (float, int, str, unicode, slice, bytes):
try:
result |= simple_getitem(index)
continue
except SimpleGetItemNotFound:
pass
unused_contexts.add(index_context)
# The index was somehow not good enough or simply a wrong type.
# Therefore we now iterate through all the contexts and just take
# all results.
if unused_contexts or not index_contexts:
result |= context.py__getitem__(
ContextSet(unused_contexts),
contextualized_node
)
debug.dbg('py__getitem__ result: %s', result)
return result
class ContextSet(BaseContextSet):
def py__class__(self):
return ContextSet(c.py__class__() for c in self._set)
def iterate(self, contextualized_node=None, is_async=False):
from jedi.evaluate.lazy_context import get_merged_lazy_context
type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set]
for lazy_contexts in zip_longest(*type_iters):
yield get_merged_lazy_context(
[l for l in lazy_contexts if l is not None]
)
def execute(self, arguments):
return ContextSet.from_sets(c.evaluator.execute(c, arguments) for c in self._set)
def execute_evaluated(self, *args, **kwargs):
return ContextSet.from_sets(c.execute_evaluated(*args, **kwargs) for c in self._set)
def py__getattribute__(self, *args, **kwargs):
if kwargs.get('is_goto'):
return reduce(add, [c.py__getattribute__(*args, **kwargs) for c in self._set], [])
return ContextSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set)
def get_item(self, *args, **kwargs):
return ContextSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set)
def try_merge(self, function_name):
context_set = self.__class__([])
for c in self._set:
try:
method = getattr(c, function_name)
except AttributeError:
pass
else:
context_set |= method()
return context_set
def gather_annotation_classes(self):
return ContextSet.from_sets([c.gather_annotation_classes() for c in self._set])
def get_signatures(self):
return [sig for c in self._set for sig in c.get_signatures()]
NO_CONTEXTS = ContextSet([])
def iterator_to_context_set(func):
def wrapper(*args, **kwargs):
return ContextSet(func(*args, **kwargs))
return wrapper
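# Editor's sketch (illustrative; the decorated generator below is hypothetical, not
# jedi API). iterator_to_context_set simply materializes a generator of contexts
# into a ContextSet:
#
# @iterator_to_context_set
# def _infer_candidates(*contexts):
#     for c in contexts:
#         yield c
#
# # _infer_candidates(a, b) now returns ContextSet([a, b]) instead of a generator.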
| 33.162471 | 94 | 0.637524 | 1,718 | 14,492 | 5.089639 | 0.185681 | 0.030878 | 0.021958 | 0.011436 | 0.237077 | 0.180009 | 0.141468 | 0.087374 | 0.064959 | 0.042543 | 0 | 0.001151 | 0.280638 | 14,492 | 436 | 95 | 33.238532 | 0.837602 | 0.110199 | 0 | 0.248322 | 0 | 0 | 0.045142 | 0.004025 | 0 | 0 | 0 | 0.006881 | 0.003356 | 1 | 0.204698 | false | 0.010067 | 0.067114 | 0.110738 | 0.526846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 |
0
| 3 |